diff --git "a/4596.jsonl" "b/4596.jsonl" new file mode 100644--- /dev/null +++ "b/4596.jsonl" @@ -0,0 +1,696 @@ +{"seq_id":"17853869851","text":"# -*- coding: utf-8 -*-\n# 用Python展示Excel中20个常用操作\n\nimport pandas as pd\nimport numpy as np\nimport warnings\nimport matplotlib.pyplot as plt\nwarnings.filterwarnings('ignore')\n\ndf=pd.read_excel('data/excel_pandas/示例数据.xlsx')\n\n#data=pd.DataFrame(np.random.randint(2,5,size=(10,2))) #可以用np.random.randint中的size方法生成随机整数矩阵\ndata=pd.DataFrame(np.random.rand(10,2)) #np.random.rand()生成0-1之间满足均匀分布的10*2矩阵\n#data.to_excel('测试数据.xlsx') #数据保存\n\n#print(df[df['薪资水平']>5000])\n\n#数据划分\nbins=[0,10000,max(df['薪资水平'])]\nlabels=['low','high']\ndf['new_col']=pd.cut(df['薪资水平'],bins=bins,labels=labels)\n#print(df)\n\n#print(max(df['薪资水平'])) #max(df['薪资水平'])是返回df['薪资水平']列的最大值\n#print(df['薪资水平'].max) #df['薪资水平'].max是返回df['薪资水平']每一行的最大值\n\n#df=df.sort_values(by='薪资水平',ascending=False)\n#print(df)\n\n#数据去重\ndf.drop_duplicates(['创建时间'],inplace=True) #inplace=True表示在原df上进行操作\n#print(df)\n\n#修改df['创建时间']列的格式\ndf['创建时间']=df['创建时间'].dt.strftime('%Y-%m-%d')\n#print(df)\n\n#print(df.columns) #df.columns格式类似list,可以进行索引操作,可以传入列表参数,更改列的顺序\n#new_col=df.columns[[0,2,1,3,4,5,6]]\n#print(df[new_col])\n\ndf['new']=df['地址'] + df['岗位'] #合并两列内容\n#print(df)\n\n#print(df['技能要求'].str.split(',',expand=True)) #df['技能要求'].str经过str转换后,可以用str内置的一些方法进行字符串处理\n\n#print(df.groupby('学历').mean())\n\n#print(len(df[df['薪资水平']>10000]))\n\n#print(df['薪资水平'].describe())\n\n#df['薪资水平'].hist() #绘制直方图\n#plt.show()\n\n#print(df.sample(20)) #数据抽样\n\n#pivot_table,透视表,可以按照自己设定的索引index,并提供值,会生成相应的分析汇总信息\n#pivot_table(data, values=None, index=None, columns=None,aggfunc='mean', fill_value=None, margins=False, dropna=True, margins_name='All')\n#pivot_table有四个最重要的参数index、values、columns、aggfunc\n#index就是层次字段,要通过透视表获取什么信息就按照相应的顺序设置字段\n#values可以对需要的计算数据进行筛选\n#aggfunc参数可以设置我们对数据聚合时进行的函数操作,默认aggfunc='mean',多运算可以写成aggfunc=[np.sum,np.mean]\n#columns类似Index可以设置列层次字段,它不是一个必要参数,作为一种分割数据的可选方式。\n#print(pd.pivot_table(df,index=[\"工作经验\",\"学历\"],values=[\"薪资水平\"]))\n\nprint(df.head())\nprint(df.tail())","repo_name":"roberpan/python","sub_path":"dataanalyse/learn/exceltest.py","file_name":"exceltest.py","file_ext":"py","file_size_in_byte":2708,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"20793291963","text":"'''\nAuthor : MiKueen\nLevel : Medium\nProblem Statement : Search in Rotated Sorted Array\n\nSuppose an array sorted in ascending order is rotated at some pivot unknown to you beforehand.\n(i.e., [0,1,2,4,5,6,7] might become [4,5,6,7,0,1,2]).\nYou are given a target value to search. 
If found in the array return its index, otherwise return -1.\nYou may assume no duplicate exists in the array.\nYour algorithm's runtime complexity must be in the order of O(log n).\n\nExample 1:\nInput: nums = [4,5,6,7,0,1,2], target = 0\nOutput: 4\n\nExample 2:\nInput: nums = [4,5,6,7,0,1,2], target = 3\nOutput: -1\n'''\n\nclass Solution(object):\n def search(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: int\n \"\"\"\n low , high = 0, len(nums)\n while low < high:\n mid = (low + high) // 2\n if nums[mid] < nums[0] <= target: \n high = mid\n elif nums[mid] > nums[0] > target: \n low = mid + 1\n elif nums[mid] < target:\n low = mid + 1\n elif nums[mid] > target:\n high = mid\n else:\n return mid\n return -1\n ","repo_name":"MiKueen/Data-Structures-and-Algorithms","sub_path":"Leetcode/0001-0050/0033-search-in-rotated-sorted-array.py","file_name":"0033-search-in-rotated-sorted-array.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"26148272433","text":"# Databricks notebook source\n# MAGIC %run \"./script import csv\"\n\n# COMMAND ----------\n\n# CAlculate the duration on minutes\ndef get_file_duration(path):\n modification_time_ms = path.modificationTime\n modification_time = datetime.fromtimestamp(modification_time_ms / 1000) # Divide by 1000 to convert milliseconds to minute\n duration = (datetime.now() - modification_time).total_seconds() / 60\n return duration\n\n# COMMAND ----------\n\n# Archive files from the raw directory\ndef archived_raw_files(raw_paths):\n for path in raw_paths:\n file_duration = get_file_duration(path)\n # check if the duration \n if file_duration >= 5:\n # get the raw directory\n source_directory = path.path\n # get the archived directory\n destination_directory = f\"abfss://{container_name}@{storage_account_name}.dfs.core.windows.net/public_transport_data/archived/{path.name}\"\n dbutils.fs.mv(source_directory, destination_directory,recurse = True)\n\n# COMMAND ----------\n\n# Delete the archived files\ndef delete_archived_files(archived_paths):\n for path in archived_paths:\n file_duration = get_file_duration(path)\n # check if the duration \n if file_duration >= 10:\n # get the raw directory\n source_directory = path.path\n # get the archived directory\n destination_directory = f\"abfss://{container_name}@{storage_account_name}.dfs.core.windows.net/public_transport_data/archived/{path.name}\"\n dbutils.fs.rm(destination_directory,recurse = True)\n\n# COMMAND ----------\n\nfrom datetime import datetime\n\nstorage_account_name = \"tarifihicham1cs\"\nstorage_account_access_key = \"OCGL4AOQKWaFu6lezWKGDCVXDe7534tiifLMFUgdrPm6YJ3Vff3CMX5EGbxwIXGgBkdqnO6xomBP+ASti5On2w==\"\ncontainer_name = \"tarifihichamcontainer\"\n\n# get files path\nfiles_paths = get_file_path(storage_account_name,storage_account_access_key,container_name)\n\n# Execute function to apply the conservation policies\narchived_raw_files(files_paths[0])\ndelete_archived_files(files_paths[2])\n\n","repo_name":"Tarifi-Hicham/Public_Transport_DataBricks","sub_path":"script conservation.py","file_name":"script conservation.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"4"} +{"seq_id":"28767522657","text":"with open(\"inputday5.txt\") as f:\n content = f.readlines()\n\nsuit = list(content[0])\n#suit = ['d', 'a', 'b', 'A', 'c', 'C', 'a', 'C', 'B', 'A', 'c', 'C', 'c', 'a', 
'D', 'A']\nsuit = [x for x in suit if x != 'p' and x != 'P']\nsuit = [ord(x) for x in suit]\n\nprint(suit)\n\n\nleft = 0\n\nwhile left < len(suit)-1:\n if left < 0:\n left = 0\n right = left + 1\n if abs(suit[left] - suit[right]) == 32:\n del suit[left:left+2]\n left = left - 1\n else:\n left = left + 1\n\n\nprint(len(suit))\n\n\n","repo_name":"chongguang/adventOfCode2018","sub_path":"day5.py","file_name":"day5.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"20789391621","text":"# -*- coding=utf-8 -*-\n#@author:liuAmon\n#@contact:utopfish@163.com\n#@file:c0002addTwoNumbers.py\n#@time: 2019/10/27 12:33\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:\n n = l1.val + l2.val\n l3 = ListNode(n % 10)\n l3.next = ListNode(n // 10)\n p1 = l1.next\n p2 = l2.next\n p3 = l3\n while True:\n if p1 and p2:\n sum = p1.val + p2.val + p3.next.val\n p3.next.val = sum % 10\n p3.next.next = ListNode(sum // 10)\n p1 = p1.next\n p2 = p2.next\n p3 = p3.next\n elif p1 and not p2:\n sum = p1.val + p3.next.val\n p3.next.val = sum % 10\n p3.next.next = ListNode(sum // 10)\n p1 = p1.next\n p3 = p3.next\n elif not p1 and p2:\n sum = p2.val + p3.next.val\n p3.next.val = sum % 10\n p3.next.next = ListNode(sum // 10)\n p2 = p2.next\n p3 = p3.next\n else:\n if p3.next.val == 0:\n p3.next = None\n break\n return l3\n\n\n\n","repo_name":"utopfish/LeetCodeCamp","sub_path":"python/middle/c0002addTwoNumbers.py","file_name":"c0002addTwoNumbers.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"10384614033","text":"from ast import literal_eval as make_list\nimport numpy as np\nimport networkx as nx\n\nimport callflow\nfrom callflow.utils.df import df_info\nfrom callflow.utils.nxg import nxg_info\n\nLOGGER = callflow.get_logger(__name__)\n\n\n# ------------------------------------------------------------------------------\nclass Filter:\n \"\"\"\n Filters a SuperGraph.\n \"\"\"\n\n VALID_MODES = [\"time\", \"time (inc)\"]\n\n def __init__(self, sg, filter_by=\"time (inc)\", filter_perc=10.0):\n \"\"\"\n Constructor to the filter operation.\n :param sg: SuperGraph\n :param filter_by: filter by metric, can be \"time (inc)\" or \"time\"\n :param filter_perc: filter percentage\n \"\"\"\n assert isinstance(sg, callflow.SuperGraph)\n assert isinstance(filter_by, str) and isinstance(filter_perc, (int, float))\n assert filter_by in Filter.VALID_MODES\n assert 0.0 <= filter_perc <= 100.0\n\n self.sg = sg\n self.filter_by = filter_by\n self.filter_perc = filter_perc\n LOGGER.info(\n f'Filtering ({self.sg}) by \"{self.filter_by}\" = {self.filter_perc}%'\n )\n\n # TODO: Since we factorize the name and module column after creating\n # the CallFlow.dataframe, we need to filter by the callsite indexes.\n self.callsites = self.sg.callsites_idx\n\n # if 0:\n self.mean_root_inctime = self.sg.df_root_max_mean_runtime(\n self.sg.roots, \"time (inc)\"\n )\n\n # Formulate the hatchet query.\n query = [\n (\n \"*\",\n {\n f\"{self.sg.df_get_proxy(filter_by)}\": f\"> {filter_perc * 0.01 * self.mean_root_inctime}\"\n },\n )\n ]\n\n LOGGER.info(f\"Filtering GraphFrame by Hatchet Query :{query}\")\n LOGGER.debug(f\"Number of callsites before QueryMatcher: {len(self.callsites)}\")\n\n self.callsites = 
self.sg.hatchet_filter_callsites_by_query(query)\n\n LOGGER.debug(f\"Number of callsites after QueryMatcher: {len(self.callsites)}\")\n LOGGER.info(\n f\"Removed {len(self.sg.callsites_idx) - len(self.callsites)} callsites.\"\n )\n\n self.compute()\n LOGGER.info(f'Filtered graph: \"{nxg_info(self.nxg)}\"')\n self.sg.nxg = self.nxg\n\n # --------------------------------------------------------------------------\n def compute(self):\n \"\"\"\n Filter the SuperGraph based on {filter_by} attribute and {filter_perc} percentage.\n \"\"\"\n # compute the min/max\n min_vals = {}\n max_vals = {}\n for mode in Filter.VALID_MODES:\n _mn, _mx = self.sg.df_minmax(mode)\n min_vals[mode] = np.array([_mn])\n max_vals[mode] = np.array([_mx])\n LOGGER.debug(f\"{mode}: min = {_mn}, max = {_mx}\")\n\n value = self.filter_perc * 0.01 * np.max(max_vals[self.filter_by])\n self._filter_sg(self.filter_by, value)\n\n # --------------------------------------------------------------------------\n def _filter_sg(self, filter_by, filter_val):\n \"\"\"\n Performs in-place filtering based on parameters\n\n :param filter_by (str): Attribute to filter by. (can be \"time\" or \"time (inc)\"\n :param filter_val (int): Filter percentage\n :return nxg (networkx.graph):\n \"\"\"\n LOGGER.debug(f'Filtering {self.__str__()}: \"{filter_by}\" <= {filter_val}')\n\n if len(self.callsites) > 0:\n self.sg.dataframe = self.sg.dataframe[\n self.sg.dataframe[\"name\"].isin(self.callsites)\n ]\n LOGGER.info(f'Filtered dataframe: \"{df_info(self.sg.dataframe)}\"')\n\n nxg = nx.DiGraph()\n\n if filter_by == \"time (inc)\":\n for edge in self.sg.nxg.edges():\n edge0_idx = self.sg.get_idx(edge[0], \"callsite\")\n edge1_idx = self.sg.get_idx(edge[1], \"callsite\")\n # If source is present in the callsites list\n if (edge0_idx in self.callsites) and (edge1_idx in self.callsites):\n nxg.add_edge(edge[0], edge[1])\n # else:\n # LOGGER.debug(f\"Removing the edge: {edge}\")\n\n elif filter_by == \"time\":\n for callsite in self.callsites:\n path = self.sg.df_lookup_with_column(\"name\", callsite)[\"path\"].tolist()[\n 0\n ]\n path = make_list(path)\n nxg.add_path(path)\n\n self.nxg = nxg\n\n\n# ------------------------------------------------------------------------------\n","repo_name":"LLNL/CallFlow","sub_path":"callflow/operations/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":4561,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"4"} +{"seq_id":"14771146083","text":"import time\nimport traceback\nfrom functools import wraps\nfrom typing import List, Optional\n\nimport telegram\nfrom telegram.ext import DelayQueue\n\nfrom src.utils.logger_helpers import get_logger\nfrom src.utils.mwt import MWT\n\nlogger = get_logger(__name__)\n\ndef dsp_catch(e: Exception):\n logger.error('DSP raise exception:')\n traceback.print_exception(Exception, e, e.__traceback__)\n\n\ndsp = DelayQueue(burst_limit=20, time_limit_ms=1017, exc_route=dsp_catch)\n\n\ndef telegram_retry(tries=4, delay=3, backoff=2, logger=None, silence: bool = False, default=None,\n title: Optional[str] = None):\n def deco_retry(f):\n @wraps(f)\n def f_retry(*args, **kwargs):\n def log(msg: str) -> None:\n if logger:\n logger.warning(msg)\n else:\n print(msg)\n\n stitle = f'[{title}] ' if title else ''\n mtries, mdelay = tries, delay\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n except Exception as e:\n found = False\n if isinstance(e, telegram.error.TimedOut):\n found = True\n if isinstance(e, 
telegram.error.RetryAfter):\n log(f'{stitle}Flood limit, wait 5 sec')\n time.sleep(5)\n found = True\n if not found:\n break\n log(f'{stitle}{e}, Retrying in {mdelay} seconds...')\n time.sleep(mdelay)\n mtries -= 1\n mdelay *= backoff\n log(f'{stitle}breaked')\n if silence and default:\n return default\n if not silence:\n return f(*args, **kwargs)\n\n return f_retry # true decorator\n\n return deco_retry\n\n\n@MWT(timeout=5 * 60)\ndef get_chat_admins(bot: telegram.Bot, chat_id: int) -> List[telegram.ChatMember]:\n \"\"\"\n Возвращает список админов чата. Результаты кэшируются на 5 минут.\n \"\"\"\n\n @telegram_retry(logger=logger, silence=True, default=[], title='get_chat_admins')\n def bot_get_chat_administrators(bot: telegram.Bot, chat_id: int) -> List[telegram.ChatMember]:\n return bot.get_chat_administrators(chat_id)\n\n return bot_get_chat_administrators(bot, chat_id)\n\n\n@telegram_retry(logger=logger, title='get_photo_url')\ndef get_photo_url(bot: telegram.Bot, message: telegram.Message) -> str:\n return bot.get_file(message.photo[-1].file_id).file_path\n","repo_name":"pongo/rapturebot","sub_path":"src/utils/telegram_helpers.py","file_name":"telegram_helpers.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"4"} +{"seq_id":"70949242357","text":"from fastapi.testclient import TestClient\nfrom app.database import get_db, Base\nfrom app.main import app\nfrom app.config import settings\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.ext.declarative import declarative_base\nimport pytest\n\n\nSQLALCHEMY_DATABSE_URL = f'postgresql://{settings.database_username}:{settings.database_password}@{settings.database_hostname}:{settings.database_port}/{settings.database_name}_test'\n\n# connnects sqlalchemy to pstgres database\nengine = create_engine(SQLALCHEMY_DATABSE_URL)\n# inorder to talk to the database\nTestingSessionLocal = sessionmaker(\n autocommit=False, autoflush=False, bind=engine)\n\n\n# use this to create a database session and close it after finishing.\n\n\n# app.dependency_overrides[get_db] = override_get_db # swap the dependencies\n\n\n@pytest.fixture\ndef session():\n Base.metadata.drop_all(bind=engine)\n Base.metadata.create_all(bind=engine)\n db = TestingSessionLocal()\n try:\n yield db\n finally:\n db.close()\n\n\n@pytest.fixture\ndef client(session):\n\n def override_get_db():\n try:\n yield session\n finally:\n session.close()\n app.dependency_overrides[get_db] = override_get_db # swap the dependencies\n # run our code before we run our test\n yield TestClient(app)\n # run our code after we finish our test\n","repo_name":"amirthapa27/fastapi","sub_path":"tests/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"837301010","text":"import random\nimport tensorflow.compat.v1 as tf\n\nfrom utils import weight_variable_glorot\n\ndevices = tf.config.get_visible_devices('GPU')\nif len(devices) == 0:\n devices = tf.config.get_visible_devices()\ndevices = [device.name.replace('physical_device:', '') for device in devices]\n\n# global unique layer ID dictionary for layer name assignment\n_LAYER_UIDS = {}\n\n\ndef get_layer_uid(layer_name=''):\n \"\"\"Helper function, assigns unique layer IDs\n \"\"\"\n if layer_name not in _LAYER_UIDS:\n _LAYER_UIDS[layer_name] = 1\n return 1\n else:\n _LAYER_UIDS[layer_name] += 
1\n return _LAYER_UIDS[layer_name]\n\n\ndef dropout_sparse(x, keep_prob, num_nonzero_elems):\n \"\"\"Dropout for sparse tensors. Currently fails for very large sparse tensors (>1M elements)\n \"\"\"\n noise_shape = [num_nonzero_elems]\n random_tensor = keep_prob\n random_tensor += tf.random_uniform(noise_shape)\n dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool)\n pre_out = tf.sparse_retain(x, dropout_mask)\n return pre_out * (1./keep_prob)\n\n\nclass Layer(object):\n \"\"\"Base layer class. Defines basic API for all layer objects.\n # Properties\n name: String, defines the variable scope of the layer.\n # Methods\n _call(inputs): Defines computation graph of layer\n (i.e. takes input, returns output)\n __call__(inputs): Wrapper for _call()\n \"\"\"\n def __init__(self, **kwargs):\n allowed_kwargs = {'name', 'logging'}\n for kwarg in kwargs.keys():\n assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg\n name = kwargs.get('name')\n if not name:\n layer = self.__class__.__name__.lower()\n name = layer + '_' + str(get_layer_uid(layer))\n self.name = name\n self.vars = {}\n self.issparse = False\n\n def _call(self, inputs):\n return inputs\n\n def __call__(self, inputs):\n with tf.name_scope(self.name):\n outputs = self._call(inputs)\n return outputs\n\n\nclass GraphConvolution(Layer):\n \"\"\"Basic graph convolution layer for undirected graph without edge labels.\"\"\"\n def __init__(self, input_dim, output_dim, adj, features_nonzero, dropout=0., sparse_inputs=False, act=tf.nn.relu, **kwargs):\n super(GraphConvolution, self).__init__(**kwargs)\n with tf.variable_scope(self.name + '_vars'):\n self.vars['weights'] = weight_variable_glorot(input_dim, output_dim, name=\"weights\")\n self.dropout = dropout\n self.adj = adj\n self.act = act\n self.sparse_inputs = sparse_inputs\n self.features_nonzero = features_nonzero\n\n def _call(self, inputs):\n x = inputs\n if self.sparse_inputs:\n x = dropout_sparse(x, 1-self.dropout, self.features_nonzero)\n x = tf.sparse_tensor_dense_matmul(x, self.vars['weights'])\n else:\n x = tf.nn.dropout(x, 1-self.dropout)\n x = tf.matmul(x, self.vars['weights'])\n x = tf.sparse_tensor_dense_matmul(self.adj, x)\n outputs = self.act(x)\n return outputs\n\nclass LinearLayer(Layer):\n def __init__(self, input_dim, output_dim, num_graphs=1, dropout=0., act=tf.nn.sigmoid, **kwargs):\n super(LinearLayer, self).__init__(**kwargs)\n with tf.variable_scope(self.name + '_vars'):\n self.vars['weights'] = weight_variable_glorot(input_dim, output_dim * num_graphs, name=\"weights\")\n self.num_graphs = num_graphs\n self.dropout = dropout\n self.act = act\n self.input_dim = input_dim\n\n def _call(self, inputs):\n inputs = tf.nn.dropout(inputs, 1-self.dropout)\n outputs = [None for i in range(self.num_graphs)]\n for sim_idx in range(self.num_graphs):\n with tf.device(devices[sim_idx % len(devices)]):\n outputs[sim_idx] = tf.matmul(inputs, tf.gather(self.vars['weights'], tf.range(self.input_dim) + sim_idx * self.input_dim, axis=1))\n outputs = self.act(tf.concat(outputs, 0))\n return outputs\n\nclass InnerProductDecoder(Layer):\n \"\"\"Decoder model layer for link prediction.\"\"\"\n def __init__(self, input_dim, sim_idx=0, num_graphs=1, dropout=0., act=tf.nn.sigmoid, **kwargs):\n super(InnerProductDecoder, self).__init__(**kwargs)\n self.num_graphs = num_graphs\n self.dropout = dropout\n self.act = act\n self.input_dim = input_dim\n self.sim_idx = sim_idx\n\n def _call(self, inputs):\n inputs = tf.nn.dropout(inputs, 1-self.dropout)\n inputs = 
tf.split(inputs, self.num_graphs)\n outputs = [None for i in range(self.num_graphs)]\n for sim_idx in range(self.num_graphs):\n with tf.device(devices[sim_idx % len(devices)]):\n outputs[sim_idx] = tf.matmul(inputs[sim_idx], tf.transpose(inputs[sim_idx]))\n outputs = self.act(tf.concat(outputs, 0))\n return outputs","repo_name":"Meng-zhen-Li/Similarity-based-GCN","sub_path":"layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":4914,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"26056842022","text":"alphabet = \"абвгдеёжзийклмнопрстуфхцчшщъыьэюя\"\r\ndictAlphabet = {}\r\n\r\nstr1 = input(\"Введите строку: \")\r\nkey = input(\"Введите ключ: \")\r\ntab = []\r\ntab1 = []\r\ntab2 = []\r\ndict1 = {}\r\n\r\ndef cipherSwap(tab, tab2):\r\n b = 0\r\n c = 0 #строка\r\n n = 0 #столбец\r\n list1 = []\r\n a = 1\r\n l = 0\r\n \r\n for i in tab[1]:\r\n list1.append(i)\r\n list1.sort()\r\n\r\n for i in list1:\r\n dict1[a] = i\r\n a += 1\r\n \r\n\r\n for i in range(len(key)):\r\n l = dict1[i + 1]\r\n b = -1\r\n for i1 in tab2[1]:\r\n b += 1\r\n if l == i1:\r\n for i in range((len(str1) // len(key)) + 2) :\r\n tab[c][n] = tab2[c][b]\r\n if c == (len(str1) // len(key)) + 1:\r\n c = 0\r\n else:\r\n c += 1\r\n n += 1\r\n return tab\r\n\r\ndef get_key(value):\r\n for k, v in dictAlphabet.items():\r\n if v == value:\r\n return int(k)\r\n\r\ndef get_value(key):\r\n for k, v in dict1.items():\r\n if k == key:\r\n return int(v)\r\n\r\ndef qwerty(alphabet, str1, key, dictAlphabet, tab, tab2):\r\n b = 0\r\n c = 2\r\n n = 0\r\n for i in key:\r\n tab[0][b] = str(i)\r\n tab[1][b] = get_key(i)\r\n b += 1\r\n for i in str1:\r\n tab[c][n] = i\r\n if c == (len(str1) // len(key)) + 1:\r\n c = 2\r\n n += 1\r\n else:\r\n c += 1\r\n for i in range(len(tab)):\r\n for j in range(len(tab[i])):\r\n tab2[i][j] = tab[i][j]\r\n return tab, tab2\r\n\r\ndef createTab(str1, key, tab):\r\n N = (len(str1) // len(key)) + 2\r\n M = len(key)\r\n \r\n for i in range(N):\r\n tab.append([0]*M)\r\n return tab\r\n\r\ndef addValue(alphabet, dictAlphabet):\r\n a = 1\r\n for i in alphabet:\r\n dictAlphabet[a] = i \r\n a += 1\r\n\r\ndef printMatrix(tab):\r\n for i in range(len(tab)): # len(A) - возвращает количество строк в матрице А\r\n for j in range(len(tab[i])): # len(A[i]) - возвращает количество элементов в строке i\r\n print(tab[i][j], end = ' ')\r\n print() # делаем переход на новую строку \r\n\r\n\r\n \r\n\r\ntab = createTab(str1, key, tab)\r\ntab2 = createTab(str1, key, tab2)\r\naddValue(alphabet, dictAlphabet)\r\ntab, tab2 = qwerty(alphabet, str1, key, dictAlphabet, tab, tab2)\r\ntab = cipherSwap(tab, tab2)\r\nprintMatrix(tab)\r\n","repo_name":"skull24359/labs","sub_path":"lab_1/lab_1_2.py","file_name":"lab_1_2.py","file_ext":"py","file_size_in_byte":2565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"1846177501","text":"import importlib\nimport os\n\nfrom jinja2 import PackageLoader, Environment\nfrom utils import proto_to_dict\nfrom utils.formatter import to_underline\n\n\ndef gen_service_cls(pb, data):\n svc = pb.call_service_name()\n values = data\n v = list(values.values())\n clz_name = \"\".join([word.capitalize() for word in svc.split(\".\")])\n interfaces = []\n for i in v:\n def get_args():\n for ii in i['input']:\n for _k, _v in ii.items():\n arg, (_type, tag) = _k, _v\n ori = f\"{arg}: {_type}\"\n if tag != 'required':\n \"level : str = None\"\n ori += \" = None\"\n yield ori\n\n tl = 
list(get_args())\n ll = sorted(tl, key=lambda x: \" = None\" in x)\n args = \", \".join(ll)\n interface = {\"path\": i['full_name'], \"method_name\": i['name'], \"args\": args}\n interfaces.append(interface)\n\n data = {\"cls_name\": clz_name, \"service_name\": svc, \"interfaces\": interfaces}\n env = Environment(\n loader=PackageLoader(\"data\", \"templates\"),\n keep_trailing_newline=True,\n line_statement_prefix=\"##\",\n line_comment_prefix=\"###\",\n trim_blocks=True,\n lstrip_blocks=True,\n )\n t = env.get_template(\"cls_demo\")\n content = t.render(data=data)\n print(content)\n\n with open(f\"services/{to_underline(clz_name).lower()}.py\", \"w\") as f:\n f.write(content)\n\n\nif __name__ == '__main__':\n # 拿data下的所有proto\n pb2_list = os.popen(\"find bapis_python -name *_pb2.py\").readlines()\n # 过滤不需要解析的proto\n\n pb2s = [p.replace(\".py\", \"\").replace(\"/\", \".\").strip(\"\\n\") for p in pb2_list if \"gogo_pb2\" not in p]\n\n for pb2 in pb2s:\n pb = importlib.import_module(pb2)\n pp = proto_to_dict.ServiceCaller(pb)\n _data = pp.call_all_services()\n\n # 生成service class\n try:\n gen_service_cls(pp, _data)\n except TypeError as e:\n print(repr(e))\n continue\n","repo_name":"xiaobo4853464/gen_interfaces_by_python_pb","sub_path":"utils/gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"32474218846","text":"\"\"\"\n@File : cut_photo.py\n@Contact : panrs@venpoo.com\n\n@Modify Time\n------------\n2020/4/20 17:42\n\"\"\"\nimport logging\n\nfrom .base import AlgoBase\n\n\nclass CutPhoto(AlgoBase):\n __algo_name__ = 'cut_photo'\n\n def __init__(self, auth_info, file, specRule, ratios, img_size, background_color, process=None, ppi=300,\n fair_level=None, img_format='PNG', is_check=False, file_size_section=None, need_mask_image=False,\n need_fair=True, background_image_keys=None, beauty_level=None, **kwargs):\n \"\"\"\n 证件照制作(旧)\n :param auth_info: 验证信息\n :param file: 图片文件 可以是str:oss文件名 bytes:原图字节文件 PIL.Image.Image:PIL图片对象 algorithm.ExecutableFunction对象\n :param specRule: 检测参数\n :param ratios: 裁剪参数\n :param img_size: 结果图缩放参数\n :param background_color:背景颜色\n :param process: 原图缩放参数\n :param ppi: ppi\n :param fair_level:美颜级别\n :param img_format: 结果图文件格式\n :param is_check: 是否需要检测\n :param file_size_section: 文件大小控制\n :param need_mask_image:是否需要遮罩图片\n :param need_fair: 是否只需要美颜图片\n :param background_image_keys: 背景图片\n :param beauty_level: 新的美颜级别参数\n \"\"\"\n super().__init__(auth_info, self.__algo_name__)\n self.request['specRule'] = specRule\n self.request['process'] = process\n self.request['ratios'] = ratios\n self.request['file'] = self.file_auto_process(file)\n self.request['width_px'], self.request['height_px'] = img_size\n self.request['background_color'] = background_color\n self.request['ppi'] = ppi\n self.request['fair_level'] = fair_level\n self.request['img_format'] = img_format\n self.request['is_check'] = is_check\n self.request['file_size_section'] = file_size_section\n self.request['need_mask_image'] = need_mask_image\n self.request['need_fair'] = need_fair\n self.request['background_image_keys'] = background_image_keys\n self.request['beauty_level'] = beauty_level\n if not beauty_level and fair_level:\n logging.warning('fair_level 参数建议使用 beauty_level参数替换')\n 
self.request.update(kwargs)\n","repo_name":"panyunsuo/algorithm_sdk","sub_path":"algorithm_sdk/cut_photo.py","file_name":"cut_photo.py","file_ext":"py","file_size_in_byte":2405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"444375393","text":"#!/usr/bin/env python3\nimport json\n\n\ndef printList(roadObject, roadNames, speedLimits, lanes, minimumSpeeds):\n for i in range(len(roadNames)):\n road = roadNames[i]\n if (road == roadObject[\"roadName\"]):\n print(\"Position:\", i, \"roadName:\", road)\n else:\n print(\"no RoadName found\")\n\n for y in range(len(speedLimits)):\n road = speedLimits[y]\n if (road == roadObject[\"speedLimit\"]):\n print(\"Position:\", y, \"Speedlimit:\", road)\n else:\n print(\"no SpeedLimit found\")\n\n for x in range(len(lanes)):\n road = lanes[x]\n if (road == roadObject[\"lanes\"]):\n print(\"Position:\", x, \"Lanes:\", road)\n else:\n print(\"no Lanes found\")\n\n for z in range(len(minimumSpeeds)):\n road = minimumSpeeds[z]\n if (road == roadObject[\"minimumSpeed\"]):\n print(\"Position:\", z, \"minimumSpeed:\", road)\n else:\n print(\"no MinimumSpeed found\")\n\n print(roadObject)\n","repo_name":"BohnerSimon/python_study","sub_path":"SImon/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"2239848248","text":"'''\npython3 시간 초과\npypy3 정답\n\n종이의개수 0과 비교하여\n-클래스 형식 버림\n-input() 대신 readline() 사용\n-string -> int 변환 안함\n-check 함수 수정\n\n실행 시간 : 5892ms -> 2212ms\n'''\n\nimport sys\nread = lambda : sys.stdin.readline().strip()\n\nresult = [0, 0, 0]\n\ndef check(mat):\n\tN = len(mat)\n\tglobal result\n\n\t# 나의 색종이가 모두 같은 색인지 확인\n\t# 시간 초과 나서 여기서 시간을 줄일 수 있지 않을까? 
생각\n\tcheck_num = mat[0][0]\t# 체크할 원소는 첫번째 원소를 기준\n\n\t# pypy3 정답, python3 시간초과\n\tfor i in range(N):\n\t\tfor j in range(N):\n\t\t\tif check_num != mat[i][j]:\n\t\t\t\treturn False\n\n\tresult[(int)(check_num) + 1] += 1\n\treturn True\n\n\ndef divide(matrix):\n\tN = len(matrix)\n\tN13 = (int)(N/3) \n\tN23 = (int)(2*N/3)\n\t# 자기 자신이 모두 같은 색인지 확인\n\tif check(matrix) == True:\n\t\treturn\n\n\t# 모두 같은 색이 아니면 9등분 후 다시 확인\n\tMats = [list() for i in range(9)]\n\tfor i in range(N):\n\t\trow = matrix[i]\n\t\tif i/', BlogItemView.as_view(), name='blog_item'),\n path('create/', CreateBlogView.as_view(), name='blog')\n]\n","repo_name":"Cadi7/ebs-python-internship","sub_path":"apps/blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"19390653333","text":"import tensorflow as tf \n\ndef gated_linear_layer(inputs, gates, name = None):\n\n activation = tf.multiply(x = inputs, \\\n y = tf.sigmoid(gates), name = name)\n\n return activation\n\ndef instance_norm_layer(\n inputs, \n epsilon = 1e-06, \n activation_fn = None, \n name = None):\n\n instance_norm_layer = tf.contrib.layers.instance_norm(\n inputs = inputs,\n epsilon = epsilon,\n activation_fn = activation_fn)\n\n return instance_norm_layer\n\ndef conv1d_layer(\n inputs, \n filters, \n kernel_size, \n strides = 1, \n padding = 'same', \n activation = None,\n kernel_initializer = tf.random_normal_initializer(mean=0.0, \\\n stddev=0.01, dtype=tf.float32),\n name = None): #0.01\n\n conv_layer = tf.layers.conv1d(\n inputs = inputs,\n filters = filters,\n kernel_size = kernel_size,\n strides = strides,\n padding = padding,\n activation = activation,\n kernel_initializer = kernel_initializer,\n name = name)\n\n return conv_layer\n\ndef conv2d_layer(\n inputs, \n filters, \n kernel_size, \n strides, \n padding = 'same', \n activation = None,\n kernel_initializer = tf.random_normal_initializer(mean=0.0, \\\n stddev=0.01, dtype=tf.float32),\n name = None): #0.01\n\n conv_layer = tf.layers.conv2d(\n inputs = inputs,\n filters = filters,\n kernel_size = kernel_size,\n strides = strides,\n padding = padding,\n activation = activation,\n kernel_initializer = kernel_initializer,\n name = name)\n\n return conv_layer\n\ndef residual1d_block(\n inputs, \n filters = 1024, \n kernel_size = 3, \n strides = 1,\n name_prefix = 'residual_block_'):\n\n h1 = conv1d_layer(inputs = inputs, filters = filters, \\\n kernel_size = kernel_size, strides = strides, \\\n activation = None, name = name_prefix + 'h1_conv')\n h1_norm = instance_norm_layer(inputs = h1, activation_fn = None, \\\n name = name_prefix + 'h1_norm')\n h1_gates = conv1d_layer(inputs = inputs, filters = filters, \\\n kernel_size = kernel_size, strides = strides, \\\n activation = None, name = name_prefix + 'h1_gates')\n h1_norm_gates = instance_norm_layer(inputs = h1_gates, \\\n activation_fn = None, \\\n name = name_prefix + 'h1_norm_gates')\n h1_glu = gated_linear_layer(inputs = h1_norm, \\\n gates = h1_norm_gates, \\\n name = name_prefix + 'h1_glu')\n h2 = conv1d_layer(inputs = h1_glu, filters = filters // 2, \\\n kernel_size = kernel_size, strides = strides, \\\n activation = None, name = name_prefix + 'h2_conv')\n h2_norm = instance_norm_layer(inputs = h2, \\\n activation_fn = None, name = name_prefix + 'h2_norm')\n \n h3 = inputs + h2_norm\n\n return h3\n\ndef downsample1d_block(\n inputs, \n filters, \n kernel_size, \n strides,\n name_prefix = 'downsample1d_block_'):\n\n h1 
= conv1d_layer(inputs = inputs, filters = filters, \\\n kernel_size = kernel_size, strides = strides, \\\n activation = None, name = name_prefix + 'h1_conv')\n h1_norm = instance_norm_layer(inputs = h1, \\\n activation_fn = None, name = name_prefix + 'h1_norm')\n h1_gates = conv1d_layer(inputs = inputs, filters = filters, \\\n kernel_size = kernel_size, strides = strides, \\\n activation = None, name = name_prefix + 'h1_gates')\n h1_norm_gates = instance_norm_layer(inputs = h1_gates, \\\n activation_fn = None, name = name_prefix + 'h1_norm_gates')\n h1_glu = gated_linear_layer(inputs = h1_norm, \\\n gates = h1_norm_gates, name = name_prefix + 'h1_glu')\n\n return h1_glu\n\ndef downsample2d_block(\n inputs, \n filters, \n kernel_size, \n strides,\n name_prefix = 'downsample2d_block_'):\n\n h1 = conv2d_layer(inputs = inputs, filters = filters, \\\n kernel_size = kernel_size, strides = strides, \\\n activation = None, name = name_prefix + 'h1_conv')\n h1_norm = instance_norm_layer(inputs = h1, \\\n activation_fn = None, name = name_prefix + 'h1_norm')\n h1_gates = conv2d_layer(inputs = inputs, filters = filters, \\\n kernel_size = kernel_size, strides = strides, \\\n activation = None, name = name_prefix + 'h1_gates')\n h1_norm_gates = instance_norm_layer(inputs = h1_gates, \\\n activation_fn = None, name = name_prefix + 'h1_norm_gates')\n h1_glu = gated_linear_layer(inputs = h1_norm, \\\n gates = h1_norm_gates, name = name_prefix + 'h1_glu')\n\n return h1_glu\n\ndef upsample1d_block(\n inputs, \n filters, \n kernel_size, \n strides,\n shuffle_size=2,\n name_prefix='upsample1d_block_'):\n \n h1 = conv1d_layer(inputs=inputs, filters=filters, \\\n kernel_size=kernel_size, strides=strides, \\\n activation=None, name=name_prefix + 'h1_conv')\n h1_shuffle = pixel_shuffler(inputs=h1, \\\n shuffle_size=shuffle_size, \\\n name=name_prefix + 'h1_shuffle')\n h1_norm = instance_norm_layer(inputs=h1_shuffle, \\\n activation_fn=None, name=name_prefix + 'h1_norm')\n\n h1_gates = conv1d_layer(inputs=inputs, filters=filters, \\\n kernel_size=kernel_size, strides=strides, \\\n activation=None, name=name_prefix + 'h1_gates')\n h1_shuffle_gates = pixel_shuffler(inputs=h1_gates, \\\n shuffle_size=shuffle_size, \\\n name=name_prefix + 'h1_shuffle_gates')\n h1_norm_gates = instance_norm_layer(inputs=h1_shuffle_gates, \\\n activation_fn=None, name=name_prefix + 'h1_norm_gates')\n\n h1_glu = gated_linear_layer(inputs=h1_norm, \\\n gates=h1_norm_gates, name=name_prefix + 'h1_glu')\n\n return h1_glu\n\ndef pixel_shuffler(inputs, shuffle_size=2, name=None):\n\n n = tf.shape(inputs)[0]\n w = tf.shape(inputs)[1]\n c = inputs.get_shape().as_list()[2]\n\n oc = c // shuffle_size\n ow = w * shuffle_size\n\n outputs = tf.reshape(tensor = inputs, shape = [n, ow, oc], name = name)\n\n return outputs\n\ndef adaptive_normalization(inputs, gamma_statistics, beta_statistics, name=None):\n \"\"\"\n Adaptive normalization gamma*input + beta\n \"\"\"\n return None\n\ndef create_filter_mask(center_frequencies, filter_size=15, num_masks=64, \n name_prefix='create_mask'):\n\n masks = []\n y = tf.range(start=0, limit=513, dtype=tf.float32, name=name_prefix+'_range')\n y = tf.reshape(y, [1, 513], name=name_prefix+'_range_reshape')\n for i in range(num_masks):\n center_frequency = center_frequencies[0, i]\n bin_center = tf.cast(7 + center_frequency*498, dtype=tf.int32, \n name=name_prefix+'_bin_center_%d'%i)\n y_gauss = tf.divide(tf.exp(-1 * (y - bin_center)**2 / (2*6.49)), 0.157, \n name=name_prefix+'_pdf_%d'%i)\n y_gauss = 
tf.matmul(tf.ones([filter_size, 1], dtype=tf.float32), y_gauss, \n name=name_prefix+'_repmat_%d'%i)\n y_gauss_dct = tf.signal.dct(y_gauss, name=name_prefix+'_dct_%d'%i)\n y_gauss_dct = tf.divide(y_gauss_dct[:,:23], tf.math.sqrt(1024), \n name=name_prefix+'_normalize_%d'%i)\n masks.append(y_gauss_dct)\n\n return masks\n\ndef apply_filter_mask(input, filter_mask, name_prefix='apply_mask'):\n return None\n","repo_name":"ravi-0841/spect-pitch-gan","sub_path":"modules/base_modules.py","file_name":"base_modules.py","file_ext":"py","file_size_in_byte":7576,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"27960116554","text":"#!/usr/bin/env python2.7\n\nimport math\nimport numpy as np\nimport numpy.matlib as npmatlib\nimport numpy.random\n\n#pylint: disable=C0301,C0111,W0603,W0613\nARTICLE_FEATURES = {}\nUSER_FEATURES_DIM = 6\nLAST_RECOMMENDATION = None\nLAST_USER = None\nALPHA = 2.5\n\n# Evaluator will call this function and pass the article features.\n# Check evaluator.py description for details.\ndef set_articles(art):\n for art_key in art:\n ARTICLE_FEATURES[art_key] = {'features' : art[art_key],\n 'm' : npmatlib.identity(USER_FEATURES_DIM),\n 'b' : npmatlib.zeros((USER_FEATURES_DIM, 1)),\n 'w' : npmatlib.zeros((USER_FEATURES_DIM, 1)),\n 'updated' : False}\n\n\n# This function will be called by the evaluator.\n# Check task description for details.\ndef update(reward):\n global LAST_RECOMMENDATION\n global LAST_USER\n if reward == 0:\n ARTICLE_FEATURES[LAST_RECOMMENDATION]['m'] += LAST_USER*LAST_USER.T\n ARTICLE_FEATURES['updated'] = True\n elif reward == 1:\n ARTICLE_FEATURES[LAST_RECOMMENDATION]['m'] += LAST_USER*LAST_USER.T\n ARTICLE_FEATURES[LAST_RECOMMENDATION]['b'] += LAST_USER\n ARTICLE_FEATURES['updated'] = True\n LAST_RECOMMENDATION = None\n LAST_USER = None\n\n# This function will be called by the evaluator.\n# Check task description for details.\ndef reccomend(timestamp, user_features, articles):\n global LAST_RECOMMENDATION\n global LAST_USER\n LAST_USER = np.matrix([user_features]).T\n LAST_RECOMMENDATION = max(articles, key=calculate_ucb(LAST_USER))\n return LAST_RECOMMENDATION\n\ndef calculate_ucb(user_features):\n def _calculate_ucb(art_id):\n current_article = ARTICLE_FEATURES[art_id]\n if current_article['updated']:\n current_article['w'] = current_article['m'].I*current_article['b']\n current_article['updated'] = False\n ucb = (current_article['w'].T*user_features)[0,0]\n ucb += ALPHA*math.sqrt((user_features.T*current_article['m'].I*user_features)[0,0])\n return ucb\n return _calculate_ucb\n","repo_name":"dballesteros7/DataMining-2014","sub_path":"submissions/linucb.py","file_name":"linucb.py","file_ext":"py","file_size_in_byte":2151,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"72139022517","text":"from utils import auto_format as fmt\nfrom utils import substring_after, substring_before, change_last_letter\nfrom utils import Range\nimport time, alarm\nfrom utils import Communication\nimport datetime\n\nclass Controller():\n def __init__ (self, identity, uiMainWindow, model, communication: Communication, binanceHandler, okxHandler, bybitHandler):\n self.labelDict = {}\n self.save_frequency_m = 10*60\n self.retrieve_frequency = 30\n self.current_time = 0\n self.identity_ = identity\n self.communication_ = communication\n self.BinanceHandler_ = binanceHandler\n self.BybitHandler_ = bybitHandler\n self.OKXHandler_ = okxHandler\n self.uiMainWindow_ = 
uiMainWindow\n self.model_ = model\n self.uiMainWindow_.button_changeThreshold.clicked.connect(self.change_threshold_button_clicked)\n self.uiMainWindow_.button_transfer.clicked.connect(self.transfer_button_clicked)\n self.uiMainWindow_.button_export.clicked.connect(self.export_button_clicked)\n\n markets = [\"Bi\", \"Ok\", \"By\"]\n subaccounts = [\"M\", \"1\", \"2\", \"3\"]\n coinTypes = [\"U\", \"C\"]\n\n self.labelDict = {}\n\n for market in markets:\n for subaccount in subaccounts:\n if market == \"Bi\" or market == \"Ok\":\n for coin_type in coinTypes:\n label_key = f\"{market}{subaccount}{coin_type}\"\n label_name = f\"label_{market}{subaccount}{coin_type}\"\n self.labelDict[label_key] = getattr(self.uiMainWindow_, label_name)\n else:\n label_key = f\"{market}{subaccount}U\"\n label_name = f\"label_{market}{subaccount}U\"\n self.labelDict[label_key] = getattr(self.uiMainWindow_, label_name)\n\n self.uiMainWindow_.label_infinity.setStyleSheet(\"QLabel { border: 1px solid black;}\")\n self.uiMainWindow_.label_totalValue.setStyleSheet(\"QLabel { border: 1px solid black;}\")\n\n # Todo: Update data everytime the combo boxes are clicked\n def transfer_button_clicked(self):\n # Todo: What if error happens here?\n self.update_data()\n self.upload_withdrawable()\n try:\n withdrawAmount = float(self.uiMainWindow_.lineEdit_withdrawAmount.text())\n except:\n return\n moduleDict = {\"Binance\": self.BinanceHandler_,\n \"Bybit\": self.BybitHandler_,\n \"Okx\": self.OKXHandler_}\n exchangeFrom = self.uiMainWindow_.comboBox_exchangeFrom.currentText()\n accountFrom = self.uiMainWindow_.comboBox_accountFrom.currentText()\n exchangeTo = self.uiMainWindow_.comboBox_exchangeTo.currentText()\n accountTo = self.uiMainWindow_.comboBox_accountTo.currentText()\n coin = self.uiMainWindow_.comboBox_withdrawCoin.currentText()\n\n # Todo: Check if the withdrawal amount is enough\n # Then move to money to funding wallet\n # The money maybe less than the requested amount\n # If so => confirm from user\n # Execute the move,\n # constantly fetch data from server/subcribe to a socket to check withdrawal progress\n # Binance: cannot be cancelled, Bybit and Okx: can be cancelled\n # For auto-pilot situation, no confirmation and UI needed\n\n # Internal transfer: Only prompt a simple message\n if exchangeFrom == exchangeTo:\n moduleDict[exchangeFrom].transfer_money_internal()\n else:\n pass\n\n def export_button_clicked(self):\n self.model_.export_data()\n\n def change_threshold_button_clicked(self):\n alarm = self.uiMainWindow_.lineEdit_threshold.text().replace(\" \", \"\")\n asset = self.uiMainWindow_.lineEdit_assetName.text().upper()\n if asset == \"\":\n asset = \"USDT\"\n self.uiMainWindow_.lineEdit_threshold.setText(\"\")\n self.uiMainWindow_.lineEdit_assetName.setText(\"\")\n try:\n alarm = Range(float(substring_before(alarm, \"-\")), float(substring_after(alarm, \"-\")))\n except:\n return\n market = self.uiMainWindow_.comboBox_market.currentText()\n coin_type = self.uiMainWindow_.comboBox_coinType.currentText()\n alarm_type = self.uiMainWindow_.comboBox_alarmType.currentText()\n sub_acc = self.uiMainWindow_.comboBox_subAcc.currentText()\n\n symbol_mappings = {\n \"Binance\": \"Bi\",\n \"OKX\": \"Ok\",\n \"Bybit\": \"By\",\n \"Main\": \"M\",\n \"Sub1\": \"1\",\n \"Sub2\": \"2\",\n \"Sub3\": \"3\",\n \"USDM\": \"U\",\n \"COINM\": \"C\"\n }\n\n symbol = \"\".join(symbol_mappings.get(item, \"\") for item in [market, sub_acc, coin_type])\n\n if \"By\" in symbol:\n symbol = change_last_letter(symbol, \"U\")\n\n if 
alarm_type == \"Risk\":\n self.model_.set_data(symbol=symbol, asset_name=asset, risk_alarm=alarm)\n elif alarm_type == \"Equity\":\n self.model_.set_data(symbol=symbol, asset_name=asset, equity_alarm=alarm)\n elif alarm_type == \"Position\":\n self.model_.set_data(symbol=symbol, asset_name=asset, position_alarm=alarm)\n self.upload_risk()\n\n def update_data(self):\n # Define a list or dictionary of handlers\n handlers = {\n \"Binance\": self.BinanceHandler_,\n \"OKX\": self.OKXHandler_,\n \"Bybit\": self.BybitHandler_\n }\n\n self.model_.set_universal_mark_prices(self.BinanceHandler_.get_universal_mark_prices())\n\n # Iterate through the handlers and set data in the model\n for handler in handlers.values():\n risk_data = handler.get_account_status()\n for key, value in risk_data.items():\n symbol = substring_before(key, \"_\")\n asset_name = substring_after(key, \"_\")\n risk, equity, withdrawable = value.get(\"risk\"), value.get(\"equity\"), value.get(\"withdrawable\")\n long_pos, short_pos = value.get(\"long_pos\"), value.get(\"short_pos\")\n initial, maintenance = value.get(\"initial\"), value.get(\"maintenance\")\n self.model_.set_data(symbol=symbol, asset_name=asset_name,\n risk=risk, equity=equity, withdrawable=withdrawable,\n long_pos=long_pos, short_pos=short_pos, initial=initial, maintenance=maintenance)\n\n def upload_withdrawable(self):\n marketFrom = self.uiMainWindow_.comboBox_exchangeFrom.currentText()\n accountFrom = self.uiMainWindow_.comboBox_accountFrom.currentText()\n targetSymbol = f\"{marketFrom[:2]}{'M' if accountFrom == 'Main' else accountFrom[-1:]}U\"\n\n for dict in self.model_.get_data(symbol=targetSymbol):\n if dict[\"name\"] == \"USDT\":\n self.uiMainWindow_.label_withdrawable.setText(f\"{round(dict.get('withdrawable'), 1)}\")\n return\n\n self.uiMainWindow_.label_withdrawable.setText(\"0\")\n\n def upload_risk(self):\n def handle_frontend_data(list):\n total_value = 0\n def calculate_position_risk(long_pos, short_pos):\n if long_pos == 0 or short_pos == 0:\n return 0\n a = long_pos/(long_pos+abs(short_pos))\n b = abs(short_pos)/(long_pos+abs(short_pos))\n return abs(a-b)\n\n returnStr = \"\"\n for dict in list:\n total_value += dict[\"equity\"]\n position_background_color = None\n risk_background_color = None\n equity_background_color = None\n\n position = calculate_position_risk(dict[\"long_pos\"], dict[\"short_pos\"])\n\n if dict[\"risk\"] != 0:\n send_symbol = \"Tuan Anh \" if self.identity_ == \"TA\" else \"Steve \"\n if \"Bi\" in symbol:\n send_symbol += \"Binance \"\n elif \"Ok\" in symbol:\n send_symbol += \"OKX \"\n elif \"By\" in symbol:\n send_symbol += \"Bybit \"\n\n if symbol[2] != \"M\":\n send_symbol += f\"Sub{symbol[2]} \"\n else:\n send_symbol += \"Main \"\n\n if dict[\"risk_alarm\"].out_of_range(dict[\"risk\"]):\n risk_background_color = \"yellow\"\n alarm.activate(message=f\"{send_symbol}risk alarm {dict['name']}: {dict['risk']}\", alarm=True)\n\n if dict[\"equity_alarm\"].out_of_range(dict[\"equity\"]):\n equity_background_color = \"yellow\"\n alarm.activate(message=f\" {send_symbol}equity alarm {dict['name']}: {dict['equity']}\", alarm=True)\n\n if position != 0:\n if dict[\"position_alarm\"].out_of_range(position):\n position_background_color = \"yellow\"\n alarm.activate(message=f\"{send_symbol}position alarm {dict['name']}: {position}\", alarm=True)\n\n returnStr += \"(\" + dict[\"name\"] + \") \"\n returnStr += \"Free: \" + fmt(0) + \" / \" + fmt(dict[\"withdrawable\"], color=\"blue\") + \"
\"\n if dict[\"initial\"] > 0:\n returnStr += \"Margin: \" + fmt(dict[\"initial\"]) + \" / \" + fmt(dict[\"maintenance\"], color=\"red\") + \"
\"\n if dict[\"risk\"] > 0:\n if \"Ok\" in symbol:\n returnStr += \"Risk: \" + fmt(dict[\"risk_alarm\"].start, color=\"red\") + \" / \" + fmt(dict[\"risk\"], background_color=risk_background_color, font_weight=\"bold\") + \" / \" + fmt(dict[\"risk_alarm\"].end, color=\"blue\") + \"
\"\n else:\n returnStr += \"Risk: \" + fmt(dict[\"risk_alarm\"].start, color=\"red\", formatStr=\".0%\") + \" / \" + fmt(dict[\"risk\"], background_color=risk_background_color, formatStr=\".2%\", font_weight=\"bold\") + \" / \" + fmt(dict[\"risk_alarm\"].end, color=\"blue\", formatStr=\".0%\") + \"
\"\n if dict[\"equity\"] > 0:\n returnStr += \"Asset: \" + fmt(dict[\"equity_alarm\"].start, color=\"red\") + \" / \" + fmt(dict[\"equity\"], background_color=equity_background_color, font_weight=\"bold\") + \" / \" + fmt(dict[\"equity_alarm\"].end, color=\"blue\") + \"
\"\n if position != 0:\n returnStr += \"Position: \" + fmt(dict[\"long_pos\"]) + \" / \" + fmt(dict[\"short_pos\"], color=\"red\") + \"
\"\n returnStr += \"Rate: \" + fmt(dict[\"position_alarm\"].start, color=\"red\", formatStr=\".0%\") + \" / \" + fmt(position, background_color=position_background_color, formatStr=\".2%\", font_weight=\"bold\") + \" / \" + fmt(dict[\"position_alarm\"].end, color=\"blue\", formatStr=\".0%\") + \"
\"\n\n return total_value, returnStr[:-4]\n\n total_value = 0\n for symbol, qtLabel in self.labelDict.items():\n symbol_list = self.model_.get_data(symbol=symbol)\n if symbol_list:\n value, stringText = handle_frontend_data(symbol_list)\n total_value += value\n qtLabel.setText(stringText)\n\n self.uiMainWindow_.label_totalValue.setText(f\"Total: {round(total_value, 2)}\")\n\n def data_loop(self):\n # Todo: Stop this when transferring\n if int(self.current_time/self.retrieve_frequency) < int(time.time()/self.retrieve_frequency):\n self.update_data()\n self.communication_.ui_signal.emit()\n\n if int(self.current_time/self.save_frequency_m) < int(time.time()/self.save_frequency_m):\n self.model_.save_data()\n\n self.current_time = time.time()\n\n # These loops must not modify the Model objects\n def ui_update(self):\n self.upload_withdrawable()\n self.upload_risk()\n self.uiMainWindow_.label_infinity.setText(f\"{datetime.datetime.now().strftime('%H:%M:%S')}\")","repo_name":"VoHieu123/crypto_app","sub_path":"Controller.py","file_name":"Controller.py","file_ext":"py","file_size_in_byte":11943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"20726539768","text":"# --- Day 9: All in a Single Night ---\n#\n# Every year, Santa manages to deliver all of his presents in a single night.\n#\n# This year, however, he has some new locations to visit; his elves have provided him the distances between every pair of locations. He can start and end at any two (different) locations he wants, but he must visit each location exactly once. What is the shortest distance he can travel to achieve this?\n#\n# For example, given the following distances:\n#\n# London to Dublin = 464\n# London to Belfast = 518\n# Dublin to Belfast = 141\n#\n# The possible routes are therefore:\n#\n# Dublin -> London -> Belfast = 982\n# London -> Dublin -> Belfast = 605\n# London -> Belfast -> Dublin = 659\n# Dublin -> Belfast -> London = 659\n# Belfast -> Dublin -> London = 605\n# Belfast -> London -> Dublin = 982\n#\n# The shortest of these is London -> Dublin -> Belfast = 605, and so the answer is 605 in this example.\n#\n# What is the distance of the shortest route?\n\n\nfrom copy import copy\n\nwith open('day09input.txt') as input_file:\n lines = input_file.read().splitlines()\n\n# lines = [\n# 'London to Dublin = 464',\n# 'London to Belfast = 518',\n# 'Dublin to Belfast = 141'\n# ]\n\ndict_routes = {\n # 'London': {\n # 'Dublin': 464,\n # 'Belfast': 518\n # }\n}\n\nfor line in lines:\n route, distance = line.split(' = ')\n loc_1, loc_2 = route.split(' to ')\n distance = int(distance)\n if loc_1 not in dict_routes.keys():\n dict_routes[loc_1] = {}\n dict_routes[loc_1][loc_2] = distance\n if loc_2 not in dict_routes.keys():\n dict_routes[loc_2] = {}\n dict_routes[loc_2][loc_1] = distance\n\n\ndef visit(dict_routes, loc_visited_prev, loc_current, distance_prev, distance_list):\n if len(loc_visited_prev) == len(dict_routes.keys()):\n # print(' -> '.join(loc_visited_prev) + f' = {distance_prev}')\n distance_list.append(distance_prev)\n return distance_prev\n else:\n for loc in dict_routes[loc_current].keys():\n loc_visited_next = copy(loc_visited_prev)\n distance_acc = distance_prev\n if loc not in loc_visited_next:\n loc_visited_next.append(loc)\n distance_acc += dict_routes[loc_current][loc]\n distance_acc += visit(dict_routes, loc_visited_next, loc, distance_acc, distance_list)\n return distance_acc\n\n\ndistance_list = []\n\nfor loc_start in dict_routes.keys():\n 
loc_visited = [loc_start]\n distance_trip = visit(dict_routes, loc_visited, loc_start, 0, distance_list)\n\nprint(f'Distance of the shortest route: {min(distance_list)}')\n\n\n# --- Part Two ---\n#\n# The next year, just to show off, Santa decides to take the route with the longest distance instead.\n#\n# He can still start and end at any two (different) locations he wants, and he still must visit each location exactly once.\n#\n# For example, given the distances above, the longest route would be 982 via (for example) Dublin -> London -> Belfast.\n#\n# What is the distance of the longest route?\n\n\nprint(f'Distance of the longest route: {max(distance_list)}')\n","repo_name":"tinyboxvk/Advent-of-Code-2015","sub_path":"day09.py","file_name":"day09.py","file_ext":"py","file_size_in_byte":3082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"27633908319","text":"import pygame\nimport random\nimport sys\n\n# Constants\nWIDTH, HEIGHT = 600, 400\nCELL_SIZE = 20\nFPS = 5\nSCORE_SPEED_INCREMENT = 50 # Number of points required to increase speed\nSNAKE_SPEED = 8 # Initial speed\n\n# Colors\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nGREEN = (0, 255, 0)\nRED = (255, 0, 0)\n\n# Directions\nUP = (0, -1)\nDOWN = (0, 1)\nLEFT = (-1, 0)\nRIGHT = (1, 0)\n\nclass SnakeGame:\n def __init__(self):\n pygame.init()\n self.screen = pygame.display.set_mode((WIDTH, HEIGHT))\n pygame.display.set_caption(\"Snake Xenia\")\n self.clock = pygame.time.Clock()\n self.running = False\n self.game_over = False\n self.snake = Snake(self)\n self.food = Food(self)\n self.score = 0\n self.font = pygame.font.Font(None, 36)\n self.speed = SNAKE_SPEED\n\n def run(self):\n while not self.game_over:\n self.handle_events()\n if self.running:\n self.update()\n self.draw()\n self.clock.tick(self.speed)\n else:\n self.show_start_screen()\n \n # Show the \"Game Over\" screen and wait for a key press to continue\n self.show_game_over_screen()\n self.wait_for_key()\n pygame.quit()\n sys.exit() # Ensure the application exits when the game ends\n\n def handle_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.game_over = True\n elif event.type == pygame.KEYDOWN:\n if not self.running:\n self.running = True # Start the game on any key press\n if event.key == pygame.K_UP:\n self.snake.change_direction(UP)\n elif event.key == pygame.K_DOWN:\n self.snake.change_direction(DOWN)\n elif event.key == pygame.K_LEFT:\n self.snake.change_direction(LEFT)\n elif event.key == pygame.K_RIGHT:\n self.snake.change_direction(RIGHT)\n\n def show_start_screen(self):\n self.screen.fill(BLACK)\n text = self.font.render(\"Tap any key to start\", True, GREEN)\n text_rect = text.get_rect(center=(WIDTH / 2, HEIGHT / 2))\n self.screen.blit(text, text_rect)\n pygame.display.flip()\n\n def show_game_over_screen(self):\n self.screen.fill(BLACK)\n game_over_text = self.font.render(\"Game Over\", True, RED)\n game_over_text_rect = game_over_text.get_rect(center=(WIDTH / 2, HEIGHT / 2 - 20))\n score_text = self.font.render(f\"Score: {self.score}\", True, WHITE)\n score_text_rect = score_text.get_rect(center=(WIDTH / 2, HEIGHT / 2 + 20))\n self.screen.blit(game_over_text, game_over_text_rect)\n self.screen.blit(score_text, score_text_rect)\n pygame.display.flip()\n\n def wait_for_key(self):\n waiting_for_key = True\n while waiting_for_key:\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n waiting_for_key = False\n\n def calculate_speed(self):\n if 
self.score >= SCORE_SPEED_INCREMENT:\n self.speed = SNAKE_SPEED + (self.score // SCORE_SPEED_INCREMENT)\n else:\n self.speed = SNAKE_SPEED\n return self.speed\n\n def update(self):\n self.snake.move()\n if self.snake.check_collision():\n self.game_over = True\n if self.snake.head == self.food.position:\n self.snake.grow()\n self.food.randomize_position()\n self.score += 1\n\n def draw(self):\n self.screen.fill(BLACK)\n self.snake.draw()\n self.food.draw()\n self.display_score() # Display score on the screen\n pygame.display.flip()\n\n def display_score(self):\n score_text = self.font.render(f\"Score: {self.score}\", True, WHITE)\n self.screen.blit(score_text, (10, 10))\n\nclass Snake:\n def __init__(self, game):\n self.game = game\n self.body = [(5, 5)]\n self.direction = RIGHT\n\n @property\n def head(self):\n return self.body[0]\n\n def change_direction(self, new_direction):\n if (\n (new_direction == UP and self.direction != DOWN) or\n (new_direction == DOWN and self.direction != UP) or\n (new_direction == LEFT and self.direction != RIGHT) or\n (new_direction == RIGHT and self.direction != LEFT)\n ):\n self.direction = new_direction\n\n def move(self):\n new_head = (self.head[0] + self.direction[0], self.head[1] + self.direction[1])\n self.body.insert(0, new_head)\n if self.head == self.game.food.position:\n return\n self.body.pop()\n\n def check_collision(self):\n if (\n self.head in self.body[1:] or\n self.head[0] < 0 or self.head[0] >= WIDTH // CELL_SIZE or\n self.head[1] < 0 or self.head[1] >= HEIGHT // CELL_SIZE\n ):\n return True\n return False\n\n def grow(self):\n self.body.append((0, 0)) # Dummy values; the new segment will be adjusted in the move function\n\n def draw(self):\n for segment in self.body:\n x, y = segment\n pygame.draw.rect(self.game.screen, GREEN, (x * CELL_SIZE, y * CELL_SIZE, CELL_SIZE, CELL_SIZE))\n\nclass Food:\n def __init__(self, game):\n self.game = game\n self.position = (random.randint(0, WIDTH // CELL_SIZE - 1), random.randint(0, HEIGHT // CELL_SIZE - 1))\n\n def randomize_position(self):\n self.position = (random.randint(0, WIDTH // CELL_SIZE - 1), random.randint(0, HEIGHT // CELL_SIZE - 1))\n\n def draw(self):\n x, y = self.position\n pygame.draw.rect(self.game.screen, RED, (x * CELL_SIZE, y * CELL_SIZE, CELL_SIZE, CELL_SIZE))\n\nif __name__ == \"__main__\":\n game = SnakeGame()\n game.run()\n","repo_name":"swornika-maharjan/python","sub_path":"snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":5883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"9446893491","text":"import sys\nimport os\n\nfrom kafka import KafkaAdminClient\nfrom kafka.admin import NewTopic\nfrom kafka.errors import TopicAlreadyExistsError\n\n\"\"\"\nCreate kafka topics from file contain topic names separated by new line.\nCurrently support default num_partitions=1, replication_factor=1.\nChange kafka port by passing env variable KAFKA_PORT\n\nExample:\nKAFKA_PORT=49092 python3 create_topics_from_file.py testdata/topics\n\"\"\"\nKAFKA_PORT = os.environ.get('KAFKA_PORT', '9092')\nconfig = {\n 'bootstrap_servers': f'localhost:{KAFKA_PORT}',\n 'client_id': 'kafka-tool-01',\n}\n\n\nclass Topic(NewTopic):\n def __init__(self, name: str, num_partitions: int = 1, replication_factor: int = 1):\n self.name = name\n self.num_partitions = num_partitions\n self.replication_factor = replication_factor\n self.replica_assignments = {}\n self.topic_configs = {}\n\n\nif len(sys.argv) < 2:\n raise Exception(f'Require one 
argument: a file containing topic names separated by new lines')\nfile_name = sys.argv[1]\n\n\ndef create(topic_names: list):\n client = None\n try:\n client = KafkaAdminClient(**config)\n print(config)\n\n for name in topic_names:\n topic = Topic(name)\n try:\n client.create_topics(new_topics=[topic])\n print(f'Created topic: {name}')\n except TopicAlreadyExistsError:\n print(f'Topic {name} exists => skip')\n except Exception as e:\n print(f'creating topic {name} failed: {e}')\n except Exception as e:\n print(e)\n finally:\n if client:\n client.close()\n\n\ndef main():\n with open(file_name) as file:\n topics = [line.strip() for line in file.readlines()]\n create(topics)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"nguyenvinhchi/kafka-streams-examples","sub_path":"tools/create_topics_from_file.py","file_name":"create_topics_from_file.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"32263373958","text":"class Node:\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None\n\ndef card2number(n, card):\n if card[0] == 'S':\n return int(card[1:])-1\n elif card[0] == 'C':\n return n + int(card[1:])-1\n elif card[0] == 'D':\n return 2*n + int(card[1:])-1\n elif card[0] == 'H':\n return 3*n + int(card[1:])-1\n else:\n raise Exception\n\nn, q = map(int, input().split())\n\nhead = Node(0)\nnodes = [head]\nfor i in range(1, 4*n):\n node = Node(i)\n nodes[-1].right = node\n node.left = nodes[-1]\n nodes.append(node)\n\nfor _ in range(q):\n l, r = map(lambda arg: card2number(n, arg), input().split())\n if nodes[l].left is None:\n continue\n nodes[l].left.right = nodes[r].right\n if nodes[r].right is not None:\n nodes[r].right.left = nodes[l].left\n head.left = nodes[r]\n nodes[r].right = head\n head = nodes[l]\n nodes[l].left = None\n\ncards = [[], [], [], []]\nfor _ in range(4*n):\n cards[head.value//n].append(head.value%n+1)\n head = head.right\nfor i in range(4):\n print(*[f'{\"SCDH\"[i]}{card}' for card in cards[i]])\n","repo_name":"take44444/sakumon","sub_path":"triumph/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"23436658131","text":"\"\"\"\nThe Millionth Fibonacci Kata\n\nThe year is 1214. One night, Pope Innocent III awakens to find the archangel\nGabriel floating before him. Gabriel thunders to the pope:\n\nGather all of the learned men in Pisa, especially Leonardo Fibonacci.\nIn order for the crusades in the holy lands to be successful, these men must\ncalculate the millionth number in Fibonacci's recurrence. Fail to do this,\nand your armies will never reclaim the holy land. It is His will.\nThe angel then vanishes in an explosion of white light.\n\nPope Innocent III sits in his bed in awe. How much is a million? he thinks to\nhimself. He never was very good at math.\n\nHe tries writing the number down, but because everyone in Europe is still\nusing Roman numerals at this moment in history, he cannot represent this number.\nIf he only knew about the invention of zero, it might make this sort of thing easier.\n\nHe decides to go back to bed. He consoles himself, The Lord would never challenge\nme thus; this must have been some deceit by the devil. 
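For reference, these are the fast-doubling identities the solution below relies on (standard Fibonacci facts, stated in the kata's own notation):

fib(2n) := fib(n) * (2*fib(n+1) - fib(n))
fib(2n + 1) := fib(n)^2 + fib(n+1)^2
fib(-n) := (-1)^(n+1) * fib(n)

The last line (the negafibonacci rule) is why fib(-81), with 81 odd, comes out positive.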
A pretty horrendous\nnightmare, to be sure.\n\nPope Innocent III's armies would go on to conquer Constantinople\n(now Istanbul), but they would never reclaim the holy land as he desired.\n\nIn this kata you will have to calculate fib(n) where:\n\nfib(0) := 0\nfib(1) := 1\nfib(n + 2) := fib(n + 1) + fib(n)\nWrite an algorithm that can handle n where 1000000 ≤ n ≤ 1500000.\n\"\"\"\nimport sys\nsys.path.append('..')\n\nfrom helpers.test_wrapper import Test\n\n\ndef fib_helper(n):\n if n == 0:\n return (0, 1)\n\n div, rem = divmod(n, 2)\n fibn, fibnp1 = fib_helper(div)\n\n fib2n = 2 * fibn * fibnp1 - fibn * fibn\n fib2np1 = fibn * fibn + fibnp1 * fibnp1\n\n if rem:\n return (fib2np1, fib2n + fib2np1)\n return (fib2n, fib2np1)\n\n\ndef fib(n):\n if n < 0:\n if n % 2 == 0:\n return -fib_helper(-n)[0]\n return fib_helper(-n)[0]\n return fib_helper(n)[0]\n\n\ndef run_tests():\n with Test() as test:\n test.describe(\"Basic tests\")\n test.it(\"Verifying that fib(0) == 0\")\n test.assert_equals(fib(0), 0)\n\n test.it(\"Verifying that fib(1) == 1\")\n test.assert_equals(fib(1), 1)\n\n test.it(\"Verifying that fib(2) == 1\")\n test.assert_equals(fib(2), 1)\n\n test.it(\"Verifying that fib(3) == 2\")\n test.assert_equals(fib(3), 2)\n\n test.it(\"Verifying that fib(4) == 3\")\n test.assert_equals(fib(4), 3)\n\n test.it(\"Verifying that fib(5) == 5\")\n test.assert_equals(fib(5), 5)\n\n test.it(\"Verifying that fib(-4) == -3\")\n test.assert_equals(fib(-4), -3)\n\n test.it(\"Verifying that fib(-81) == 37889062373143906\")\n test.assert_equals(fib(-81), 37889062373143906)\n\n\nif __name__ == '__main__':\n run_tests()\n","repo_name":"nlpet/codewars","sub_path":"Mathematics/millionth_fib.py","file_name":"millionth_fib.py","file_ext":"py","file_size_in_byte":2752,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"10120125231","text":"import os\nfrom django.core.files import File\nfrom django.contrib.auth.hashers import make_password\nfrom trec_eval_project.settings import BASE_DIR\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'trec_eval_project.settings')\n\nimport django\n\ndjango.setup()\n\nfrom trec.models import Researcher, Track, Task, Run, Run_type, Query_type, Feedback_type\nfrom django.contrib.auth.models import User\n\n# Used to get random index\nfrom random import randrange\n\n\ndef populate():\n run_id = 0\n\n qrel_path = os.path.join(BASE_DIR, 'pop script data', 'qrels/')\n runs_path = os.path.join(BASE_DIR, 'pop script data', 'runs/')\n \n # add superuser\n add_user('admin', '', 'adminpass', is_superuser=True)\n\n # required example users.\n add_user('jill', '', 'jill')\n add_user('bob', '', 'bob')\n add_user('jen', '', 'jen')\n\n # turn users into \"research gurus\"\n jill = add_researcher('jill', 'jill-display', website='https://www.google.co.uk/', organisation='Planet Express', )\n bob = add_researcher('bob', 'bob-display', website='https://www.google.co.uk/', organisation='Planet Express', )\n jen = add_researcher('jen', 'jen-display', website='https://www.google.co.uk/', organisation='MomCorp', )\n\n user_list = [jill, bob, jen]\n\n # Add all tracks in qrel folder\n # Also fill tracks with qrel tasks\n for track in os.listdir(qrel_path):\n if not track.startswith('.'):\n add_Track(track, 'http://www.google.com', 'Description - A simple track for searching about ' + track,\n 'Genre: ' + track)\n\n for task in os.listdir(qrel_path + track):\n if task.endswith('qrels.txt'):\n add_Task(track, task, 'http://www.google.com', 
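The helpers in this populate script lean heavily on Django's get_or_create defaults pattern; a minimal sketch of the idiom (standard Django behaviour, simplified field values assumed):

# get_or_create returns (object, created); `defaults` are applied only when
# a new row is created, never when an existing one is found
user, created = User.objects.get_or_create(
    username='jill',
    defaults={'email': 'jill@example.com'},
)
if not created:
    # an existing row keeps its old field values; update explicitly if needed
    user.email = 'jill@example.com'
    user.save()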
'Description - A task from the world of tomorrow!',\n 2005, qrel_path + track + '/' + task)\n\n\n # add tracks\n # add_Track('test_track_1', 'http://www.google.com', 'Description - A simple track for tests.', 'Genre - tester')\n\n # add tasks\n # add_Task('test_track_1', 'test_task_1', 'http://www.google.com', 'Description - A simple task...', 1990, test_qrel)\n # add_Task('test_track_1', 'test_task_2', 'http://www.google.com', 'Description - Testing purpose only', 1990, test_qrel)\n\n # add_Task('test_track_1', 'test_task_1', 'http://www.google.com', 'Description - A simple task...', 1990, test_qrel)\n\n # Now search for runs across all tracks / tasks and upload them to the database;\n # repeat it a few times to bulk up the database\n\n for a in range(0, 10):\n for track in os.listdir(runs_path):\n if not track.startswith('.'):\n for run in os.listdir(runs_path + track):\n if not run.startswith('.'):\n run_id = run_id + 1\n add_run(get_rand_user(user_list), get_task_name(qrel_path, track), 'test_run' + str(run_id),\n 'Description of run', runs_path + track + '/' + run, Run_type.AUTOMATIC, Query_type.OTHER,\n Feedback_type.NONE, run_id, False)\n\n\ndef get_rand_user(user_list):\n random_index = randrange(0, len(user_list))\n return user_list[random_index]\n\n\n# Joseph - probably a better way of getting the name\n# of a task a run is submitted to \ndef get_task_name(qrel_path, track):\n task = \"No qrel task\"\n\n for t in os.listdir(qrel_path + track):\n if t.endswith('qrels.txt'):\n task = t\n return task\n\n\ndef add_user(username, email, password, is_superuser=False):\n password = make_password(password) # get_or_create does not hash the password, so we do so here.\n defaults = {'email': email,\n 'password': password,\n 'is_staff': is_superuser,\n 'is_superuser': is_superuser}\n u = User.objects.get_or_create(username=username, defaults=defaults)[0]\n u.save()\n return u\n\n\ndef add_researcher(username, display_name, website='', organisation='', picture='profile_images/default.png'):\n defaults = {'website': website,\n 'organisation': organisation,\n 'display_name': display_name,\n 'profile_picture': picture}\n user = User.objects.filter(username=username)[0]\n r = Researcher.objects.get_or_create(user=user,\n defaults=defaults)[0]\n r.save()\n return r\n\n\ndef add_Track(title, url, description, genre):\n t = Track.objects.get_or_create(title=title,\n track_url=url,\n description=description,\n genre=genre)[0]\n t.save()\n return t\n\n\ndef add_Task(track_title, title, url, description, year, qrel_file_path):\n qrel_file = File(open(qrel_file_path))\n track = Track.objects.filter(title=track_title)[0]\n defaults = {'track': track,\n 'task_url': url,\n 'description': description,\n 'year': year,\n 'judgements_file': qrel_file}\n t = Task.objects.get_or_create(title=title, defaults=defaults)[0]\n t.save()\n return t\n\n\ndef add_run(researcher_name, task_title, name, description, results_file_path, run_type, query_type, feedback_type,\n run_id, new):\n if new:\n \tresults_file = results_file_path\n else:\n \tresults_file = File(open(results_file_path))\n \n user = User.objects.filter(username=researcher_name)[0]\n researcher = Researcher.objects.filter(user=user)[0]\n task = Task.objects.filter(title=task_title)[0]\n defaults = {'researcher': researcher,\n 'task': task,\n 'description': description,\n 'result_file': results_file,\n 'run_type': run_type,\n 'query_type': query_type,\n 'feedback_type': feedback_type,\n 'run_id': name + '-' + str(run_id)}\n r = Run.objects.get_or_create(name=name, 
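One wart in add_Task/add_run in this script is that File(open(path)) never closes its OS file handle. A hypothetical leak-free variant of add_Task (same models as this script, illustrative only) could use FieldFile.save() inside a with block:

import os
from django.core.files import File

def add_task_safe(track_title, title, url, description, year, qrel_file_path):
    track = Track.objects.filter(title=track_title).first()
    defaults = {'track': track, 'task_url': url,
                'description': description, 'year': year}
    task, _ = Task.objects.get_or_create(title=title, defaults=defaults)
    # FieldFile.save() copies the content into storage, so the handle can close here
    with open(qrel_file_path, 'rb') as fh:
        task.judgements_file.save(os.path.basename(qrel_file_path), File(fh))
    return task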
defaults=defaults)[0]\n r.save()\n r.populate_with_trec_eval_data()\n r.save()\n return r\n\n\nif __name__ == '__main__':\n populate()\n\n\n# Creator comments (Joseph) - thanks Michelle for setting this up\n# butchered / hacked it to fill database with random assignment of\n# runs to users\n","repo_name":"incipientOne/trec","sub_path":"trec_eval_project/populate_trec.py","file_name":"populate_trec.py","file_ext":"py","file_size_in_byte":6300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"7761137741","text":"from xml_schema import XMLSchema\n\n# NOTE: message_schema and message_dict are assumed to be defined elsewhere,\n# presumably with message_schema built from the imported XMLSchema class\ntry:\n # Encode the dictionary into an XML document\n message = message_schema.encode(message_dict)\n # Convert the XML document to a string\n xml_string = message.toxml()\n # Print the XML string to the console\n print(xml_string)\nexcept Exception as e:\n print(f\"An error occurred: {e}\")\n","repo_name":"mxcheung/pypi_xml_schema","sub_path":"error_handling/message_schema_encode.py","file_name":"message_schema_encode.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"74262770357","text":"# find the minimal set of stations that would cover all 50 states?\r\nstates_needed = {\"mt\", \"wa\", \"or\", \"id\", \"nv\", \"ut\", \"ca\", \"az\"} # set\r\n\r\nstations = {}\r\nstations[\"kone\"] = {\"id\", \"nv\", \"ut\"} # set\r\nstations[\"ktwo\"] = {\"wa\", \"id\", \"mt\"} # set\r\nstations[\"kthree\"] = {\"or\", \"nv\", \"ca\"} # set\r\nstations[\"kfour\"] = {\"nv\", \"ut\"} # set\r\nstations[\"kfive\"] = {\"ca\", \"az\"} # set\r\n\r\nfinal_stations = set()\r\n\r\nwhile states_needed:\r\n best_station = None\r\n states_covered = set()\r\n for station, states in stations.items():\r\n covered = states & states_needed\r\n if len(covered) > len(states_covered):\r\n best_station = station\r\n states_covered = covered\r\n\r\n states_needed -= states_covered\r\n final_stations.add(best_station)\r\n\r\nprint(final_stations)\r\n","repo_name":"kwuh91/dynamic_greedy-problems-examples","sub_path":"set-intersection.py","file_name":"set-intersection.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"29980505608","text":"import os\n\nimport gym\nimport numpy as np\nimport torch\nfrom gym.spaces.box import Box\n\nfrom .bench import Monitor\nfrom .vec_env.vec_env import VecEnvWrapper\nfrom .vec_env.dummy_vec_env import DummyVecEnv\nfrom .vec_env.shmem_vec_env import ShmemVecEnv\nfrom .vec_env.vec_normalize import VecNormalize as VecNormalize_\n\nimport retro\nfrom .wrappers import SonicJointEnv, TimeLimit, AllowBacktracking, SonicMaxXSumRInfo, \\\n SonicDiscretizer, RewardScaler, StochasticFrameSkip, EnvAudio, ObsMemoryBuffer, RandomStart\nfrom .core_wrapper import ObservationWrapper\n\n\ndef make_env(env_states, seed, rank, mode, args):\n def _thunk():\n env = SonicJointEnv(env_states)\n if args.use_audio:\n env = EnvAudio(env)\n env = SonicDiscretizer(env)\n env = AllowBacktracking(env)\n env = SonicMaxXSumRInfo(env)\n if mode == 'train':\n env = RewardScaler(env, scale=args.rew_scale)\n env = StochasticFrameSkip(env, args.fskip_num, args.fskip_prob, args.obs_keep_fskip)\n env = ObsMemoryBuffer(env, args.obs_mbuf)\n env = TimeLimit(env, max_episode_steps=args.max_episode_steps)\n env = RandomStart(env, args.min_random_start, args.max_random_start)\n\n env.seed(seed + rank)\n return env\n\n return 
_thunk\n\n\ndef make_vec_envs(env_states,\n seed,\n num_processes,\n gamma,\n device,\n mode,\n args):\n assert mode in ['train', 'eval']\n if num_processes % len(env_states) == 0:\n # one state per process\n envs = [\n make_env([env_states[i%len(env_states)]], seed, i, mode, args)\n for i in range(num_processes)\n ]\n else:\n # randomly sample a new state on done\n envs = [\n make_env(env_states, seed, i, mode, args)\n for i in range(num_processes)\n ]\n\n if len(envs) > 1:\n envs = ShmemVecEnv(envs, context='forkserver')\n else:\n envs = DummyVecEnv(envs)\n\n envs = VecPyTorch(envs, device)\n return envs\n\n\n# Checks whether done was caused by time limits or not\nclass TimeLimitMask(gym.Wrapper):\n def step(self, action):\n obs, rew, done, info = self.env.step(action)\n if done and self.env._max_episode_steps == self.env._elapsed_steps:\n info['bad_transition'] = True\n\n return obs, rew, done, info\n\n def reset(self, **kwargs):\n return self.env.reset(**kwargs)\n\n\nclass VecPyTorch(VecEnvWrapper):\n def __init__(self, venv, device):\n \"\"\"Move observations, actions and rewards between numpy and torch on the given device\"\"\"\n super(VecPyTorch, self).__init__(venv)\n self.device = device\n # TODO: Fix data types\n\n def reset(self):\n obs = self.venv.reset()\n obs = {k: torch.from_numpy(obs[k]).float().to(self.device) for k in obs.keys()}\n return obs\n\n def step_async(self, actions):\n if isinstance(actions, torch.LongTensor):\n # Squeeze the dimension for discrete actions\n actions = actions.squeeze(1)\n actions = actions.cpu().numpy()\n self.venv.step_async(actions)\n\n def step_wait(self):\n obs, reward, done, info = self.venv.step_wait()\n obs = {k: torch.from_numpy(obs[k]).float().to(self.device) for k in obs.keys()}\n reward = torch.from_numpy(reward).unsqueeze(dim=1).float()\n return obs, reward, done, info\n\n\nclass VecNormalize(VecNormalize_):\n def __init__(self, *args, **kwargs):\n super(VecNormalize, self).__init__(*args, **kwargs)\n self.training = True\n\n def _obfilt(self, obs, update=True):\n if self.ob_rms:\n if self.training and update:\n self.ob_rms.update(obs)\n obs = np.clip((obs - self.ob_rms.mean) /\n np.sqrt(self.ob_rms.var + self.epsilon),\n -self.clipob, self.clipob)\n return obs\n else:\n return obs\n\n def train(self):\n self.training = True\n\n def eval(self):\n self.training = False\n\n\n# Derived from\n# https://github.com/openai/baselines/blob/master/baselines/common/vec_env/vec_frame_stack.py\nclass VecPyTorchFrameStack(VecEnvWrapper):\n def __init__(self, venv, nstack, device=None):\n self.venv = venv\n self.nstack = nstack\n\n wos = venv.observation_space # wrapped ob space\n self.shape_dim0 = wos.shape[0]\n\n low = np.repeat(wos.low, self.nstack, axis=0)\n high = np.repeat(wos.high, self.nstack, axis=0)\n\n if device is None:\n device = torch.device('cpu')\n self.stacked_obs = torch.zeros((venv.num_envs, ) +\n low.shape).to(device)\n\n observation_space = gym.spaces.Box(\n low=low, high=high, dtype=venv.observation_space.dtype)\n VecEnvWrapper.__init__(self, venv, observation_space=observation_space)\n\n def step_wait(self):\n obs, rews, news, infos = self.venv.step_wait()\n self.stacked_obs[:, :-self.shape_dim0] = \\\n self.stacked_obs[:, self.shape_dim0:].clone()\n for (i, new) in enumerate(news):\n if new:\n self.stacked_obs[i] = 0\n self.stacked_obs[:, -self.shape_dim0:] = obs\n return self.stacked_obs, rews, news, infos\n\n def reset(self):\n obs = self.venv.reset()\n if torch.backends.cudnn.deterministic:\n self.stacked_obs = torch.zeros(self.stacked_obs.shape)\n else:\n 
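            # NOTE: the deterministic branch above re-allocates stacked_obs rather than
            # zeroing it in place, presumably to keep cudnn-deterministic runs free of
            # in-place ops; the cheaper in-place zero_() below is used otherwise.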
self.stacked_obs.zero_()\n self.stacked_obs[:, -self.shape_dim0:] = obs\n return self.stacked_obs\n\n def close(self):\n self.venv.close()\n","repo_name":"faraazn/meng","sub_path":"pytorch-a2c-ppo-acktr-gail/a2c_ppo_acktr/envs.py","file_name":"envs.py","file_ext":"py","file_size_in_byte":5651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"26357016343","text":"\nimport scrapy\nfrom craiglist_apts.items import CraiglistAptsItem\nfrom scrapy.selector import Selector\nfrom scrapy.http import HtmlResponse\n\nclass AptfinderSpider(scrapy.Spider):\n\tname = \"craig\"\n\tallowed_domains = [\"craiglist.org\"]\n\tbase_url = \"http://newyork.craigslist.org/search/que/abo?\"\n\tstart_urls = (\n\t\t'http://newyork.craigslist.org/search/que/abo?max_price=1200',\n\t)\n\n\tdef parse(self, response):\n\t\tpostings = response.xpath(\".//p[contains(@data-repost-of,'')]\")\n\t\tlistOfLocations = (postings.xpath(\"//span[@class='pnr']/*/text()\").extract())\n\t\tfor i in range (0,len(postings)-1):\n\t\t\titem = CraiglistAptsItem()\n\t\t\titem[\"craigID\"]=postings[i].xpath(\"@data-pid\").extract()\n\t\t\t\n\t\t\ttemp = postings[i].xpath(\"span[@class='txt']\")\n\t\t\ttemp2 = temp.xpath(\"span[@class='l2']\")\n\n\t\t\tinfo = temp.xpath(\"span[@class='pl']\")\n\n\t\t\titem[\"title\"]= info.xpath(\"a/span[@id='titletextonly']/text()\").extract()\n\t\t\titem[\"date\"] = info.xpath(\"time/@title\").extract()\n\t\t\titem[\"link\"] = info.xpath(\"a/@href\").extract()\n\n\t\t\titem[\"link\"]=[\"http://newyork.craigslist.org\" + link for link in item[\"link\"]]\n\t\t\titem[\"price\"] = temp2.xpath(\"span[@class='price']/text()\").extract()\n\t\t\titem[\"area\"] = temp2.xpath(\"span[@class='pnr']/small/text()\").extract()\n\t\t\t\n\t\t\t# note: the original test `not (\"ridgewood\" and \"astoria\" and \"brooklyn\") in locations`\n\t\t\t# only ever checked \"brooklyn\", because an `and` chain of non-empty strings yields the last one\n\t\t\tlocations = [posts.lower() for posts in listOfLocations]\n\t\t\tif not any(area in locations for area in (\"ridgewood\", \"astoria\", \"brooklyn\")):\n\t\t\t\tyield item\n\t\t","repo_name":"alin72/craiglist_apts","sub_path":"spiders/aptFinder.py","file_name":"aptFinder.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"4677160336","text":"setting = input()\nsetting = setting.split()\n\nn = int(setting[0])\nballs = int(setting[1])\n\nmachine_lst = []\n\nfor x in range(0, n):\n\tmachines = int(input())\n\tmachine_lst.append(machines)\n\t\nmachine_rest = []\n\nfor x in range(0, n):\n\trest = balls % machine_lst[x]\n\tmachine_rest.append(rest)\n\nprint (max([i for i, x in enumerate(machine_rest) if x == min(machine_rest)])+1)","repo_name":"hujuu/py-test","sub_path":"paiza/C038最小値のindex.py","file_name":"C038最小値のindex.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"4"} +{"seq_id":"2660201026","text":"from django.db import models\n\nclass UserLocation(models.Model):\n user = models.OneToOneField(\n 'User',\n null=False,\n on_delete=models.CASCADE,\n related_name='%(app_label)s_%(class)s'\n )\n location = models.OneToOneField(\n 'Location',\n null=False,\n on_delete=models.CASCADE,\n related_name='%(app_label)s_%(class)s'\n )\n","repo_name":"giveucon/giveucon","sub_path":"backend/api/models/user_location.py","file_name":"user_location.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"11787871366","text":"#!/usr/bin/python3\n\"\"\"\nmodule containing append_after function\n\"\"\"\n\n\ndef 
append_after(filename=\"\", search_string=\"\", new_string=\"\"):\n \"\"\"\n function that inserts a line of text to a file,\n after each line containing a specific string\n \"\"\"\n with open(filename, 'r', encoding=\"utf-8\") as file:\n new_str = \"\"\n for line in file:\n if line.find(search_string) != -1:\n new_str += line + new_string\n else:\n new_str += line\n with open(filename, 'w', encoding=\"utf-8\") as file:\n file.write(new_str)\n","repo_name":"amanuelgthn/alx-higher_level_programming","sub_path":"0x0B-python-input_output/100-append_after.py","file_name":"100-append_after.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"11158874199","text":"from django.shortcuts import render, HttpResponse, redirect, get_object_or_404, reverse\nfrom .forms import ArticleForm\nfrom .models import Article, Comment, Newsletter, Video\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom projects.models import Project\nfrom events.models import Event\nfrom products.models import Product\nfrom members.models import index_images\n# Create your views here.\n\ndef videos(request):\n video = Video.objects.all()\n return render(request,\"videos.html\" ,{\"videos\" : video})\n\ndef articles(request):\n keyword = request.GET.get(\"keyword\")\n\n if keyword:\n newsletters = Newsletter.objects.filter(title__contains=keyword)\n return render(request, \"Newsletter.html\", {\"newsletters\": newsletters})\n newsletters = Newsletter.objects.all()\n\n return render(request, \"articles.html\", {\"newsletters\": newsletters})\n\ndef newsletters(request):\n keyword = request.GET.get(\"keyword\")\n\n if keyword:\n newsletters = Newsletter.objects.filter(title__contains=keyword)\n return render(request, \"newsletters.html\", {\"newsletters\": newsletters})\n newsletters = Newsletter.objects.all()\n\n return render(request, \"newsletters.html\", {\"newsletters\": newsletters})\n\n\n@login_required(login_url=\"user:login\")\ndef dashboard(request):\n projects = Project.objects.filter(author=request.user)\n events = Event.objects.filter(author=request.user)\n articles = Article.objects.filter(author=request.user)\n products = Product.objects.filter(author=request.user)\n context = {\n \"projects\": projects,\n \"article\": articles,\n \"events\": events,\n \"products\": products\n }\n return render(request, \"dashboard.html\", context)\n\n@login_required(login_url=\"user:login\")\ndef addArticle(request):\n form = ArticleForm(request.POST or None, request.FILES or None)\n\n if form.is_valid():\n article = form.save(commit=False)\n\n article.author = request.user\n article.save()\n\n messages.success(request, \"Article Created Successfully!!!\")\n return redirect(\"article:dashboard\")\n return render(request, \"addarticle.html\", {\"form\": form})\n\n\ndef detail(request, id):\n # article = Article.objects.filter(id = id).first()\n article = get_object_or_404(Article, id=id)\n\n comments = article.comments.all()\n return render(request, \"detail.html\", {\"article\": article, \"comments\": comments})\n\n\n@login_required(login_url=\"user:login\")\ndef updateArticle(request, id):\n article = get_object_or_404(Article, id=id)\n form = ArticleForm(request.POST or None, request.FILES or None, instance=article)\n if form.is_valid():\n article = form.save(commit=False)\n\n article.author = request.user\n article.save()\n\n messages.success(request, \"Article Successfully 
Updated\")\n return redirect(\"article:dashboard\")\n\n return render(request, \"update.html\", {\"form\": form})\n\n\n@login_required(login_url=\"user:login\")\ndef deleteArticle(request, id):\n article = get_object_or_404(Article, id=id)\n\n article.delete()\n\n messages.success(request, \"Article Successfully Deleted\")\n\n return redirect(\"article:dashboard\")\n\n\ndef addComment(request, id):\n article = get_object_or_404(Article, id=id)\n\n if request.method == \"POST\":\n comment_author = request.POST.get(\"comment_author\")\n comment_content = request.POST.get(\"comment_content\")\n\n newComment = Comment(comment_author=comment_author, comment_content=comment_content)\n\n newComment.article = article\n\n newComment.save()\n return redirect(reverse(\"article:detail\", kwargs={\"id\": id}))\n\n\n\ndef index(request):\n images = index_images.objects.all()\n number = []\n for i in range(len(images)):\n number.append(str(i))\n return render(request, \"index.html\", {\"images\": images, \"number\":number } )\n\ndef register(request):\n return render(request, \"register.html\")\n\ndef about(request):\n return render(request, \"about.html\")\n\ndef events(request):\n return render(request, \"events.html\")\n\ndef IPD(request):\n return render(request, \"IPD.html\")\n\ndef Publications(request):\n return render(request, \"Publications.html\")\n\ndef Collaborators(request):\n return render(request, \"Collaborators.html\")\n\ndef Conferences(request):\n return render(request, \"Conferences.html\")\n\ndef members(request):\n return render(request, \"members.html\")\n\n","repo_name":"Rutvikrj26/RuTAG_2.0","sub_path":"article/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"6444554964","text":"import asyncio, toml, json, logging\nimport math\n\nfrom aiogram import Bot\nfrom aiogram.dispatcher.fsm.storage.redis import RedisStorage, StorageKey\n\nfrom tgbot.config import Config\n\n# from aiogram.dispatcher.fsm.context import FSMContext\n\nfrom schedulers.function import get_keys_redis,check_number_missed_blocks, send_message_user, get_db\nfrom schedulers.exceptions import NoSlashingInfo, raise_error\nfrom schedulers.exceptions import raise_error\nfrom funtion import * #, get_index_by_address\n\n\n\nasync def check_user_node( \n bot: Bot,\n config: Config,\n storage: RedisStorage,\n type_network: str,\n network: str\n ) -> None:\n \n # get_db()\n\n config_toml = toml.load(\"config.toml\")\n \n\n \n time_repeat = config_toml[\"networks\"][type_network][network][\"time_repeat\"]\n allow_missed_block = config_toml[\"networks\"][type_network][network][\"missed_blocks\"]\n urls = await check_url(config_toml[\"networks\"][type_network][network][\"rpc\"])\n \n if urls[\"active_urls\"] == []:\n\n for id in config.tg_bot.admin_ids:\n logging.info(id)\n await bot.send_message(\n chat_id=id,\n text=f'RPC not working 🔴, network {network} ScreenValidator',\n )\n\n return \n\n url = urls[\"active_urls\"][0]\n\n keys = get_keys_redis(config)\n\n logging.info(f\"\\n\\nNEW NETWORK {type_network} -> {network}\\n\") \n logging.info(f\"Number of clients: {len(keys)}\\n\") \n\n for key in keys:\n key = key.split(\":\")\n\n bot_id = int(key[1])\n chat_id = int(key[2])\n user_id = int(key[3])\n \n\n data = await storage.get_data(bot, StorageKey(bot_id=bot_id, chat_id=chat_id, user_id=user_id))\n\n if type_network in data[\"validators\"]:\n \n\n if network in data[\"validators\"][type_network]:\n 
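                # each Redis key encodes (bot_id, chat_id, user_id); the stored record
                # holds that user's tracked validators per network, checked one moniker at a time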
list_users = list(data[\"validators\"][type_network][network].keys())\n logging.info(f\"{network} {type_network} -> User: id - {user_id}\")\n logging.info(f\"{network} {type_network} -> Monikers: {len(list_users)}\\n\")\n\n for moniker in data[\"validators\"][type_network][network].keys():\n logging.info(f\"{network} {type_network} -> Moniker: {moniker}\")\n\n signing_infos = await slashing_signing_info_all(url, config_toml[\"networks\"][type_network][network][\"path_bin\"])\n signing_info = signing_infos[await get_index_by_consAddr(data[\"validators\"][type_network][network][moniker][\"const_addr\"], signing_infos)]\n\n first_snapshot_missed_block = data[\"validators\"][type_network][network][moniker][\"last_missed_block\"]\n second_snapshot_missed_block = int(signing_info.get(\"missed_blocks_counter\"))\n\n if await check_number_missed_blocks(\n first_snapshot_missed_block, \n second_snapshot_missed_block, \n allow_missed_block, \n network, \n type_network\n ):\n\n await send_message_user(\n bot=bot, \n chat_id=chat_id, \n moniker=moniker, \n missed_blocks=second_snapshot_missed_block, \n network=network, \n type_network=type_network,\n config=config_toml[\"networks\"][type_network][network]\n )\n\n data[\"validators\"][type_network][network][moniker][\"last_missed_block\"] = second_snapshot_missed_block\n\n \n\n data[\"rpc\"] = urls\n\n\n await storage.update_data(bot, StorageKey(bot_id=bot_id, chat_id=chat_id, user_id=user_id), data)\n logging.debug(f\"{network} {type_network} -> Data scheduler: {data}\")\n\n","repo_name":"MaxMavaIll/Screen","sub_path":"schedulers/job_new.py","file_name":"job_new.py","file_ext":"py","file_size_in_byte":3918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"20240370542","text":"#creation of a list\nmy_list=[\"apple\",\"banana\",\"orange\",105,-234] #creation of a list\n#print the list\nprint (my_list)\n\n#In the following way we ask python to take the string at index [0]\n#and pick the character at index [2]\nprint ((my_list[0])[2])\n\n#In the following way we ask python to take the string at index [0]\n#and pick the character at index [3]\nprint ((my_list[0])[3]) \n\n# CHALLENGE: create a loop that prints out only the floats from the list\n# counter to be used as an index value\na=-1\n# i takes the value of each element picked from the list\nfor i in my_list:\n a+= 1\n # this function checks whether the value can be parsed as a float\n # (note: ints and numeric strings also pass this check)\n def check(i):\n try:\n float(i)\n return True\n except ValueError:\n return False\n # if the value passes the check we print it\n if check(i):\n print (my_list[a])\n\n","repo_name":"GrisoFandango/week-7","sub_path":"3 - List.py","file_name":"3 - List.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"13454789201","text":"from flask import Blueprint, request\nfrom ..models import db, Deck, User, Favorite\n\nbp = Blueprint(\"favorites\", __name__, url_prefix=\"\")\n\n\n@bp.route(\"/<int:userid>/decks/favorites\")\n# get favorites by user id\ndef get_favorites(userid):\n favorites = Favorite.query.options(db.joinedload(\n \"favdeck\")).filter_by(user_id=userid).all()\n\n data = [\n favorite.deck_id\n for favorite in favorites]\n return {\"data\": data}\n\n\n@bp.route(\"/<int:userid>/<int:deckid>/favorites\", methods=['POST'])\n# push favorite deck into favorites\ndef post_favorites(userid, deckid):\n data = request.json\n deck = Deck.query.filter_by(id=deckid).first()\n favorites = Favorite(**data)\n 
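    # NOTE: Favorite(**data) trusts the raw request body (mass assignment); a
    # hardened version would whitelist fields instead, e.g.
    # Favorite(user_id=userid, deck_id=deckid).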
db.session.add(favorites)\n db.session.commit()\n decks = {}\n fav = {}\n fav[favorites.id] = {\"deck_id\": deck.id}\n decks[deck.id] = {\"id\": deck.id, \"title\": deck.title, \"user_id\": deck.user_id,\n \"description\": deck.description}\n return {\n \"decks\": decks,\n \"fav\": favorites.deck_id\n }\n\n\n@bp.route(\"/<int:userid>/<int:deckid>/favoritedelete\", methods=['DELETE'])\n# remove a favorited deck for the user\ndef delete_favorites(userid, deckid):\n favorite = Favorite.query.filter_by(user_id=userid, deck_id=deckid).first()\n db.session.delete(favorite)\n db.session.commit()\n user = User.query.options(db.joinedload(\"favoriteDecks\")).filter_by(\n id=userid).first()\n fav_deck_ids = []\n decks = {}\n for deck in user.favoriteDecks:\n fav_deck_ids.append(deck.id)\n decks[deck.id] = {\"id\": deck.id, \"title\": deck.title, \"user_id\": deck.user_id,\n \"description\": deck.description}\n return {\"decks\": decks, \"favoritedecks\": fav_deck_ids}\n","repo_name":"SauceKnight/backend_flashcard","sub_path":"app/routes/favorites.py","file_name":"favorites.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"4"} +{"seq_id":"24286617493","text":"from pprint import pprint\n\nfrom django.contrib.auth.models import User\n\nfrom .models import AnswerHistory, Answer, Question\nfrom django.db.models import Max, Q, Count\nfrom itertools import combinations\nfrom random import choice\n\n\ndef get_active_questions():\n return Question.objects.filter(inactive=False)\n\n\ndef get_latest_proper_answered_question(user):\n return AnswerHistory.objects.filter(Q(question__inactive=False)\n & Q(created_by=user)\n )\n\n\ndef get_active_categories():\n stats = {}\n for q in get_active_questions():\n for sc in q.subcategory.filter(inactive=False):\n subcategory = sc.text\n category = sc.category.text\n stats.setdefault(category, {}).setdefault(subcategory, {\n 'no_of_questions': 0,\n 'no_of_answers': 0,\n 'score': 0,\n 'progress': 0\n })\n stats[category][subcategory]['no_of_questions'] += 1\n return stats\n\n\ndef calculate_stats(user):\n stats = get_active_categories()\n ah = get_latest_proper_answered_question(user)\n tries = ah.values('question__subcategory__category__text',\n 'question__subcategory__text').annotate(total=Count('id'))\n latest = ah.values('question__id').annotate(id=Max('id'))\n correct = ah.filter(Q(pk__in=list(a['id'] for a in latest))\n & Q(is_correct=True)\n ).values('question__subcategory__category__text',\n 'question__subcategory__text').annotate(total=Count('id'))\n for t in tries:\n category = t['question__subcategory__category__text']\n subcategory = t['question__subcategory__text']\n stats[category][subcategory]['no_of_answers'] = t['total']\n total = correct.filter(Q(question__subcategory__category__text=category)\n & Q(question__subcategory__text=subcategory)\n )\n if len(total) > 0:\n stats[category][subcategory]['score'] = total[0]['total']\n stats[category][subcategory]['progress'] = int(round(\n stats[category][subcategory]['score'] / stats[category][subcategory]['no_of_questions'] * 100, 0))\n return stats\n\n\ndef slowest_progress(stats):\n categories = []\n progress = 100\n for category in stats:\n for subcategory in stats[category]:\n sp = stats[category][subcategory]['progress']\n if sp == 100:\n continue\n elif sp < progress:\n progress = sp\n categories.clear()\n categories.append((category, subcategory))\n elif sp == progress:\n categories.append((category, subcategory))\n return 
categories\n\n\ndef get_preferred_questions(categories, user):\n set_of_queries = []\n ah = get_latest_proper_answered_question(user)\n latest = ah.values('question__id').annotate(id=Max('id'))\n correct = ah.filter(Q(pk__in=list(a['id'] for a in latest))\n & Q(is_correct=True)\n ).values()\n base = get_active_questions().exclude(pk__in=list(c['question_id'] for c in correct))\n size = len(categories)\n for category, subcategory in categories:\n set_of_queries.append(Q(subcategory__category__text=category) & Q(subcategory__text=subcategory))\n for r in range(size, 0, -1):\n qs = Question.objects.none()\n comb = list(combinations(set_of_queries, r))\n for c in comb:\n q = base\n for o in c:\n q = q.filter(o)\n qs |= q\n if len(qs) > 0:\n return qs\n\n\ndef get_random_question(questions):\n return choice(questions)\n\n\ndef log_answer(user, question, answers, is_correct):\n match_answers = Answer.objects.filter(pk__in=answers)\n answer = AnswerHistory.objects.create(created_by=user,\n question=question,\n is_correct=is_correct)\n for a in match_answers:\n answer.selected_answers.add(a)\n answer.save()\n\n\ndef students_stats():\n students = User.objects.filter(is_active=True, is_staff=False)\n s = {}\n for student in students:\n cs = calculate_stats(student)\n questions = 0\n scores = 0\n for category in cs:\n for subcategory in cs[category]:\n questions += (cs[category][subcategory]['no_of_questions'])\n scores += (cs[category][subcategory]['score'])\n progress = int(round(scores / questions * 100, 0))\n s[student.username] = {\n 'progress': progress\n }\n return s\n","repo_name":"synowiec/elearning","sub_path":"questions/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":4744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"16108043648","text":"import os\nimport sys\nimport io\nfrom . 
import confit\nfrom .config_base import CONFIG_BASE\n\n_config = confit.Confit()\n\n# XXX:deprecated 1.4\n_config.deprecated(('screen', 'guess_interactive'), ('screen', 'persistent'))\n\n# XXX:deprecated 1.5\n_config.deprecated(\n ('HammerAitoffPlot', 'num_b_ribs'), ('HammerAitoffPlot', 'ribs_b'))\n_config.deprecated(\n ('HammerAitoffPlot', 'num_l_ribs'), ('HammerAitoffPlot', 'ribs_l'))\n_config.deprecated(\n (\"readcolumn\", \"use_numeric\"), (\"read_column\", \"return_numpy\"))\n_config.deprecated(\n (\"readcolumn\", \"comment_char\"), (\"read_column\", \"comment_char\"))\n\n# XXX:deprecated 1.6.4\n_config.deprecated(('printer', 'paper'), ('postscript', 'paper'))\nif sys.version_info < (3, 0, 0):\n with io.StringIO(CONFIG_BASE.decode('utf-8')) as fp:\n _config.readfp(fp)\nelse:\n with io.StringIO(CONFIG_BASE) as fp:\n _config.readfp(fp)\n\nif 'HOME' in os.environ:\n USERCONFIGFILE = os.path.join(os.environ['HOME'], '.biggles')\n if os.path.exists(USERCONFIGFILE):\n _config.read(USERCONFIGFILE)\n\n\ndef interactive():\n return hasattr(sys, \"ps1\")\n\n\ndef bool(section, option):\n global _config\n try:\n x = _config.get(section, option, \"no\")\n if x[0] == 'y':\n return 1\n except:\n return 0\n\n\ndef value(section, option, notfound=None):\n global _config\n return _config.get(section, option, notfound)\n\n\ndef options(section):\n global _config\n return _config.get_section(section)\n\n\ndef configure(*args):\n global _config\n if len(args) == 2:\n _config.set(\"default\", args[0], args[1])\n elif len(args) == 3:\n _config.set(args[0], args[1], args[2])\n","repo_name":"biggles-plot/biggles","sub_path":"biggles/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"4"} +{"seq_id":"4833527500","text":"from os.path import dirname, join\nimport os\nimport pandas as pd\nfrom datetime import date\nfrom random import randint\nfrom concurrent.futures import ThreadPoolExecutor\nfrom bokeh.io import curdoc\nfrom bokeh.layouts import row, column, widgetbox\nfrom bokeh.models import ColumnDataSource\nfrom bokeh.models.widgets import PreText, Select, Button, TextInput, DataTable, DateFormatter, TableColumn, Tabs, Panel, NumberEditor\nfrom bokeh.plotting import figure\nfrom bokeh.document import without_document_lock\nfrom tornado import gen\nfrom straxrpc.client import StraxClient\nfrom functools import partial\nfrom pages import page_classes\nimport json\nimport numpy as np\nfrom collections import defaultdict\n\nstrax_addr = os.environ.get(\"STRAXRPC_ADDR\", \"localhost:50051\")\nstrax = StraxClient(strax_addr)\ntry:\n dataframe_names = strax.search_dataframe_names(\"*\")\nexcept:\n dataframe_names = ['event_basics']\n\ndoc = curdoc()\nexecutor = ThreadPoolExecutor(max_workers=2)\n\nwith open(join(dirname(__file__), \"data\",\"plot_templates.json\"), \"rb\") as f:\n plot_templates = {t[\"name\"]:t for t in json.load(f)}\nrandom_src = {\"x\":np.arange(100), \"y\": 90*np.random.rand(100), \n \"time\": 10.*np.random.rand(100), \"length\":800.*np.random.rand(100),\n \"xs\":[np.arange(10) for _ in range(100)], \"ys\": [90*np.random.rand(10) for _ in range(100)]}\nsources = defaultdict(list)\nsources[\"__random__\"] = [random_src]\n\n\nshared_state = {\n \"executor\": executor, \n \"doc\": doc,\n \"dataframe_names\": dataframe_names,\n \"strax_ctx\": strax,\n \"plot_templates\": plot_templates,\n \"sources\": sources,\n}\n\n\npages = []\nfor klass in page_classes:\n page = 
klass(shared_state)\n pages.append(page)\n\ndef update_pages():\n try:\n dataframe_names = strax.search_dataframe_names(\"*\")\n except:\n dataframe_names = ['event_basics']\n shared_state[\"dataframe_names\"] = dataframe_names\n\n for p in pages:\n p.update()\n\nshared_state[\"update_pages\"] = update_pages\n# tabs = Tabs(tabs=[explore_panel,load_data_panel, plot_data_panel, rpc_server_details])\npanels = []\nfailed = []\nfor page in pages:\n try:\n panels.append(Panel(child=page.create_page(), title=page.title) )\n except:\n failed.append(page)\n print(\"failed to load {} page. \".format(page.title))\nupdate_pages()\ntabs = Tabs(tabs=panels)\n# def retry_failed(failed):\n# refailed = []\n# for page in failed:\n# try:\n# panel = Panel(child=page.create_page(), title=page.title)\n# tabs.tabs.append(panel)\n# except:\n# refailed.append(page)\n# if refailed:\n# doc.add_timeout_callback(partial(retry_failed, refailed), 10000)\n\n# if failed:\n# doc.add_timeout_callback(partial(retry_failed, failed), 10000)\ndoc.add_periodic_callback(update_pages, 3000)\ndoc.add_root(tabs)","repo_name":"jmosbacher/straxui","sub_path":"straxui/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2828,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"16889238542","text":"github_api = 'https://api.github.com'\n\n##########################################################################\n\nimport requests\nimport json\nimport getpass\nfrom datetime import date, timedelta, timezone, datetime\nimport matplotlib.pyplot as plt\nfrom urllib.parse import urljoin\n\n\n\ndef main():\n\tprint(\"\\n#### DELIGHT TEST - STAGE DATA ENGINEER ####\\n\")\n\n\t## authentication\n\theaders = {'Authorization': 'token %s' % getToken()}\n\t\n\t## program\n\n\t# layout #\n\tinput(\"Press 'enter' to download contributors...\")\n\tprint(\"\\n\")\n\t##\n\n\tlist_contributors = downloadContributors(headers)\n\n\t##\n\tinput(\"Press 'enter' to monitor commits...\")\n\tprint(\"\\n\")\n\t##\n\n\tcommitsMonitoring(headers)\n\n\t##\n\tinput(\"Press 'enter' to visualize contributions per contributors...\")\n\tprint(\"\\n\")\n\t##\n\n\tcontributionsVisualizer(headers,list_contributors)\n\n\t##\n\tinput(\"Press 'enter' to show contributors global activity...\")\n\tprint(\"\\n\")\n\t##\n\n\tglobalActivity(headers, list_contributors)\n\n\t## end\n\n\n######\n\ndef getToken():\n\t### authenticate with a token to avoid the request rate limit (60 on Github)\n\n\tprint('# Already got a Github authentication token ? (Y/N)')\n\n\twhile True:\n\t\ttoken_available = input('Answer: ')\n\t\tif token_available == 'Y' or token_available == 'N':\n\t\t\tbreak\n\t\telse:\n\t\t\tprint(\"# Value error : please enter a valid answer (Y/N)\")\n\n\tif token_available == \"Y\":\n\t\twhile True:\n\t\t\ttoken = input('Insert token here: ')\n\n\t\t\t# request\n\t\t\theaders = {'Authorization': 'token %s' % token}\n\t\t\tres = requests.get(\n\t\t\t\tgithub_api,\n\t\t\t\theaders = headers\n\t\t\t\t)\n\n\t\t\t# parse\n\t\t\tj = json.loads(res.text)\n\n\t\t\t# check for errors\n\t\t\tif res.status_code >= 400:\n\t\t\t\tmsg = j.get('message', 'UNDEFINED ERROR (no error description from server)')\n\t\t\t\tprint('ERROR:', msg)\n\t\t\telse:\n\t\t\t\tbreak\n\n\t\tprint('# This token is valid.')\n\t\treturn token\n\n\telse:\n\t\twhile True:\n\t\t\t# enter credentials\n\t\t\tusername = input('Github username: ')\n\t\t\tpassword = getpass.getpass('Github password (hidden): ')\n\n\t\t\t# add a note for the token\n\t\t\tnote = input('Note (write anything): ')\n\n\t\t\t# request\n\t\t\turl = urljoin(github_api, 'authorizations')\n\t\t\tpayload = {}\n\t\t\tif note:\n\t\t\t\tpayload['note'] = note\n\t\t\tres = requests.post(\n\t\t\t\turl,\n\t\t\t\tauth = (username, password),\n\t\t\t\tdata = json.dumps(payload),\n\t\t\t\t)\n\n\t\t\t# parse\n\t\t\tj = json.loads(res.text)\n\n\t\t\t# check for errors\n\t\t\tif res.status_code >= 400:\n\t\t\t\tmsg = j.get('message', 'UNDEFINED ERROR (no error description from server)')\n\t\t\t\tprint('ERROR:', msg)\n\t\t\telse:\n\t\t\t\tbreak\n\n\t\t# token received\n\t\ttoken = j['token']\n\t\tprint('New token (copy it for another run): ', token)\n\t\treturn token\n\n######\n\ndef downloadContributors(headers):\n\t### Download and format contributor data (including anonymous contributors).\n\n\tprint(\"# Downloading contributors...\")\n\tstop = 0\n\ti = 1\n\twhile stop != 1:\n\n\t\t# request\n\t\turl_download_contributors = urljoin(github_api, \"/repos/facebook/react/contributors\")\n\t\tres = requests.get(\n\t\t\turl_download_contributors,\n\t\t\tparams = {'page': i ,'anon': True}, # 'anon': True to include anonymous contributors\n\t\t\theaders = headers\n\t\t\t)\n\n\t\t# if there are no more contributors to download\n\t\tif res.text == \"[]\":\n\t\t\tstop = 1\n\n\t\telse:\n\t\t\tif i == 1:\n\t\t\t\t# first parse\n\t\t\t\tcurrent_j = json.loads(res.text)\n\t\t\telse:\n\t\t\t\t# parse and append to the contributors collected so far\n\t\t\t\tj = json.loads(res.text)\n\t\t\t\tcurrent_j = current_j + j\n\n\t\t# move to the next page\n\t\ti += 1\n\n\t\t# progress indicator\n\t\tcount = len(current_j)\n\t\tprint(str(count), end=\"\\r\")\n\n\tprint(str(count) + \"\\n# Contributors downloaded!\")\n\t#print(json.dumps(current_j, indent = 4, sort_keys =True))\n\treturn current_j\n\n######\n\ndef commitsMonitoring(headers):\n\t### Monitor the number of daily commits. If the number of daily commits is below 2, detect it and report the information.\n\n\ttoday_date = datetime.utcnow().replace(tzinfo=timezone.utc).replace(hour=0).replace(minute=0).replace(second=0).replace(microsecond=0).isoformat()\n\t\n\t# request\n\turl_monitoring = urljoin(github_api, \"/repos/facebook/react/commits\")\n\tres = requests.get(\n\t\turl_monitoring,\n\t\theaders = headers,\n\t\tparams = {'since' : today_date }\n\t\t)\n\n\t# parse\n\tlist_commit = json.loads(res.text)\n\n\t# display the number of daily commits\n\tnb_commit = len(list_commit)\n\t#print(\"Daily commit(s): \" + str(nb_commit))\n\n\t# report if fewer than 2 commits today\n\tif len(list_commit) < 2:\n\t\tprint(\"Warning : daily commits below 2\")\n\n\t# write to a .txt file\n\tfile_name = \"Report_commits_\"+str(today_date)[:10]\n\twith open(file_name, \"w\") as report_file:\n\t\treport_file.write(\"Report for the \" + str(nb_commit) + \" daily commit(s) - \" + str(today_date)[:10] + \"\\n\\n\")\n\t\tfor i in range(len(list_commit)):\n\t\t\treport_file.write(\"################## Commit \"+ str(len(list_commit)-i) + \" ##################\\n\\n\")\n\t\t\treport_file.write(json.dumps(list_commit[i].get('commit'), indent = 4, sort_keys =True))\n\t\t\treport_file.write(\"\\n\\n\")\n\tprint(\"Read '\" + file_name + \".txt' for more information about daily commits.\")\n\n\n######\n\ndef contributionsVisualizer(headers,list_contributors):\n\t### Produce a visualization showing, for each number of contributions, the proportion of contributors who made that many contributions (a log-log plot can be interesting).\n\t\n\ttotal_contributions = 0\n\tproportion_contributors = {}\n\tnb_contributors = len(list_contributors)\n\n\t# build a dictionary keyed by number of contributions, with the number of contributors as value\n\tfor i in range(nb_contributors):\n\t\tcurrent_contributions = list_contributors[i].get('contributions')\n\n\t\tif (current_contributions in proportion_contributors):\n\t\t\tproportion_contributors[current_contributions] += 1\n\t\telse:\n\t\t\tproportion_contributors[current_contributions] = 1\n\n\t\ttotal_contributions = total_contributions + current_contributions\n\t\n\t#print (\"Total contributions :\" + str(total_contributions))\n\n\t# plot\n\tplt.figure(figsize=(10,5), dpi=100)\n\ty = list(proportion_contributors.values())\n\tx = list(proportion_contributors.keys())\n\tplt.loglog(x,y, '-s')\n\tplt.fill_between( x, y, color=\"skyblue\", alpha=0.4)\n\tplt.xlabel(\"Contributors\")\n\tplt.ylabel(\"Contributions\")\n\tplt.grid()\n\tplt.savefig(\"contributors_distribution.png\")\n\tplt.show()\n\n######\n\ndef globalActivity(headers, list_contributors):\n\t### Bonus: create an overview of the global activity of these contributors.\n\t### answer: plot the number of commits over time for one or all contributors\n\t\n\turl_commit = urljoin(github_api, \"/repos/facebook/react/commits\")\n\tcommit_per_date = {}\n\tstop = 0\n\ti = 1\n\n\n\tprint(\"# Show global activity of : \\n (1) All users (long) \\n (2) One user\")\n\twhile True:\n\t\tchoice = input(\"Answer (enter an integer): \")\n\t\tif choice == '1' or choice == '2':\n\t\t\tbreak\n\t\telse:\n\t\t\tprint(\"# Value error : please enter a valid integer\")\n\n\t# download all the commits of the repo (slow because 30 per page)\n\tif choice == '1':\n\t\twhile stop != 1:\n\n\t\t\t# request\n\t\t\tres = requests.get(\n\t\t\t\turl_commit, \n\t\t\t\tparams = {'page': i}, \n\t\t\t\theaders = headers\n\t\t\t\t)\n\n\t\t\t# if there are no more commits to download\n\t\t\tif res.text == \"[]\":\n\t\t\t\tstop = 1\n\n\t\t\telse:\n\t\t\t\tif i == 1:\n\t\t\t\t\t# first parse\n\t\t\t\t\tlist_commit_user = json.loads(res.text)\n\t\t\t\telse:\n\t\t\t\t\t# parse and append to the commits collected so far\n\t\t\t\t\tlist_commit_user_temp = json.loads(res.text)\n\t\t\t\t\tlist_commit_user = list_commit_user + list_commit_user_temp\n\n\t\t\ti += 1\n\n\t\t\t# progress count\n\t\t\tnb_commit = len(list_commit_user)\n\t\t\tprint(str(nb_commit), end=\"\\r\")\n\n\t# download the commits of a single user\n\tif choice == '2':\n\n\t\tprint(\"# Show global activity of one of the top10 contributors : \")\n\t\tfor j in range(10):\n\t\t\tprint(\"#\" + str(j+1) + \" \" + list_contributors[j].get('login'))\n\t\t\n\t\twhile True:\n\t\t\tchoice_user = input(\"Answer (enter an integer): \")\n\t\t\tif choice_user in ['1','2','3','4','5','6','7','8','9','10']:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tprint(\"# Value error : please enter a valid integer\")\n\n\t\tprint(\"# Downloading commits...\")\n\t\twhile stop != 1:\n\n\t\t\t# request (the menu is 1-based, the list is 0-based, hence the - 1)\n\t\t\tchoice_user_name = list_contributors[int(choice_user) - 1].get('login')\n\t\t\tres = requests.get(\n\t\t\t\t\turl_commit, \n\t\t\t\t\tparams = {'page': i ,'author': choice_user_name},\n\t\t\t\t\theaders = headers\n\t\t\t\t\t)\n\n\t\t\t# if there are no more commits to download\n\t\t\tif res.text == \"[]\":\n\t\t\t\tstop = 1\n\n\t\t\telse:\n\t\t\t\tif i == 1:\n\t\t\t\t\t# first parse\n\t\t\t\t\tlist_commit_user = json.loads(res.text)\n\t\t\t\telse:\n\t\t\t\t\t# parse and append to the commits collected so far\n\t\t\t\t\tlist_commit_user_temp = json.loads(res.text)\n\t\t\t\t\tlist_commit_user = list_commit_user + list_commit_user_temp\n\n\t\t\ti += 1\n\n\t\t\t# progress count\n\t\t\tnb_commit = len(list_commit_user)\n\t\t\tprint(str(nb_commit), end=\"\\r\")\n\n\tprint(str(nb_commit) + \"\\n# Downloading complete!\")\n\n\t# build a dictionary keyed by date, with the number of commits on that date as value\n\tfor i in range(nb_commit):\n\t\tdate_commit_string = list_commit_user[i].get('commit').get('author').get('date')[:10]\n\t\tdate_commit = datetime.strptime(date_commit_string, '%Y-%m-%d')\n\t\tif (date_commit in commit_per_date):\n\t\t\tcommit_per_date[date_commit] += 1\n\t\telse:\n\t\t\tcommit_per_date[date_commit] = 1\n\t#print(commit_per_date)\n\n\t# visualization (save before show, otherwise an empty figure is written)\n\tplt.figure(figsize=(10,5), dpi=100)\n\tx,y = zip(*sorted(commit_per_date.items()))\n\tplt.stem(x,y,markerfmt=' ')\n\tplt.xlabel(\"Date\")\n\tplt.ylabel(\"Contributions\")\n\tplt.savefig(\"global_activity.png\")\n\tplt.show()\n\n\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"peb96/delight-test","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"74509962997","text":"from dataclasses import dataclass, field\nfrom functools import cached_property\nfrom typing import Any, Optional, List, Callable, Iterable\n\nfrom jax import numpy as jnp\n\n__all__ = ['Session', 'Params']\n\n\n@dataclass\nclass Args:\n \"\"\"\n Settings for VEM algorithm\n :attributes\n max_iter: outer # iterations\n e_max_iter: E step # iterations\n m_max_iter: M step # iterations\n fast: flag of fast EM, only meaningful for stationary kernels\n trial_length: trial length for fast EM, cut trials shorter ones\n clip: value for clipping newton step\n eps: small positive value for numerical stability\n stepsize: stepsize for damped newton's method\n gpfa: flag to use GPFA to accelerate\n \"\"\"\n max_iter: int = 50\n e_max_iter: int = 20\n m_max_iter: int = 20\n fast: bool = True\n trial_length: int = 100\n clip: float = 1.\n eps: float = 1e-6\n stepsize: float = 1.\n gpfa: bool = False\n\n\n@dataclass\nclass GPFAParams:\n C: Optional[Any] = None # (n_factors + n_regressors, n_channels)\n R: Optional[Any] = None # (n_channels,)\n\n\n@dataclass\nclass Params:\n \"\"\"\n Parameters and settings for vLGP\n :attributes\n n_factors: # of latent factors\n C: loading matrix, (n_factors + n_regressors, n_channels)\n K: list of kernel matrices, (n_factors, T, T) each\n L: K = LL', (n_factors, T, T)\n logdet: log determinants of K's, (n_factors, T)\n EM: settings of EM\n \"\"\"\n n_factors: int\n kernel: Iterable[Callable] = None\n C: Optional[Any] = None # (n_factors + n_regressors, n_channels)\n R: Optional[Any] = None # (n_channels,)\n K: Optional[Any] = None # (n_factors, T, T)\n L: Optional[Any] = None # (n_factors, T, T)\n logdet: Optional[Any] = None # (n_factors, T)\n seed: Optional[int] = None # random seed for reproducibility\n args: Args = field(default_factory=Args, repr=False, init=False) # EM algorithm settings\n gpfa: GPFAParams = field(default_factory=GPFAParams, repr=False, init=False)\n\n def __post_init__(self):\n if isinstance(self.kernel, Callable):\n self.kernel = [self.kernel] * self.n_factors\n\n\n@dataclass\nclass Trial:\n tid: Any\n y: Any = field(repr=False)\n x: Optional[Any] = field(default=None, repr=False) # regressors\n t: Optional[Any] = field(default=None, repr=False) # timing of bins\n z: Optional[Any] = field(default=None, repr=False) # posterior mean\n v: Optional[Any] = field(default=None, repr=False) # posterior variance\n w: Optional[Any] = field(default=None, repr=False)\n K: Optional[Any] = field(default=None, repr=False, init=False)\n L: Optional[Any] = field(default=None, repr=False, init=False)\n logdet: Optional[Any] = field(default=None, repr=False, init=False)\n T: int = field(default=None, repr=False, init=False)\n\n def __post_init__(self):\n self.y = jnp.asarray(self.y, dtype=float)\n self.T = self.y.shape[0]\n\n if self.x is not None:\n assert self.T == self.x.shape[0]\n else:\n self.x = jnp.ones((self.T, 1))\n\n if self.t is not None:\n assert self.T == self.t.shape[0]\n\n if self.z is not None:\n assert self.T == self.z.shape[0]\n\n if self.v is not None:\n assert self.T == self.v.shape[0]\n\n if self.w is not None:\n assert self.T == self.w.shape[0]\n\n def is_consistent_with(self, trial):\n return self.__class__ == trial.__class__ and \\\n self.y.shape[-1] == trial.y.shape[-1] and \\\n 
self.x.shape[-1] == trial.x.shape[-1]\n\n\n@dataclass\nclass Session:\n \"\"\"A trial container with some properties shared by trials\"\"\"\n binsize: Optional[float] = None\n trials: List[Trial] = field(default_factory=list, repr=False, init=False)\n T: Optional[int] = field(default=0, repr=False, init=False)\n tids: List[Any] = field(default_factory=list, repr=False, init=False)\n compact: bool = field(default=True, repr=False, init=False)\n\n def add_trial(self, tid, y, x=None, t=None):\n \"\"\"\n Add a trial to the session\n :param tid: trial's unique identifier\n :param y: binned spike train, (T, n_neurons)\n :param x: design matrix, (T, n_regressors)\n :param t: timing of each bin, (T,)\n :return:\n \"\"\"\n trial = Trial(tid, y, x, t)\n if self.trials:\n assert self.trials[0].is_consistent_with(trial)\n if trial.t is None:\n assert self.binsize is not None, 'The trial must contain field t if binsize is None'\n trial.t = jnp.arange(trial.y.shape[0] * self.binsize,\n step=self.binsize)\n else:\n self.compact = False\n self.trials.append(trial)\n self.tids.append(trial.tid)\n self.T += trial.T\n\n @cached_property\n def y(self):\n return jnp.row_stack([trial.y for trial in self.trials])\n\n @cached_property\n def x(self):\n return jnp.row_stack([trial.x for trial in self.trials])\n\n @property\n def z(self):\n return jnp.row_stack([trial.z for trial in self.trials])\n\n @property\n def v(self):\n return jnp.row_stack([trial.v for trial in self.trials])\n\n @property\n def w(self):\n return jnp.row_stack([trial.w for trial in self.trials])\n\n # preallocation 3x as stack\n # TODO: add compact representation\n","repo_name":"courtneyacheung/Hierarchical-Latent-Variable-Models-for-Neural-Data-Analysis","sub_path":"src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5418,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"36018366473","text":"# https://leetcode.com/problems/maximum-binary-tree/description/\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def constructMaximumBinaryTree(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: TreeNode\n \"\"\"\n stack = []\n for i in range(len(nums)):\n cur = TreeNode(nums[i])\n while (len(stack) and stack[-1].val < nums[i]):\n cur.left = stack.pop()\n if len(stack):\n stack[-1].right = cur\n stack.append(cur)\n return stack[0]\n","repo_name":"baieric/hackerRank","sub_path":"leetcode/maximum_binary_tree.py","file_name":"maximum_binary_tree.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"948042997","text":"import csv\r\nimport re\r\n\r\n# Reading the content from csv file\r\ndef read_csv():\r\n with open('src/data/chesterfield_08-03-2021_22-37-00.csv') as file:\r\n reader = csv.reader(file)\r\n content = []\r\n for row in reader:\r\n i = 0\r\n new_dict = {}\r\n new_dict['purchase_date'] = row[i]\r\n new_dict['store_location'] = row[i+1]\r\n new_dict['order_items'] = row[i+3]\r\n new_dict['payment_method'] = row[i+4]\r\n new_dict['total_spent'] = row[i+5]\r\n content.append(new_dict)\r\n return content\r\n\r\ncontent = read_csv()\r\n\r\n\r\ndef create_basket(content):\r\n mini_basket = []\r\n for dict in content:\r\n for key, value in dict.items():\r\n if key == 'order_items':\r\n split_value = value.split(',')\r\n 
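                    # each order_items cell is a comma-separated string; keep the raw
                    # split here, the size/product/price handling happens below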
mini_basket.append(split_value)\r\n basket = []\r\n for item in mini_basket:\r\n order = []\r\n for product in item:\r\n if product.count('-') == 1:\r\n clean_product = product.split('-')\r\n if 'Large' in clean_product[0]:\r\n replace_large = clean_product[0].replace('Large', ' ')\r\n clean_product[0] = replace_large\r\n clean_product.insert(0, 'Large')\r\n\r\n elif 'Regular' in clean_product[0]:\r\n replace_regular = clean_product[0].replace('Regular', ' ')\r\n clean_product[0] = replace_regular\r\n clean_product.insert(0, 'Regular')\r\n\r\n else:\r\n clean_product.insert(0, 'Regular')\r\n order.append(clean_product)\r\n elif product.count('-') == 2:\r\n clean_product = product.rsplit('-', 1)\r\n if 'Large' in clean_product[0]:\r\n replace_large = clean_product[0].replace('Large', ' ')\r\n clean_product[0] = replace_large\r\n clean_product.insert(0, 'Large')\r\n\r\n elif 'Regular' in clean_product[0]:\r\n replace_regular = clean_product[0].replace('Regular', ' ')\r\n clean_product[0] = replace_regular\r\n clean_product.insert(0, 'Regular')\r\n\r\n else:\r\n clean_product.insert(0, 'Regular')\r\n order.append(clean_product)\r\n basket.append(order)\r\n return basket\r\n \r\nbasket = create_basket(content)\r\n\r\n\r\n\r\n# # Extracts basket data and stores in a list of order lists which contains a dictionary for each basket entry per order\r\n# def create_basket():\r\n# basket = []\r\n# list_split = []\r\n# for dict in content:\r\n# for key, value in dict.items():\r\n# if key == 'order_items':\r\n# split_values = value.split(',')\r\n# # print(split_values)\r\n# list_split.append(split_values)\r\n# # print(list_split)\r\n# for item in list_split:\r\n# i = 0\r\n# order = []\r\n# while i != len(item):\r\n# new_dict = {}\r\n# new_dict['size'] = item[i]\r\n# new_dict['product'] = item[i+1]\r\n# new_dict['product_price'] = item[i+2]\r\n# i += 3\r\n# order.append(new_dict)\r\n# else:\r\n# pass\r\n# basket.append(order)\r\n# return basket\r\n\r\n# basket = create_basket()\r\n\r\n# Deleting order's items\r\ndef delete_orderitems(content):\r\n for dict in content:\r\n del dict['order_items']\r\n return content\r\n\r\ntransaction = delete_orderitems(content)\r\nprint(transaction)\r\n","repo_name":"LuisHenrique1994/ETL-project","sub_path":"normalisation_test.py","file_name":"normalisation_test.py","file_ext":"py","file_size_in_byte":3606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"30813776155","text":"import bpy\nimport numpy as np\nfrom mathutils import Matrix, Vector\n\nfrom collada import Collada\nfrom collada.common import tag\nfrom collada.geometry import Geometry\nfrom collada.material import Effect, Material\nfrom collada.scene import Node, Scene\nfrom collada.scene import GeometryNode, MaterialNode\nfrom collada.scene import MatrixTransform\nfrom collada.source import FloatSource, InputList\n\n\ndef save(op, context,\n filepath=None,\n directory=None,\n export_as=None,\n **kwargs):\n\n ex = ColladaExport(directory, export_as)\n\n for o in context.scene.objects:\n ex.object(o)\n\n ex.save(filepath)\n\n return {'FINISHED'}\n\n\nclass ColladaExport(object):\n def __init__(self, directory, export_as='dae_only'):\n self._dir = directory\n self._export_as = export_as\n self._geometries = {}\n self._materials = {}\n self._collada = Collada()\n\n self._scene = Scene('main', [])\n self._collada.scenes.append(self._scene)\n self._collada.scene = self._scene\n\n def save(self, fp):\n self._collada.write(fp)\n\n def object(self, b_obj, 
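The size/product/price splitting in `create_basket` above could be expressed more compactly with a single regular expression. A sketch, under the assumption that each order item looks like e.g. "Large Latte - 2.45" (optional size prefix, price after the final hyphen — the exact item format is not shown in this excerpt):

```python
import re

# Optional size prefix, then the product name, then the price after the last " - ".
ITEM_RE = re.compile(r"^\s*(Large|Regular)?\s*(.+?)\s*-\s*([\d.]+)\s*$")

def parse_item(raw):
    m = ITEM_RE.match(raw)
    if m is None:
        return None
    size, product, price = m.groups()
    return [size or "Regular", product, price]

print(parse_item("Large Latte - 2.45"))
# ['Large', 'Latte', '2.45']
print(parse_item("Flavoured Hot Chocolate - Vanilla - 2.9"))
# ['Regular', 'Flavoured Hot Chocolate - Vanilla', '2.9']  (lazy group keeps inner hyphens)
```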
parent=None, children=True):\n b_matrix = b_obj.matrix_world\n if parent:\n if children:\n b_matrix = b_obj.matrix_local\n else:\n b_matrix = Matrix()\n\n node = self.node(b_obj.name, b_matrix)\n if any(b_obj.children) and children:\n self.object(b_obj, parent=node, children=False)\n for child in b_obj.children:\n self.object(child, parent=node)\n\n if parent:\n parent.children.append(node)\n else:\n self._scene.nodes.append(node)\n\n inode_meth = getattr(self, 'obj_' + b_obj.type, None)\n if inode_meth:\n node.children.extend(inode_meth(b_obj))\n\n def node(self, b_name, b_matrix=None):\n tf = []\n if b_matrix:\n tf.append(self.matrix(b_matrix))\n node = Node(b_name, transforms=tf)\n node.save()\n return node\n\n def obj_MESH(self, b_obj):\n geom = self._geometries.get(b_obj.data.name, None)\n if not geom:\n geom = self.mesh(b_obj.data)\n self._geometries[b_obj.data.name] = geom\n matnodes = []\n for slot in b_obj.material_slots:\n sname = slot.material.name\n if sname not in self._materials:\n self._materials[sname] = self.material(slot.material)\n matnodes.append(MaterialNode('none', self._materials[sname],\n inputs=[]))\n return [GeometryNode(geom, matnodes)]\n\n def mesh(self, b_mesh):\n vert_srcid = b_mesh.name + '-vertary'\n vert_f = [c for v in b_mesh.vertices for c in v.co]\n vert_src = FloatSource(vert_srcid, np.array(vert_f), ('X', 'Y', 'Z'))\n\n sources = [vert_src]\n\n smooth = list(filter(lambda f: f.use_smooth, b_mesh.faces))\n if any(smooth):\n vnorm_srcid = b_mesh.name + '-vnormary'\n norm_f = [c for v in b_mesh.vertices for c in v.normal]\n norm_src = FloatSource(vnorm_srcid, np.array(norm_f), ('X', 'Y', 'Z'))\n sources.append(norm_src)\n flat = list(filter(lambda f: not f.use_smooth, b_mesh.faces))\n if any(flat):\n fnorm_srcid = b_mesh.name + '-fnormary'\n norm_f = [c for f in flat for c in f.normal]\n norm_src = FloatSource(fnorm_srcid, np.array(norm_f), ('X', 'Y', 'Z'))\n sources.append(norm_src)\n\n name = b_mesh.name + '-geom'\n geom = Geometry(self._collada, name, name, sources)\n\n if any(smooth):\n ilist = InputList()\n ilist.addInput(0, 'VERTEX', _url(vert_srcid))\n ilist.addInput(1, 'NORMAL', _url(vnorm_srcid))\n # per vertex normals\n indices = np.array([\n i for v in [\n (v, v) for f in smooth for v in f.vertices\n ] for i in v])\n if _is_trimesh(smooth):\n p = geom.createTriangleSet(indices, ilist, 'none')\n else:\n vcount = [len(f.vertices) for f in smooth]\n p = geom.createPolylist(indices, vcount, ilist, 'none')\n geom.primitives.append(p)\n if any(flat):\n ilist = InputList()\n ilist.addInput(0, 'VERTEX', _url(vert_srcid))\n ilist.addInput(1, 'NORMAL', _url(fnorm_srcid))\n indices = []\n # per face normals\n for i, f in enumerate(flat):\n for v in f.vertices:\n indices.extend([v, i])\n indices = np.array(indices)\n if _is_trimesh(flat):\n p = geom.createTriangleSet(indices, ilist, 'none')\n else:\n vcount = [len(f.vertices) for f in flat]\n p = geom.createPolylist(indices, vcount, ilist, 'none')\n geom.primitives.append(p)\n\n self._collada.geometries.append(geom)\n return geom\n\n def material(self, b_mat):\n shader = 'lambert'\n if b_mat.specular_shader == 'PHONG':\n shader = 'phong'\n elif b_mat.specular_shader == 'BLINN':\n shader = 'blinn'\n if b_mat.use_shadeless:\n shader = 'constant'\n child = {\n 'ambient': (b_mat.ambient,) * 3,\n 'emission': (b_mat.emit,) * 3,\n 'diffuse': tuple(b_mat.diffuse_color),\n }\n if b_mat.use_transparency:\n child.update({\n 'transparent': (0.,0.,0.),\n 'transparency': b_mat.alpha,\n })\n if 
b_mat.raytrace_mirror.use:\n child.update({\n 'reflective': tuple(b_mat.mirror_color),\n 'reflectivity': b_mat.raytrace_mirror.reflect_factor,\n })\n effect = Effect(b_mat.name + '-fx', [], shader, **child)\n mat = Material(b_mat.name, b_mat.name, effect)\n self._collada.effects.append(effect)\n self._collada.materials.append(mat)\n return mat\n\n def matrix(self, b_matrix):\n f = tuple(map(tuple, b_matrix.transposed()))\n return MatrixTransform(np.array(\n [e for r in f for e in r], dtype=np.float32))\n\n\ndef _is_trimesh(faces):\n return all([len(f.vertices) == 3 for f in faces])\n\ndef _url(uid):\n return '#' + uid\n\n","repo_name":"skrat/bpycollada","sub_path":"export_collada.py","file_name":"export_collada.py","file_ext":"py","file_size_in_byte":6354,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"4"} +{"seq_id":"35820274944","text":"import time # Importe le module time pour la gestion du temps\nimport serial # Importe le module serial pour la communication série\nimport mysql.connector # Importe le module mysql.connector pour la connexion à la base de données MySQL\n\nser = serial.Serial(\n port='/dev/ttyUSB0', # Port série utilisé pour la communication\n baudrate=9600, # Débit en bauds de la communication\n parity=serial.PARITY_NONE, # Parité de la communication (aucune parité)\n stopbits=serial.STOPBITS_ONE, # Bits d'arrêt de la communication\n bytesize=serial.EIGHTBITS, # Taille des octets de la communication\n timeout=5 # Délai d'attente de la communication\n)\n\nif ser.isOpen(): # Vérifie si la connexion série est déjà ouverte\n ser.close() # Ferme la connexion série\n\nser.open() # Ouvre la connexion série\nser.isOpen() # Vérifie si la connexion série est ouverte\n\nwhile True: # Boucle principale infinie\n try:\n res = ser.read(6) # Lit 6 octets depuis la connexion série\n res = res.decode() # Décode les octets en une chaîne de caractères\n res = res.split(\"-\") # Divise la chaîne de caractères en une liste en utilisant le caractère \"-\"\n print(\"Signal reçu :\", res) # Affiche le signal reçu\n\n if len(res) == 2: # Vérifie si la liste contient 2 éléments\n insertion(res) # Appelle la fonction insertion avec la liste des mesures\n\n time.sleep(1) # Attend pendant 1 seconde\n\n except:\n print('Erreur dans la boucle principale') # Affiche un message d'erreur en cas d'exception non gérée\n\n def insertion(mesures): # Définition de la fonction insertion qui prend une liste de mesures en paramètre\n try:\n connection = mysql.connector.connect(\n host='172.20.10.26', # Adresse IP du serveur MySQL\n database='pppe', # Nom de la base de données\n user='admin', # Nom d'utilisateur pour la connexion MySQL\n password='admin' # Mot de passe pour la connexion MySQL\n )\n\n print(\"Essai de connexion au serveur MySQL\") # Affiche un message de tentative de connexion\n cursor = connection.cursor() # Crée un objet curseur pour exécuter des requêtes SQL\n\n if mesures[0] == '0': # Vérifie le premier élément de la liste des mesures\n # Construit la requête SQL d'insertion pour la table mesure_batterie avec les valeurs des mesures\n mySql_insert_query = f\"INSERT INTO mesure_batterie(id_batterie, tension, timestamp) VALUES((SELECT MAX(id) FROM batterie), {mesures[1]}, timestamp)\"\n\n elif mesures[0] == '1': # Vérifie le premier élément de la liste des mesures\n # Construit la requête SQL d'insertion pour la table panneaux_solaire avec les valeurs des mesures\n mySql_insert_query = f\"INSERT INTO panneaux_solaire(tension, timestamp) 
VALUES({mesures[1]}, timestamp)\"\n\n elif mesures[0] == '2': # Vérifie le premier élément de la liste des mesures\n # Construit la requête SQL d'insertion pour la table releve_puissance avec les valeurs des mesures\n mySql_insert_query = f\"INSERT INTO releve_puissance(id_session, mesures) VALUES((SELECT MAX(id) FROM session), {mesures[1]})\"\n\n print(mySql_insert_query) # Affiche la requête SQL d'insertion\n cursor.execute(mySql_insert_query) # Exécute la requête SQL\n connection.commit() # Valide les modifications dans la base de données\n print(\"Commande exécutée :\", mySql_insert_query) # Affiche un message indiquant l'exécution de la commande\n cursor.close() # Ferme le curseur\n print(\"Enregistrement inséré avec succès dans la table releve_puissance\") # Affiche un message de succès\n\n except mysql.connector.Error as error:\n print(\"Échec de l'insertion d'un enregistrement dans la table :\", error) # Affiche un message d'échec d'insertion\n return False # Retourne False en cas d'échec\n\n return True # Retourne True en cas de succès\n ","repo_name":"Oliopti/pppe","sub_path":"Code_de_Olivier/reception-envoie-serveur/1v-AC-main.py","file_name":"1v-AC-main.py","file_ext":"py","file_size_in_byte":4152,"program_lang":"python","lang":"fr","doc_type":"code","stars":3,"dataset":"github-code","pt":"4"} +{"seq_id":"14986574578","text":"from flair.data import Corpus\r\nfrom flair.datasets import ColumnCorpus\r\nfrom flair.embeddings import WordEmbeddings, StackedEmbeddings\r\nfrom flair.models import SequenceTagger\r\nfrom flair.trainers import ModelTrainer\r\nfrom flair.data import Sentence\r\nimport re\r\nimport os\r\nimport sys\r\nimport nltk\r\nimport numpy\r\nimport random\r\nimport codecs\r\nfrom threading import *\r\nfrom langdetect import detect\r\nnltk.download('punkt')\r\nnltk.download('averaged_perceptron_tagger')\r\nnltk.download('maxent_ne_chunker')\r\nnltk.download('words')\r\nnltk.download('conll2002')\r\nnltk.download('conll2000')\r\nnltk.download('brown')\r\nnltk.download('universal_tagset')\r\nfrom nltk import word_tokenize,pos_tag\r\nfrom nltk.chunk import conlltags2tree, tree2conlltags\r\nfrom pprint import pprint\r\nfrom nltk.stem.snowball import SnowballStemmer\r\nfrom nltk.corpus import words\r\nfrom nltk.corpus import conll2000, conll2002\r\n\r\ntrainSet = []\r\nusers = []\r\ngoldsets = []\r\nglobalUsers = []\r\nglobalCount = []\r\nonlyUsers = []\r\nfoundUsers = []\r\nthreadsL = []\r\nsema = Semaphore(1)\r\nstemmer = SnowballStemmer(\"english\")\r\n\r\n\r\n#TO GET PHRASES\r\ndef getPhrasesFromFile(fileName, st, fin):\r\n phrases= [] \r\n finArr = [] \r\n tokenized_phrases = []\r\n try:\r\n with codecs.open(fileName, 'r', \"utf-8\") as file:\r\n phrases = file.read().split(\".\")\r\n if fin > len(phrases) or fin == 0:\r\n return phrases\r\n for i in range(st,fin):\r\n finArr.append(phrases[i])\r\n return finArr\r\n except:\r\n print(\"Error in reading \" + fileName)\r\n exit()\r\n\r\n\r\n#TO MARK USERS AS WE HAVE DONE FOR TRAIN SET\r\n\r\ndef getDataFromFile(fileName):\r\n users = [] \r\n try:\r\n word_file = open (fileName, \"r\", encoding='utf-8')\r\n #print(\"file object created\")\r\n for l in word_file:\r\n user = l.replace('\\r', '').replace('\\n', '')\r\n # user = stemmer.stem(m)\r\n #else:\r\n # user = m\r\n users.append(user)\r\n return users\r\n except:\r\n print(\"Error in reading \" + fileName)\r\n if len(users)>0:\r\n print(users[len(users)-1])\r\n exit()\r\n\r\n#TO GET ROOTS FROM GOLDEN SETS\r\n\r\ndef getWordsFromGoldenSet(fileName):\r\n users = [] 
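The `insertion` function in the record above builds its SQL with f-strings and passes a bare `timestamp` token where a value belongs. A safer sketch using `mysql.connector` placeholders (the table and column names are the ones from the script; the timestamp is supplied explicitly rather than left to the unbound token):

```python
from datetime import datetime

def insert_battery_reading(connection, tension):
    # %s placeholders let the driver escape values; never interpolate
    # serial/user input directly into the SQL string.
    query = ("INSERT INTO mesure_batterie (id_batterie, tension, timestamp) "
             "VALUES ((SELECT MAX(id) FROM batterie), %s, %s)")
    cursor = connection.cursor()
    cursor.execute(query, (tension, datetime.now()))
    connection.commit()
    cursor.close()
```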
\r\n try:\r\n word_file = open (fileName, \"r\", encoding='utf-8')\r\n print(\"file object created\")\r\n for l in word_file:\r\n users.append(l.replace('\\r', '').replace('\\n', ''))\r\n return users\r\n except:\r\n print(\"Error in reading \" + fileName)\r\n if len(users)>0:\r\n print(users[len(users)-1])\r\n exit()\r\n\r\n#TO GET DATA FROM TEST FILE AND USERS FILE\r\ntestmodelpath= os.path.join(os.getcwd(),\"trainmodel\")\r\n\r\n\r\ndataFileName = str(sys.argv[1])\r\nprint(dataFileName)\r\nphSt = int(sys.argv[2])\r\nprint(phSt)\r\nphNum = int(sys.argv[3])\r\nprint(phNum)\r\npath = str(sys.argv[4])\r\nprint(path)\r\ntrName = str(os.path.join(path,\"trainnrC1.txt\"))\r\nvName = str(os.path.join(path,\"valnrC1.txt\"))\r\nteName = str(os.path.join(path,\"testnrC1.txt\"))\r\ntr = open(trName, \"w\")\r\nv = open(vName, \"w\")\r\nte = open(teName, \"w\")\r\n\r\ntr.close()\r\nv.close()\r\nte.close()\r\n\r\ntrainSet = getPhrasesFromFile(dataFileName, phSt, phNum)\r\nprint(\"Taken phrases - \" + str(trainSet))\r\nusers = getDataFromFile(os.path.join(testmodelpath,\"usersList.txt\"))\r\nprint(\"Taken users - \" + str(len(users)))\r\n#goldsets = getWordsFromGoldenSet(\"GoldenSet.txt\")\r\n#print(\"Taken goldsets - \")\r\n#print(len(goldsets))\r\n\r\n\r\ndef checkMarkedArrayPresence(phr):\r\n registeredUser = []\r\n for phrase in phr:\r\n nltk_tags = pos_tag(word_tokenize(phrase)) \r\n iob_tagged = tree2conlltags(nltk_tags) \r\n userFound = False\r\n for user in users: \r\n us = [] \r\n if user in phrase: \r\n if \" \" in user: \r\n us = user.split(\" \")\r\n else:\r\n us.append(user) \r\n iob_tagged = markUser(iob_tagged, us) \r\n regStr = \"\"\r\n newLen = 0\r\n allowAppend = False \r\n for iob in iob_tagged:\r\n if iob[2]==\"B\" :\r\n regStr = iob[0] \r\n if iob[2]==\"I\":\r\n regStr += \" \" + iob[0]\r\n if iob[2]==\"O\" and len(regStr)>0:\r\n registeredUser.append(regStr)\r\n if len(regStr.split(\" \"))>1:\r\n allowAppend = True\r\n elif len(regStr)>0 and regStr[len(regStr) - 2:]==\"er\":\r\n allowAppend = True\r\n if regStr not in globalUsers:\r\n globalUsers.append(regStr)\r\n globalCount.append(1)\r\n else:\r\n ind = globalUsers.index(regStr)\r\n globalCount[ind] = globalCount[ind]+1\r\n regStr = \"\"\r\n newLen = newLen + 1\r\n if newLen>0 or len(regStr)>0:\r\n if regStr!=\"\" and regStr not in globalUsers:\r\n globalUsers.append(regStr)\r\n globalCount.append(1)\r\n elif regStr!=\"\":\r\n ind = globalUsers.index(regStr)\r\n globalCount[ind] = globalCount[ind]+1\r\n if regStr not in foundUsers or foundUsers.count(regStr)<10:\r\n foundUsers.append(regStr)\r\n allowAppend = True\r\n if allowAppend == True:\r\n onlyUsers.append(iob_tagged)\r\n print(iob_tagged)\r\n return onlyUsers\r\n \r\n\r\n \r\ndef markUser(phrase,user):\r\n try:\r\n finTuple = []\r\n found = False \r\n if len(user)>1:\r\n nonFirst = False\r\n cnt = 0\r\n ucnt = 0\r\n for ph in phrase:\r\n finTuple.append(ph)\r\n ucnt = ucnt +1\r\n found = False\r\n for u in user: \r\n if ph[0]==u:\r\n found = True\r\n if found == True:\r\n cnt = cnt + 1\r\n else:\r\n cnt = cnt - 1 \r\n if cnt == len(user)-1: \r\n ls = list(phrase[ucnt-cnt-1])\r\n ls[2]=\"B\"\r\n finTuple[ucnt-cnt-1]=tuple(ls)\r\n for i in range(cnt):\r\n ls = list(phrase[ucnt-cnt+i])\r\n ls[2]=\"I\"\r\n finTuple[ucnt-cnt+i]=tuple(ls)\r\n cnt = 0\r\n else:\r\n for i in range(0, len(phrase)):\r\n finTuple.append(phrase[i]) \r\n if phrase[i][0] == user[0] and phrase[i][1][0]==\"N\": \r\n cnt = i-1\r\n found = False\r\n while cnt>=0 and phrase[cnt][1]==\"JJ\" or cnt>=0 and 
phrase[cnt][1]==\"NNP\" or cnt>=0 and phrase[cnt][1]==\"VBN\":\r\n ls = list(phrase[cnt])\r\n ls[2]=\"I\"\r\n finTuple[cnt] = (tuple(ls))\r\n cnt = cnt - 1 \r\n found = True\r\n\r\n ls = list(phrase[i])\r\n if found == False:\r\n ls[2]=\"B\"\r\n else:\r\n ls[2]=\"I\"\r\n finTuple[len(finTuple)-1] = (tuple(ls))\r\n if found == True:\r\n ls = list(phrase[cnt+1])\r\n ls[2]=\"B\"\r\n finTuple[cnt+1] = (tuple(ls))\r\n return finTuple\r\n except:\r\n print(\"Exception user marking\")\r\n\r\n\r\n\r\ndef writeResultFile(trainSet):\r\n with open(trName, 'a') as f:\r\n for arr in trainSet:\r\n for s in arr:\r\n for st in s:\r\n try:\r\n f.write((st + \" \").encode(\"utf-8\").decode())\r\n except:\r\n print(\"error\")\r\n f.write(\"\\n\") \r\n f.write(\"\\n\\n\\n\") \r\n t = 0\r\n with open(vName, 'a') as v:\r\n for arr in trainSet:\r\n t = t+1\r\n if t>3:\r\n t = 0\r\n for s in arr:\r\n for st in s:\r\n try:\r\n v.write((st + \" \").encode(\"utf-8\").decode())\r\n except:\r\n print(\"error\")\r\n v.write(\"\\n\")\r\n v.write(\"\\n\\n\\n\") \r\n t = 0\r\n with open(teName, 'a') as te:\r\n for arr in trainSet:\r\n t = t+1\r\n if t>4:\r\n t = 0\r\n for s in arr:\r\n for st in s:\r\n try:\r\n te.write((st + \" \").encode(\"utf-8\").decode())\r\n except:\r\n print(\"error\")\r\n te.write(\"\\n\")\r\n te.write(\"\\n\\n\\n\") \r\n \r\n \r\ndef writeUsersFile(): \r\n if len(globalUsers) != len(globalCount):\r\n return\r\n for i in range(0, len(globalCount)):\r\n usersFile.write(str(globalUsers[i]))\r\n usersFile.write(\": \")\r\n usersFile.write(str(globalCount[i]))\r\n usersFile.write(\"\\n\") \r\n\r\n\r\n#TO PREPARE USERS RECOGNITION FOR MULTITHREADING\r\ndef writeUsers():\r\n phr = [] \r\n threadsL.append(1) \r\n while len(trainSet)>0:\r\n try: \r\n sema.acquire()\r\n finVal = 10\r\n if len(trainSet)<10:\r\n finVal = len(trainSet) \r\n for i in range(0,finVal):\r\n phr.append(trainSet[i])\r\n for i in range(0,len(phr)):\r\n trainSet.pop(0) \r\n sema.release() \r\n checkMarkedArrayPresence(phr)\r\n phr.clear()\r\n except:\r\n print(\"Exception thread\")\r\n threadsL.pop(0)\r\n #print(\"THREAD FINISHED \" + str(len(threadsL)))\r\n if len(threadsL)==0:\r\n writeResultFile(onlyUsers)\r\n writeUsersFile()\r\n #print(\"STATISTICS WRITTEN \")\r\n \r\n\r\n#MAIN SCRIPTS\r\n\r\n\r\nrep = 0\r\ni=0\r\n\r\nusersFile = open(os.path.join(path,\"onlyDetectedUsersNR1.txt\"), \"w\")\r\n#writeUsers(trainSet, users)\r\n\r\nthreads = []\r\nfor cnt in range(0,10):\r\n threads.append(Thread(target=writeUsers))\r\n\r\n\r\nfor ph in threads:\r\n #print(\"thread start\")\r\n ph.start()\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"airermakova/webPortal","sub_path":"prepareTestSetsComplexUsersNoRepeat.py","file_name":"prepareTestSetsComplexUsersNoRepeat.py","file_ext":"py","file_size_in_byte":10110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"33430809816","text":"def insertionSort(list):\n for i in range(1, len(list)):\n key = list[i];\n j = i - 1\n while j >=0 and key < list[j]:\n list[j+1] = list[j]\n j -= 1\n list[j+1] = key\n\narr = [2,4,5,6,5,4,1,6,7,8,9]\ninsertionSort(arr)\nprint(\"The Sorted Array is: \",arr)","repo_name":"piyush-p7/DAAOA","sub_path":"insertionSort.py","file_name":"insertionSort.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"31031434720","text":"class Solution(object):\n def subtractProductAndSum(self, n):\n \"\"\"\n :type n: int\n :rtype: 
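For reference, the conventional NLTK route to IOB triples — which the marking script above approximates by hand — chunks named entities first, since `tree2conlltags` expects the resulting `Tree` rather than a flat list of `(token, POS)` pairs:

```python
from nltk import ne_chunk, pos_tag, word_tokenize
from nltk.chunk import tree2conlltags

sentence = "Lewis Hamilton drives for Mercedes."
tree = ne_chunk(pos_tag(word_tokenize(sentence)))  # Tree, typically with PERSON/ORG chunks
iob = tree2conlltags(tree)                          # [(token, POS, IOB tag), ...]
print(iob)
```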
int\n \"\"\"\n digits = [int(d) for d in str(n)]\n product = 1\n s = 0\n for d in digits:\n product *= d\n s += d\n\n return product - s","repo_name":"daicang/Leetcode","sub_path":"5279-subtract-the-product-and-sum-of-digits-of-an-integer.py","file_name":"5279-subtract-the-product-and-sum-of-digits-of-an-integer.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"19082640337","text":"from selenium import webdriver\nimport selenium.webdriver.support.ui as ui\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.chrome.service import Service\nimport yaml\nfrom urllib.parse import urlparse\nfrom urllib.parse import parse_qs\nimport os\n\nwith open('.\\\\config.yaml', 'r') as stream:\n config = yaml.safe_load(stream)\n\nCHROMEDRIVER_PATH = config['chromedriver']\n\nlaip_url = 'https://courses.finki.ukim.mk/course/view.php?id=1696'\nos_url = 'https://courses.finki.ukim.mk/course/view.php?id=1987'\ncourses_url = 'https://courses.finki.ukim.mk'\ndna_url = 'https://courses.finki.ukim.mk/course/view.php?id=2051'\nurl = input('Enter the URL of the course: ')\nfile_to_save = input('Enter the file name where you want the recording links to be saved: ')\nfile_to_save = file_to_save.split('.')[0] + '.txt'\n\n\ndef login(driver: webdriver.Chrome):\n \n driver.get(url)\n my_login = config['Login']\n driver.find_element(By.ID, 'username').send_keys(my_login['username'])\n driver.find_element(By.ID, 'password').send_keys(my_login['password'])\n driver.find_element(By.CLASS_NAME, 'btn-submit').click()\n\n\ndef manual_login(driver: webdriver.Chrome):\n driver.get(url)\n wait = ui.WebDriverWait(driver, 60)\n element = wait.until(EC.presence_of_element_located((By.ID, \"page-course-view-weeks\")))\n\n\ndef cookie_login(driver: webdriver.Chrome):\n driver.get(courses_url)\n driver.delete_cookie('MoodleSession')\n driver.add_cookie(config['MoodleCookie'])\n driver.get(url)\n\n\nlogin_type = {'auto': login, 'manual': manual_login, 'cookie': cookie_login}\n\n\ndef main() -> None:\n s = Service(CHROMEDRIVER_PATH)\n driver = webdriver.Chrome(service=s)\n login_type[config['login_method']](driver)\n print('Logged in')\n bbb_img_url = 'https://courses.finki.ukim.mk/theme/image.php/classic/bigbluebuttonbn/1637059223/icon'\n images = driver.find_elements(By.CSS_SELECTOR, f'img[src=\"{bbb_img_url}\"]')\n print(f'Found {len(images)} BBB images')\n num_images = len(images)\n # create a file with the links to the BBB sessions\n base_recording_url = 'https://bbb-lb.finki.ukim.mk/playback/presentation/2.3/'\n folder_name = 'recording_links'\n if not os.path.exists(folder_name):\n os.makedirs(folder_name)\n with open(os.path.join(folder_name, file_to_save), 'w', encoding='utf-8') as f:\n for i in range(num_images):\n image = driver.find_elements(By.CSS_SELECTOR, f'img[src=\"{bbb_img_url}\"]')[i]\n link = image.find_element(By.XPATH, '..')\n link.click()\n container_div = driver.find_element(By.ID, 'bigbluebuttonbn_recordings_table')\n if container_div.text == 'There are no recording to show.':\n print('No recordings found')\n driver.back()\n continue\n table = container_div.find_element(By.TAG_NAME, 'table')\n\n rows = table.find_elements(By.TAG_NAME, 'tr')\n for row in rows[1:]:\n try:\n cells = row.find_elements(By.TAG_NAME, 'td')\n link = cells[0].find_element(By.TAG_NAME, 'a')\n link_url = link.get_attribute('data-href')\n name = 
cells[1].text\n date = cells[5].text\n # get the querystring rid of the link\n parsed_url = urlparse(link_url)\n rid = parse_qs(parsed_url.query)['rid'][0]\n link_url = base_recording_url + rid\n print(f'{name} {date} {link_url}')\n f.write(f'{name} {date} {link_url}\\n\\n')\n except Exception as ex:\n print(ex)\n continue\n driver.back()\n\n driver.quit()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"alekjarmov/Moodle-Scraper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3866,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"} +{"seq_id":"22290602526","text":"\"\"\"\nChapter 6: Exercise 8 - Word List File Writer\nRead the words from a file and display the following data:\n * Number of words in file\n * Longest word in the file\n * Average length of all the words in the file\n\"\"\"\n\ndef report_results(words, longest, average):\n print(f'Total number of words:\\t{str(words)}')\n print(f'Longest word:\\t{longest}')\n print(f'Average word length:\\t{str(average)}')\n\ndef main():\n\n filename = 'words.txt'\n longest_word = ''\n total_length = 0\n total_words = 0\n\n fin = open(filename)\n \n for word in fin:\n word = word.rstrip('\\n')\n word_length = len(word)\n\n total_length += word_length\n total_words += 1\n\n # Check for longest word\n if word_length > len(longest_word):\n longest_word = word\n \n fin.close()\n # Determine average length\n average = total_length / total_words \n\n # Report the data\n report_results(total_words, longest_word, average)\n\nif __name__ == '__main__':\n main()\n","repo_name":"bravocharliemike/comp_sci","sub_path":"programming_principles/chapter_6/ex8.py","file_name":"ex8.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"20746733509","text":"# Importar el móculo SQLite3\nimport sqlite3\n\nfrom Modulos_06.Paquete_1.clase_coche import *\n\n# Crear una conexión\nconexion = sqlite3.connect('ejemplo.db')\n\n# Crear nuestro curso\ncursor = conexion.cursor()\n\n# Salvar los cambios\nconexion.commit()\n\n\"\"\"\n# Create table, con concatenación de stream\ncursor.execute(\"CREATE TABLE IF NOT EXISTS coches(\" +\n \"id INTEGER PRIMARY KEY AUTOINCREMENT,\" +\n \"marca varchar(256),\" +\n \"color varchar(256),\" +\n \"velocidad int(256),\" +\n \"plazas int(256))\")\n\"\"\"\n\n# Create table, con comillas triples\ncursor.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS coches(\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n marca varchar(256),\n color varchar(256),\n velocidad int(256),\n plazas int(256))\"\"\"\n )\n\nconexion.commit()\n\n'''\n# Insertar una fila en la table\ncursor.execute(\"\"\"INSERT INTO coches VALUES(\n null,\n 'Kia',\n 'gris marengo',\n 120,\n 4\n )\"\"\")\n\nconexion.commit()\n'''\n\n'''\n# Extraer registro seleccionados\ncursor.execute(\"SELECT * FROM coches\")\ncoches = cursor.fetchall()\nconexion.commit()\n\nfor coche in coches:\n print(\"---------------\")\n print(\"Key: \", coche[0])\n print(\"Marca: \", coche[1])\n print(\"Color: \", coche[2])\n print(\"Velocidad: \", coche[3])\n print(\"Plazas: \", coche[4])\n'''\n\n# Borrar todos los registros\ncursor.execute(\"DELETE FROM coches\")\nconexion.commit()\n\n# coche = \n\ncoches = [\n (\"Mercedes\", \"negro\", 200, 5),\n (\"Seat\", \"blanco\", 120, 4),\n (\"Volvo\", \"verde\", 150, 5)\n]\ncursor.executemany(\"INSERT INTO coches VALUES (null,?,?,?,?)\", coches)\nconexion.commit()\n\ncursor.execute(\"UPDATE 
coches SET velocidad = 180 WHERE marca = 'Volvo'\")\ncursor.execute(\"UPDATE coches SET color = 'amarillo' WHERE marca = 'Seat'\")\nconexion.commit()\n\n# Extraer registro seleccionados\ncursor.execute(\"SELECT * FROM coches WHERE plazas > 3\")\ncoches = cursor.fetchall()\nconexion.commit()\n\nfor coche in coches:\n print(\"---------------\")\n print(\"Key: \", coche[0])\n print(\"Marca: \", coche[1])\n print(\"Color: \", coche[2])\n print(\"Velocidad: \", coche[3])\n print(\"Plazas: \", coche[4])\n\n print(\"----------------------------------------------------------\")\n print(\"\\tKey \\tMarca \\t\\tColor \\tVelocidad \\tPlazas\")\n\nfor coche in coches:\n print(f\"\\t{coche[0]} \\t{coche[1]} \\t\\t{coche[2]} \\t{coche[3]} \\t{coche[4]}\")\n\n# Cerrar la bbdd\nconexion.close()","repo_name":"DiegoALD/Introduccion-a-Python","sub_path":"09-SQLite/SQLite.py","file_name":"SQLite.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"75212171956","text":"from enum import Enum\n\nfrom .neural_monkey_model_wrapper import NeuralMonkeyModelWrapper\nfrom .plugin_model_wrapper import PluginModelWrapper\n\n\nclass ModelType(Enum):\n \"\"\"Class enumerating supported types of model wrappers.\n\n Supported types are:\n neuralmonkey\n plugin\n \"\"\"\n \n NeuralMonkey = \"neuralmonkey\"\n Plugin = \"plugin\"\n\ndef create_model_wrapper(model_config, from_response=True):\n \"\"\"Create a model wrapper from the config dictionary.\n\n Args:\n model_config: A dictionary with the following structure\n {\n 'type': { 'neuralmonkey' | 'plugin' },\n 'input': { 'images' | 'features' },\n 'neuralmonkey': {\n 'configPath': { a string path to the Neural Monkey \n configuration file }\n 'varsPath': { a string path to the model checkpoint }\n 'dataSeries': { the name of the data series under which \n model input data are expected }\n 'srcCaptionSeries': { the name of the data series under \n which source captions are expected }\n 'greedySeries':\n 'attnSeries':\n 'bsSeries':\n },\n 'plugin': {\n 'path': { a string path to the plugin source }\n }\n }\n Only one of the keys 'neuralmonkey' and 'plugin' have to be fully\n provided, depending on the model wrapper type.\n Returns:\n A model wrapper instance.\n Raises:\n ValueError: Unsupported `type` value.\n \"\"\"\n\n model_type = model_config['type']\n name = model_config['name']\n\n if 'input' in model_config:\n runs_on_features = False if model_config['input'] == \"images\" else True\n else:\n runs_on_features = True if model_config['runsOnFeatures'].lower() \\\n == 'true' else False\n\n if model_type == ModelType.NeuralMonkey.value:\n if from_response:\n nm_config = model_config['neuralmonkey']\n else:\n nm_config = model_config\n config_path = nm_config['configPath']\n vars_path = nm_config['varsPath']\n data_series = nm_config['dataSeries']\n src_caption_series = nm_config['srcCaptionSeries']\n\n greedy_series = nm_config['greedySeries']\n attn_series = nm_config['attnSeries']\n bs_series = nm_config['bsSeries']\n\n model = NeuralMonkeyModelWrapper(config_path=config_path,\n vars_path=vars_path,\n data_series=data_series,\n src_caption_series=src_caption_series,\n runs_on_features=runs_on_features,\n caption_series=greedy_series,\n alignments_series=attn_series,\n bs_graph_series=bs_series,\n name=name)\n\n elif model_type == ModelType.Plugin.value:\n if from_response:\n plugin_config = model_config['plugin']\n else:\n plugin_config = model_config\n src_path = 
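One refinement to the SQLite walkthrough above: `sqlite3` connections work as context managers that commit on success and roll back on error, which removes most of the explicit `conexion.commit()` calls (a sketch against the `coches` table created above; note the `with` block does not close the connection, only ends the transaction):

```python
import sqlite3

with sqlite3.connect('ejemplo.db') as conexion:
    # committed automatically if the block exits without an exception
    conexion.execute("UPDATE coches SET velocidad = ? WHERE marca = ?",
                     (180, 'Volvo'))
    for fila in conexion.execute("SELECT * FROM coches WHERE plazas > ?", (3,)):
        print(fila)
```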
plugin_config['path']\n model = PluginModelWrapper(plugin_path=src_path, \n runs_on_features=runs_on_features,\n name=name)\n\n else:\n raise ValueError(\"Unsupported model type %s.\" % model_type)\n\n return model","repo_name":"ashtraysoap/macaque-reloaded","sub_path":"backend/model_wrappers/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"32412962170","text":"from django.db import models\nfrom django.contrib.auth.models import User\n\n\nREASONS = (('', 'Choose a reason'), (1, 'Giving feedback'),\n (2, 'Looking for information'), (3, 'Making a complaint'),\n (4, 'Other'),)\n\n\nclass CustomerMessage(models.Model):\n \"\"\" A model representing private messages sent by users to the staff.\"\"\"\n customer = models.ForeignKey(User, on_delete=models.CASCADE,\n related_name='customer_messages')\n reason = models.IntegerField(choices=REASONS, default=0)\n user_msg = models.TextField(max_length=512, blank=False)\n created_on = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return (f'The user {self.customer} sent a message ' +\n f'to the staff on {self.created_on} for the following ' +\n f'reason: {self.get_reason_display()}')\n","repo_name":"ChrisLPlumb91/everyone-loves-pizza","sub_path":"home/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"9858369340","text":"from functools import partial\n\nimport torch\nimport torch.nn.functional as F\n\nfrom megatron import get_args\nfrom megatron import print_rank_0\nfrom megatron import get_timers\nfrom megatron import mpu\nfrom megatron.data.dataset_utils import build_train_valid_test_datasets\nfrom megatron.model import WaveFormerModel\nfrom megatron.training import pretrain\nfrom megatron.utils import average_losses_across_data_parallel_group\n\n\ndef model_provider(pre_process=True, post_process=True):\n \"\"\"Build the model.\"\"\"\n\n print_rank_0('building WaveFormer model ...')\n\n args = get_args()\n num_tokentypes = 2 if args.binary_head else 0\n\n model = WaveFormerModel(\n num_tokentypes=num_tokentypes,\n add_binary_head=args.binary_head,\n parallel_output=False,\n pre_process=pre_process,\n post_process=post_process)\n\n return model\n\n\ndef get_batch(data_iterator):\n \"\"\"Build the batch.\"\"\"\n\n keys = ['noisy_signal', 'clean_signal', 'mask', 'params']\n datatype = torch.float64\n\n # Broadcast data.\n if data_iterator is not None:\n data = next(data_iterator)\n else:\n data = None\n data_b = mpu.broadcast_data(keys, data, datatype)\n\n # Unpack.\n noisy_signal = data_b['noisy_signal'] #.long()\n clean_signal = data_b['clean_signal'] #.long()\n loss_mask = data_b['mask'] #.long()\n params = data_b['params'] #.long()\n\n return noisy_signal, clean_signal, loss_mask, params\n\ndef loss_func(loss_mask, clean_signal, output_tensor):\n\n denoised_signal, sop_logits = output_tensor\n\n loss_fn = torch.nn.MSELoss()\n lm_loss = loss_fn(denoised_signal.to(torch.float32) * loss_mask.to(torch.float32), clean_signal.to(torch.float32) * loss_mask.to(torch.float32))\n\n loss = lm_loss # + lm_loss_add\n averaged_losses = average_losses_across_data_parallel_group(\n [loss])\n return loss, {'lm loss': averaged_losses[0]}\n\n\ndef forward_step(data_iterator, model):\n \"\"\"Forward step.\"\"\"\n args = get_args()\n timers = get_timers()\n\n # Get the batch.\n 
timers('batch-generator').start()\n    noisy_signal, clean_signal, loss_mask, params = get_batch(data_iterator)\n\n    # loss_mask is used to calculate loss\n    padding_mask = torch.ones(noisy_signal.shape[:2],device=noisy_signal.device) # device='cuda:0'\n    gw_labels = torch.ones(noisy_signal.shape[:2],device=noisy_signal.device) * -1\n    types = torch.zeros(noisy_signal.shape[:2],device=noisy_signal.device)\n\n    timers('batch-generator').stop()\n\n    if not args.binary_head:\n        types = None\n\n    # Forward pass through the model.\n    output_tensor = model(noisy_signal, padding_mask, tokentype_ids=types,\n                          gw_labels=gw_labels)\n\n    return output_tensor, partial(loss_func, loss_mask, clean_signal)\n\n\ndef train_valid_test_datasets_provider():\n    \"\"\"Build train, valid, and test datasets.\"\"\"\n    args = get_args()\n\n    print_rank_0('> building train, validation, and test datasets '\n                 'for BERT ...')\n    train_ds, valid_ds, test_ds = build_train_valid_test_datasets(\n        data_prefix=args.data_path,\n        seq_length=args.seq_length,\n        segment_length=args.segment_length)\n    \n    print_rank_0(\"> finished creating BERT datasets ...\")\n\n    return train_ds, valid_ds, test_ds\n\n\nif __name__ == \"__main__\":\n\n    pretrain(train_valid_test_datasets_provider, model_provider, forward_step)\n\n\n","repo_name":"AI-HPC-Research-Team/LIGO_noise_suppression","sub_path":"pretrain_gw.py","file_name":"pretrain_gw.py","file_ext":"py","file_size_in_byte":3383,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"4"} +{"seq_id":"32972821173","text":"# Check if a number is bigger than 500 AND even number, then print it. ignore the odd numbers.\n\n# my_list = [10,2,20,34,35,60,61,90,78,77,45,40,504,6,8]\n# for a in my_list:\n#     if a > 500:\n#         print(a,\"is bigger than 500\")\n#         continue\n#     if a % 2 == 0:\n#         print(a, \"is an even number\")\n\n# Write a function that takes a list and returns the sum of all the items in the list\nmy_list = [1,3,56,78,90,100,11]\ndef sum_list(items):\n    return sum(items)\n\nprint(sum_list(my_list))\n\nhob_list = [\"soccer\", \"basketball\", \"tennis\"]\ndict_3 = {\"name\":27, \"age\":27, \"hobbies\":hob_list}\n\ndef dict_validatore(a_dict):\n    #{'name':string, 'age':number, 'hobbies':list}\n    if not type(a_dict['name']) is str:\n        print(\"name is not a string\")\n    if not type(a_dict['age']) is int:\n        print(\"age is not a number\")\n    if not type(a_dict['hobbies']) is list:\n        print(\"hobbies error\")\n\ndict_validatore(dict_3)","repo_name":"oshrishaul/lesson1","sub_path":"Lesson2/Task.py","file_name":"Task.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"4635115705","text":"import asyncio\nimport time\n\nimport aiohttp\nfrom json import loads\nfrom asyncio.queues import Queue\nfrom aiohttp.http_websocket import WSMsgType\nfrom src.base import log, repo\nfrom .consts import WSS_PRIVATE_URL, GET, WSS_REQUEST_PATH\nfrom .utils import pre_hash, sign\n\nkey_started = 'private-started'\nkey_running = 'private-running'\nkey_pipes = 'private-pipes'\nkey_pending = 'private-pending'\nkey_subscribe = 'private-subscribe'\nkey_subscribed = 'private-subscribed'\nkey_account = 'private-ws-account'\n\nMAX_WAITING_MSG = 100\nCLOSE_SIGNAL = 'private-stream-close'\n\n\nasync def connect():\n    session = aiohttp.ClientSession()\n    await log.info('websocket private start')\n    interrupted, times = False, 0\n    while True:\n        async with session.ws_connect(url=WSS_PRIVATE_URL, heartbeat=15.0) as ws:\n            await log.info('websocket private 
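On the loss in `pretrain_gw.py` above: multiplying both tensors by `loss_mask` and feeding them to `nn.MSELoss()` still averages over every element, masked or not, so the loss scale depends on how much of each batch is masked. A common alternative (a sketch, not the repository's code) normalizes by the mask itself:

```python
import torch

def masked_mse(pred, target, mask, eps=1e-8):
    # average squared error over *unmasked* elements only
    sq_err = (pred - target) ** 2 * mask
    return sq_err.sum() / (mask.sum() + eps)

pred = torch.randn(2, 8)
target = torch.randn(2, 8)
mask = (torch.rand(2, 8) > 0.5).float()
print(masked_mse(pred, target, mask))
```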
connected')\n await login(ws)\n repo[key_started] = True\n repo[key_running] = True\n while running():\n await send(ws)\n if not await dispatch(ws):\n interrupted = True\n times += 1\n break\n await ws.close()\n\n if not interrupted:\n break\n await log.warning('websocket private interrupted %d, reconnecting...' % times)\n subscribes, subscribed = var(key_subscribe, dict()), var(key_subscribed, dict())\n item = subscribed.popitem()\n while item:\n subscribes[item[0]] = item[1]\n item = subscribed.popitem() if subscribed else None\n interrupted = False\n\n await close()\n await session.close()\n await log.info('websocket private has been closed')\n\n\nasync def login(ws):\n acc = var(key_account, dict())\n for i in range(10):\n if acc:\n break\n await asyncio.sleep(.5)\n if not acc:\n await log.fatal('account not found, exit')\n raise SystemExit(1)\n\n packet = _login_json(acc)\n await ws.send_json(packet)\n # await log.info('send login params %s' % packet)\n msg = await ws.receive()\n if msg.type != WSMsgType.TEXT:\n await log.error('unknown msg type %s %s' % (msg.type, msg.data))\n raise SystemExit(1)\n\n json = loads(msg.data)\n event = json.get('event')\n if event == 'login':\n await log.info('login ok')\n return\n\n await log.error('login failed by %s' % json)\n raise SystemExit(1)\n\n\nasync def send(ws):\n subscribes = var(key_subscribe, dict())\n if not subscribes:\n return\n\n pending = var(key_pending, dict())\n item = subscribes.popitem()\n while item:\n await ws.send_json(item[1])\n pending[item[0]] = item[1]\n item = subscribes.popitem() if subscribes else None\n\n\nasync def dispatch(ws) -> bool:\n msg = await ws.receive()\n if msg.type == WSMsgType.CLOSE or msg.type == WSMsgType.CLOSED:\n await log.warning('received close signal %s' % msg.type)\n return False\n elif msg.type != WSMsgType.TEXT:\n await log.warning('unknown msg type %s %s' % (msg.type, msg.data))\n return True\n\n json = loads(msg.data)\n if 'data' in json:\n arg, pipes = json['arg'], var(key_pipes, dict())\n queues = pipes.get(_key(arg['channel'], arg['instId']))\n if not queues:\n await log.warning('no subscriber at %s %s' % (arg['channel'], arg['instId']))\n else:\n for queue in queues:\n if queue.qsize() > MAX_WAITING_MSG:\n queue.get_nowait() # discard old msg\n queue.put_nowait(json['data'])\n elif 'event' in json:\n event = json['event']\n if event == 'error':\n await log.error('event error by %s' % json)\n else:\n arg, pending, subscribed = json['arg'], var(key_pending, dict()), var(key_subscribed, dict())\n if event == 'subscribe':\n key = _key(arg['channel'], arg['instId'])\n subscribed[key] = pending.pop(key)\n await log.info('subscribed %s %s' % (arg['channel'], arg['instId']))\n elif event == 'login':\n await log.info('login ok')\n else:\n await log.warning('received unknown data %s' % json)\n return True\n\n\nasync def account(acc: dict):\n var(key_account, dict()).update(acc)\n\n\nasync def subscribe(channel: str, inst_id: str, inst_type='') -> Queue:\n key = _key(channel, inst_id)\n queue = Queue()\n\n pipes, subscribes = var(key_pipes, dict()), var(key_subscribe, dict())\n queues: list = pipes.get(key)\n if queues:\n queues.append(queue)\n else:\n pipes[key] = [queue]\n subscribes[key] = _subscribe_json(channel, inst_id, inst_type)\n return queue\n\n\nasync def close():\n repo[key_running] = False\n pipes = var(key_pipes, dict())\n if not pipes:\n return\n for queues in pipes.values():\n for queue in queues:\n queue.put_nowait(CLOSE_SIGNAL)\n\n\ndef close_signal(msg: str) -> bool:\n return 
type(msg) == str and msg == CLOSE_SIGNAL\n\n\ndef var(key: str, default_val):\n v = repo.get(key)\n if v is None:\n v = default_val\n repo[key] = v\n return v\n\n\ndef running() -> bool:\n return repo.get(key_running) is True\n\n\ndef started() -> bool:\n return repo.get(key_started) is True\n\n\ndef _key(channel: str, inst_id: str):\n return hash(channel + inst_id)\n\n\ndef _login_json(acc: dict):\n ts = str(int(time.time()))\n signature = sign(pre_hash(ts, GET, WSS_REQUEST_PATH, ''), acc['secretkey'])\n arg = {\n 'apiKey': acc['apikey'],\n 'passphrase': acc['passphrase'],\n 'timestamp': ts,\n 'sign': signature.decode('utf-8')\n }\n return {'op': 'login', 'args': [arg]}\n\n\ndef _subscribe_json(channel: str, inst_id: str, inst_type: str):\n arg = {'channel': channel, 'instId': inst_id}\n if inst_type:\n arg['instType'] = inst_type\n return {'op': 'subscribe', 'args': [arg]}\n","repo_name":"iuShu/python","sub_path":"okx-trade/src/okx/private.py","file_name":"private.py","file_ext":"py","file_size_in_byte":5948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"9421591352","text":"from functools import partial\nfrom typing import Callable\n\nimport pytest\nimport torch\nfrom mir_eval.separation import bss_eval_images as mir_eval_bss_eval_images\nfrom torch import Tensor\nfrom torchmetrics.audio import SignalNoiseRatio\nfrom torchmetrics.functional.audio import signal_noise_ratio\n\nfrom unittests import _Input\nfrom unittests.helpers import seed_all\nfrom unittests.helpers.testers import MetricTester\n\nseed_all(42)\n\n\ninputs = _Input(\n preds=torch.rand(2, 1, 1, 25),\n target=torch.rand(2, 1, 1, 25),\n)\n\n\ndef _bss_eval_images_snr(preds: Tensor, target: Tensor, zero_mean: bool):\n # shape: preds [BATCH_SIZE, 1, Time] , target [BATCH_SIZE, 1, Time]\n # or shape: preds [NUM_BATCHES*BATCH_SIZE, 1, Time] , target [NUM_BATCHES*BATCH_SIZE, 1, Time]\n if zero_mean:\n target = target - torch.mean(target, dim=-1, keepdim=True)\n preds = preds - torch.mean(preds, dim=-1, keepdim=True)\n target = target.detach().cpu().numpy()\n preds = preds.detach().cpu().numpy()\n mss = []\n for i in range(preds.shape[0]):\n ms = []\n for j in range(preds.shape[1]):\n snr_v = mir_eval_bss_eval_images([target[i, j]], [preds[i, j]], compute_permutation=True)[0][0]\n ms.append(snr_v)\n mss.append(ms)\n return torch.tensor(mss)\n\n\ndef _average_metric(preds: Tensor, target: Tensor, metric_func: Callable):\n # shape: preds [BATCH_SIZE, 1, Time] , target [BATCH_SIZE, 1, Time]\n # or shape: preds [NUM_BATCHES*BATCH_SIZE, 1, Time] , target [NUM_BATCHES*BATCH_SIZE, 1, Time]\n return metric_func(preds, target).mean()\n\n\nmireval_snr_zeromean = partial(_bss_eval_images_snr, zero_mean=True)\nmireval_snr_nozeromean = partial(_bss_eval_images_snr, zero_mean=False)\n\n\n@pytest.mark.parametrize(\n \"preds, target, ref_metric, zero_mean\",\n [\n (inputs.preds, inputs.target, mireval_snr_zeromean, True),\n (inputs.preds, inputs.target, mireval_snr_nozeromean, False),\n ],\n)\nclass TestSNR(MetricTester):\n \"\"\"Test class for `SignalNoiseRatio` metric.\"\"\"\n\n atol = 1e-2\n\n @pytest.mark.parametrize(\"ddp\", [True, False])\n def test_snr(self, preds, target, ref_metric, zero_mean, ddp):\n \"\"\"Test class implementation of metric.\"\"\"\n self.run_class_metric_test(\n ddp,\n preds,\n target,\n SignalNoiseRatio,\n reference_metric=partial(_average_metric, metric_func=ref_metric),\n metric_args={\"zero_mean\": zero_mean},\n )\n\n def test_snr_functional(self, preds, 
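The login packet built by `_login_json` above delegates to `pre_hash` and `sign` from the package's `utils` module, which is not shown in this excerpt. By OKX's documented v5 scheme these amount to an HMAC-SHA256 over `timestamp + method + request_path + body`, base64-encoded; a sketch of what such helpers plausibly look like (an assumption, not the repository's actual code):

```python
import base64
import hashlib
import hmac

def pre_hash(timestamp, method, request_path, body):
    # the message the signature is computed over, per the OKX API v5 docs
    return f"{timestamp}{method}{request_path}{body}"

def sign(message, secret_key):
    mac = hmac.new(secret_key.encode('utf-8'),
                   message.encode('utf-8'),
                   hashlib.sha256)
    return base64.b64encode(mac.digest())  # bytes; caller .decode()s before sending

print(sign(pre_hash("1700000000", "GET", "/users/self/verify", ""), "secret"))
```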
target, ref_metric, zero_mean):\n \"\"\"Test functional implementation of metric.\"\"\"\n self.run_functional_metric_test(\n preds,\n target,\n signal_noise_ratio,\n ref_metric,\n metric_args={\"zero_mean\": zero_mean},\n )\n\n def test_snr_differentiability(self, preds, target, ref_metric, zero_mean):\n \"\"\"Test the differentiability of the metric, according to its `is_differentiable` attribute.\"\"\"\n self.run_differentiability_test(\n preds=preds,\n target=target,\n metric_module=SignalNoiseRatio,\n metric_functional=signal_noise_ratio,\n metric_args={\"zero_mean\": zero_mean},\n )\n\n def test_snr_half_cpu(self, preds, target, ref_metric, zero_mean):\n \"\"\"Test dtype support of the metric on CPU.\"\"\"\n pytest.xfail(\"SNR metric does not support cpu + half precision\")\n\n @pytest.mark.skipif(not torch.cuda.is_available(), reason=\"test requires cuda\")\n def test_snr_half_gpu(self, preds, target, ref_metric, zero_mean):\n \"\"\"Test dtype support of the metric on GPU.\"\"\"\n self.run_precision_test_gpu(\n preds=preds,\n target=target,\n metric_module=SignalNoiseRatio,\n metric_functional=signal_noise_ratio,\n metric_args={\"zero_mean\": zero_mean},\n )\n\n\ndef test_error_on_different_shape(metric_class=SignalNoiseRatio):\n \"\"\"Test that error is raised on different shapes of input.\"\"\"\n metric = metric_class()\n with pytest.raises(RuntimeError, match=\"Predictions and targets are expected to have the same shape\"):\n metric(torch.randn(100), torch.randn(50))\n","repo_name":"Lightning-AI/torchmetrics","sub_path":"tests/unittests/audio/test_snr.py","file_name":"test_snr.py","file_ext":"py","file_size_in_byte":4216,"program_lang":"python","lang":"en","doc_type":"code","stars":1718,"dataset":"github-code","pt":"4"} +{"seq_id":"16703141652","text":"# Project - Fischer's Iris Data Set\n# Programming & Scripting Project 2020\n# Using datasets downloaded from http://archive.ics.uci.edu/ml/datasets/Iris\n\nimport numpy as np # import numpy as np\nimport pandas as pd # Import pandas as pd \nimport matplotlib.pyplot as plt # Import matplotliv.pyplot as plt\nimport matplotlib.ticker as mtick # Import matplotlib.ticker as mtick\n\nhead_row = [\"Sepal Length\", \"Sepal Width\", \"Petal Length\", # Header row for dataset\n \"Petal Width\", \"Class\"]\n\ndf = pd.read_csv(\"datasets/bezdekIris.data\", names=head_row) # Use pandas to read csv & include header row.\n # note: dataset bedekIris.data used in Project. \n\n \n# Summary of Variables\n \nwith open('outputs/summary.txt', 'w') as f: # Create a writeable text file named summary.txt\n print(\"DATASET VARIABLES SUMMARIES:\\n\\n\", df.describe(), file=f) # Use pandas df.describe() to print summary of each variable to summary text file\n\n\nsl = df[\"Sepal Length\"] # assigning each of the dataset's variables or column arrays a name\nsw = df[\"Sepal Width\"] # for ease of reading & writing of code\npl = df[\"Petal Length\"]\npw = df[\"Petal Width\"]\n\n\n \n# Histograms of the Dataset Variables\n \ncolour = ['b', 'g', 'm', 'r'] # List of colours to be used for plotting different histograms.\nn = 0 # Initialize for loop with n = 0\nn_bins = 7 # setting the number of bins in each histogram = 7\n \nfor n in range(0, 4): \n \n var = head_row[n] # For loop to iternate through variables in head_row, 4No. 
variables (0 to 3)\n plt.figure(n) # plotting a histogram for each variable, figures(0 - 3)\n plt.hist(df[var], bins=n_bins, facecolor=colour[n], ec=\"black\") # setting number of bins, applying different colours & a black edge colour to bins\n\n start = min(df[var]) # the start value of the histogram bins (min value of variable)\n end = max(df[var]) # the end value of the histogram bins (max value of variable)\n step = (end - start) / n_bins # variable values between bins \n plt.xticks(np.arange(start, (end + step) ,step)) # applying bin values to the xticks of histogram \n \n plt.xlabel(var) # label x-axis (variable name)\n plt.ylabel(\"Sample Freq\") # label y-axis (sample frequency)\n plt.title(\"Histogram of \" + var) # add titles \n plt.grid(which='major', axis='y', linestyle='dotted', alpha=0.8) # adding grid \n plt.savefig(fname=\"outputs/\" + var + \" histogram\") # saving each plot as a individual file in the outputs folder \n n = n + 1\n\n\n\n# Combined Histograms of the Dataset Variables\n# all variable histograms plotted on a single figure\n\nfig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharey=True) # using subplots to provide 4no histograms on a single figure (2 rows x 2 columns)\n # y axis shared between plots (sharey=True)\n \nax1.hist(sl, weights=np.ones(len(sl)) / len(sl), \n bins=n_bins, facecolor='blue', ec=\"black\", # axes 1 - Sepal Length (sl), no of bins, colour of plot, edge colour & label. \n label=\"Sepal Length (cm)\") # weights added to display histogram as percentage.\n\nax1.set(ylabel=\"Percentage of Samples\") # applying the shared y axis label\n\nstart = min(sl) # the start value of the histogram bins (min value of variable)\nend = max(sl) # the end value of the histogram bins (max value of variable)\nstep = (end - start) / n_bins # variable values between bins \nax1.set_xticks(np.arange(start, (end + step) ,step)) # applying bin values to the xticks of histogram \nax1.yaxis.set_major_formatter(mtick.PercentFormatter(1)) # displaying the y-axis values as percentage of samples\nax1.grid(which='major', axis='y', linestyle='dotted', alpha=0.8) # adding horizontal grid to histogram\nax1.legend()\n\n \nax2.hist(sw, weights=np.ones(len(sw)) / len(sw), # axes 2 - Sepal Width (sw), no of bins, colour of plot, edge colour & label.\n bins=n_bins, facecolor='green', ec=\"black\", # weights added to display histogram as percentage.\n label=\"Sepal Width (cm)\")\n\nstart = min(sw) # as per axes 1\nend = max(sw) \nstep = (end - start) / n_bins \nax2.set_xticks(np.arange(start, (end + step) ,step)) \nax2.yaxis.set_major_formatter(mtick.PercentFormatter(1))\nax2.grid(which='major', axis='y', linestyle='dotted', alpha=0.8) \nax2.legend()\n\n\nax3.hist(pl, weights=np.ones(len(pl)) / len(pl), # axes 3 - Petal Length (pl), no of bins, colour of plot, edge colour & label.\n bins=n_bins, facecolor='m', ec=\"black\", # weights added to display histogram as percentage.\n label=\"Petal Length (cm)\")\n\nax3.set(ylabel=\"Percentage of Samples\")\n\nstart = min(pl) # as per axes 1\nend = max(pl)\nstep = (end - start) / n_bins\nax3.set_xticks(np.arange(start, (end + step) ,step))\nax3.yaxis.set_major_formatter(mtick.PercentFormatter(1))\nax3.grid(which='major', axis='y', linestyle='dotted', alpha=0.8)\nax3.legend()\n\n\nax4.hist(pw, weights=np.ones(len(pw)) / len(pw), # axes 4 - Petal Width (pw), no of bins, colour of plot, edge colour & label.\n bins=n_bins, facecolor='red', ec=\"black\", # weights added to display histogram as percentage.\n label=\"Petal Width (cm)\")\n\nstart 
= min(pw) # as per axes 1\nend = max(pw)\nstep = (end - start) / n_bins\nax4.set_xticks(np.arange(start, (end + step) ,step))\nax4.yaxis.set_major_formatter(mtick.PercentFormatter(1))\nax4.grid(which='major', axis='y', linestyle='dotted', alpha=0.8)\nax4.legend()\n\nplt.tight_layout() # adjusts subplots so all fit within figure\nplt.savefig(fname=\"outputs/Combined Histograms\") # save file to outputs folder\n \n\n\n# Scatter Plots - Pairs of Variables \n# all pairs of variables on individual subplots plotted\n# on a 1 single figure\n \nf, ((ax1, ax2), (ax3, ax4), (ax5, ax6)) = plt.subplots(3, 2) # using subplots to provide 6no. plots on a single figure f (3 rows x 2 columns)\nf.suptitle(\"Scatter Plots - Pairs of Variables\") # title for figure\n\nax1.scatter(sl, sw, marker='.', c='b', label=\"SL v SW\") # axes 1 - sepal length (SL) vs sepal width (SW), adjusting marker & colour, adding label\nax1.legend(fontsize=\"x-small\", markerscale=2, edgecolor=\"black\") # legend for axes 1, adjusting textsize, marker scale & legend box colour.\n\nax2.scatter(sl, pl, marker='.', c='g', label=\"SL v PL\") # axes 1 - sepal length (SL) vs petal length (PL)\nax2.legend(fontsize=\"x-small\", markerscale=2, edgecolor=\"black\") # legend for axes 2\n\nax3.scatter(sl, pw, marker='.', c='m', label=\"SL v PW\") # axes 1 - sepal length (SL) vs petal width (PW)\nax3.legend(fontsize=\"x-small\", markerscale=2, edgecolor=\"black\") # legend for axes 3\n\nax4.scatter(sw, pl, marker='.', c='c', label=\"SW v PL\") # axes 1 - sepal width (SW) vs petal length (PL)\nax4.legend(fontsize=\"x-small\", markerscale=2, edgecolor=\"black\") # legend for axes 4\n\nax5.scatter(sw, pw, marker='.', c='r', label=\"SW v PW\") # axes 1 - sepal width (SW) vs petal width (PW)\nax5.legend(fontsize=\"x-small\", markerscale=2, edgecolor=\"black\") # legend for axes 5\n\nax6.scatter(pl, pw, marker='.', c='y', label=\"PL v PW\") # axes 6 - petal length (PL) vs petal width (PW)\nax6.legend(fontsize=\"x-small\", markerscale=2, edgecolor=\"black\") # legend for axes 6\n\nplt.savefig(fname=\"outputs/Scatter Plots - Pairs of Variables\") # saving plot in the outputs folder \n\n\n\n# Combined Scatter Plots - Pairs of Variables\n# all pairs of vaiables combined on a single scatter plot \n \nplt.figure(6) # combination of above plots on a single plot or axes\nplt.scatter(sl, sw, marker='.', c='b', label=\"SL v SW\") \nplt.scatter(sl, pl, marker='.', c='g', label=\"SL v PL\")\nplt.scatter(sl, pw, marker='.', c='m', label=\"SL v PW\") \nplt.scatter(sw, pl, marker='.', c='c', label=\"SW v PL\")\nplt.scatter(sw, pw, marker='.', c='r', label=\"SW v PW\") \nplt.scatter(pl, pw, marker='.', c='y', label=\"PL v PW\")\nplt.legend(fontsize=\"x-small\", markerscale=2, edgecolor=\"black\") # adjusting legend properties\nplt.title(\"Combined Scatter Plots - Pairs of Variables\") # adding title to plot\n\nplt.savefig(fname=\"outputs/Combined Scatter Plots - Pairs of Variables\") # saving plot in the outputs folder \n\nplt.show() # plot to screen all plots\n\n\n","repo_name":"PaulSweeney89/P-S-Tasks","sub_path":"Project-Iris/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":11504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"18003891240","text":"from app import app\nfrom dao import Dao\nfrom venmo import Venmo\n\n@app.route('/webhook', methods=['POST'])\ndef handle_order():\n result = request.form\n jsons = json.loads(result['envelope'])\n from_ = jsons['from']\n to = 
jsons['to']\n    message = result['text']\n    subject = result['subject']\n    dao = Dao()\n    store = dao.getStore(to)\n    #check if store exists\n    item = dao.getItem(subject.strip(), store.email)\n    #send venmo payment\n    venmo = Venmo()\n    venmo.chargeByEmail(from_, to, item)\n#\tfor key in result:\n#\t\tprint('Key: ' + str(key) + ' Result: ' + result[key])\n    print('From: ' + from_ + ' Subject: ' + subject + ' Message: ' + message)\n    return str(request.form)","repo_name":"chrisrodz/jacarllu","sub_path":"sendgrid_webhook.py","file_name":"sendgrid_webhook.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"30568752123","text":"from typing import Literal\n\nfrom utils import get_question_input\n\ndef translate_round(round_line:str):\n    \"\"\"A Y => Rock, Paper\"\"\"\n    items = round_line.strip().split()\n    \n    opponent_move = items[0]\n    if opponent_move == \"A\":\n        opponent_move = \"rock\"\n    if opponent_move == \"B\":\n        opponent_move = \"paper\"\n    if opponent_move == \"C\":\n        opponent_move = \"scissors\"\n\n    player_move = items[1]\n    if player_move == \"X\":\n        player_move = \"rock\"\n    if player_move == \"Y\":\n        player_move = \"paper\"\n    if player_move == \"Z\":\n        player_move = \"scissors\"\n    \n    return [opponent_move, player_move]\n\n\ndef get_score_for_round(opponent_move: Literal[\"rock\", \"paper\", \"scissors\"], player_move: Literal[\"rock\", \"paper\", \"scissors\"]):\n    total_score = 0\n    # player move points:\n    if player_move == \"rock\":\n        total_score += 1\n    elif player_move == \"paper\":\n        total_score += 2\n    elif player_move == \"scissors\":\n        total_score += 3\n\n    SCORE_LOSE = 0\n    SCORE_DRAW = 3\n    SCORE_WIN = 6\n\n    if opponent_move == player_move:\n        total_score += SCORE_DRAW\n    if (opponent_move == \"rock\" and player_move == \"paper\") or (opponent_move == \"paper\" and player_move == \"scissors\") or (opponent_move == \"scissors\" and player_move == \"rock\"):\n        total_score += SCORE_WIN\n    if (opponent_move == \"rock\" and player_move == \"scissors\") or (opponent_move == \"paper\" and player_move == \"rock\") or (opponent_move == \"scissors\" and player_move == \"paper\"):\n        total_score += SCORE_LOSE\n    \n    return total_score\n\n    \nif __name__ == \"__main__\":\n    question_input = get_question_input(\"02.txt\")\n    \n    translated_rounds = (translate_round(r) for r in question_input.splitlines())\n    scored_rounds = (get_score_for_round(*r) for r in translated_rounds)\n\n    total = sum(scored_rounds)\n    print(total)","repo_name":"rbrunt/advent-of-code","sub_path":"2022/day_02_1.py","file_name":"day_02_1.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"23359901665","text":"from pyoptix.mixins.destroyable import DestroyableObject\n\n\nclass ScopedObject(DestroyableObject):\n    def __init__(self, native):\n        DestroyableObject.__init__(self, native)\n        self._variables = {}\n\n    def __setitem__(self, key, value):\n        from pyoptix.variable import Variable\n\n        variable_wrapper = self._safe_native.query_variable(key)\n        declared = False\n\n        if variable_wrapper is None:\n            variable_wrapper = self._safe_native.declare_variable(key)\n            declared = True\n\n        try:\n            optix_variable = Variable(variable_wrapper)\n            optix_variable.value = value\n            self._variables[key] = optix_variable\n        except Exception as e:\n            if declared:\n                self._safe_native.remove_variable(variable_wrapper)\n            raise e\n\n    def __getitem__(self, key):\n        return 
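The SendGrid handler earlier in this stretch (sendgrid_webhook.py) references `request` and `json` without importing either, so it fails at its first line. A corrected sketch of the same handler (assuming a Flask-style `app` as in the original, and keeping the script's `Dao`/`Venmo` interfaces as given):

```python
import json
from flask import request  # the handler reads request.form

from app import app
from dao import Dao
from venmo import Venmo


@app.route('/webhook', methods=['POST'])
def handle_order():
    result = request.form
    envelope = json.loads(result['envelope'])
    from_, to = envelope['from'], envelope['to']
    subject = result['subject'].strip()  # str.strip(), not Java's trim()
    dao = Dao()
    item = dao.getItem(subject, dao.getStore(to).email)
    Venmo().chargeByEmail(from_, to, item)
    return str(result)
```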
self._variables[key].value\n\n    def __len__(self):\n        return len(self._variables)\n\n    def __delitem__(self, key):\n        wrapped_variable = self._safe_native.query_variable(key)\n        if not wrapped_variable.is_valid():\n            raise ValueError(\"Variable not found\")\n\n        self._safe_native.remove_variable(wrapped_variable)\n        del self._variables[key]\n\n    def __contains__(self, item):\n        return item in self._variables\n","repo_name":"ozen/PyOptiX","sub_path":"pyoptix/mixins/scoped.py","file_name":"scoped.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"4"} +{"seq_id":"35384562193","text":"\"\"\"\r\ngeneric script\r\n\r\ntext: \"fooziman\" output => \"fzmn\" \r\ntext: \"barziman\" output => \"brzmn\" \r\ntext: \"qux\" output => \"qx\" \r\n\"\"\"\r\n\r\n\r\ndef fn_hack_2(result):\r\n    # List of vowels to remove from the result string\r\n    vocales_list = [\"a\",\"e\",\"i\",\"o\",\"u\"]\r\n    for i in result:\r\n        if (i in vocales_list):\r\n            result = result.replace(i, \"\") \r\n    return result","repo_name":"joselpc84/hack5_python","sub_path":"hack_2.py","file_name":"hack_2.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"35564324223","text":"from project.people.child import Child\nfrom project.rooms.alone_old import AloneOld\nfrom project.rooms.alone_young import AloneYoung\nfrom project.rooms.room import Room\nfrom project.rooms.young_couple import YoungCouple\nfrom project.rooms.young_couple_with_children import YoungCoupleWithChildren\n\n\nclass Everland:\n    def __init__(self):\n        self.rooms = []\n\n    def add_room(self, room: Room):\n        self.rooms.append(room)\n\n    def get_monthly_consumptions(self):\n        total_consumption = 0\n        for room in self.rooms:\n            total_consumption += room.expenses\n        return f\"Monthly consumptions: {total_consumption:.2f}$.\"\n\n    def pay(self):\n        result = []\n        for rooms in list(self.rooms):  # iterate over a copy so rooms can be removed safely\n            # calculation = Room.calculate_expenses(rooms, rooms.children, rooms.appliances)\n            if rooms.budget >= rooms.expenses:\n                rooms.budget -= rooms.expenses\n                result.append(f\"{rooms.family_name} paid {rooms.expenses}$ and have {rooms.budget:.2f}$ left.\")\n            else:\n                self.rooms.remove(rooms)\n                result.append(f'{rooms.family_name} does not have enough budget and must leave the hotel.')\n\n        return '\\n'.join(sorted(result))\n\n    def status(self):\n        result = \"\"\n\n        result += f\"Total population: {sum([r.members_count for r in self.rooms])}\\n\"\n        for r in self.rooms:\n            result += f\"{r.family_name} with {r.members_count} members. 
Budget: {r.budget:.2f}$, Expenses: {r.expenses:.2f}$\\n\"\n            if r.children:\n                counter = 0\n                for c in r.children:\n                    counter += 1\n                    result += f\"--- Child {counter} monthly cost: {(c.cost * 30):.2f}$\\n\"\n            if hasattr(r, 'appliances'):\n                total_expenses = 0\n                for a in r.appliances:\n                    total_expenses += a.get_monthly_expense()\n                result += f\"--- Appliances monthly cost: {total_expenses:.2f}$\\n\"\n\n        return result","repo_name":"vasetousa/re-visit-Python-OOP","sub_path":"Exam_22_Aug_2020/project/everland.py","file_name":"everland.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"29534317844","text":"from django.urls import path\nfrom django.views.generic import TemplateView\nfrom apps.sitio.views import *\n\nurlpatterns = [\n    path('', TemplateView.as_view(template_name='index.html'), name='index'),\n    path('acerca_de/', TemplateView.as_view(template_name='acerca_de.html'), name='acerca_de'),\n    path('signup/', signup, name='signup'),\n    path('accounts/login/', signin, name='signin'),\n    path('sign_out/', sign_out, name='signout'),\n    path('500', server_error, name='server_error'),\n    path('404', page_not_found_error, name='page_not_found_error'),\n]\n\nhandler500 = server_error\nhandler404 = page_not_found_error\nhandler403 = permission_denied_error","repo_name":"MatiPendino/cocina-salud","sub_path":"CocinaSalud/apps/sitio/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"24489742172","text":"import json\n\nfrom core.db.models import Question\nfrom core.db.models import Tag\nfrom core.db.models import User\n\nfrom tests.base import AuthAppTestCase\n\nURI = '/questions/{id}'\n\n\ndef get_valid_data():\n    return {\n        'title': 'Title',\n        'body': 'Body',\n        'tags': ['JavaScript', 'ReactJS']\n    }\n\n\nclass TestWithInvalidParams(AuthAppTestCase):\n\n    def test_with_invalid_id(self):\n\n        # Call\n        response = self.fetch(URI.format(id=0), method='PUT', body=json.dumps(dict()))\n        body = self.response_dict(response)\n\n        # Check response\n        self.assertEqual(404, response.code)\n        self.assertEqual('No Question with id 0', body[\"error\"]['message'])\n\n\nclass TestWithValidParams(AuthAppTestCase):\n\n    def setUp(self):\n        super().setUp()\n        u1 = User(firstname=\"Fernando\", lastname=\"Alonso\", email=\"fernando.alonso@mclaren.com\")\n        self.q1 = Question(title=\"What is the fastest car?\", body=\"Which team should I choose to win the F1 world championship?\", user=u1, creator_id=self.request_user.id)\n        tag = Tag(label='JavaScript')\n        self.db.add(u1)\n        self.db.add(tag)\n        self.db.add(self.q1)\n        self.db.commit()\n\n    def tearDown(self):\n        self.db.query(Tag).delete()\n        self.db.query(Question).delete()\n        self.db.query(User).delete()\n        self.db.commit()\n        super().tearDown()\n\n    def test_valid_data(self):\n\n        # Prepare data\n        data = get_valid_data()\n\n        # Call\n        response = self.fetch(URI.format(id=self.q1.id), method='PUT', body=json.dumps(data))\n        body = self.response_dict(response)\n\n        # Check status code\n        self.assertEqual(200, response.code)\n\n        # Check returned data\n        returned_data = body['data']\n        self.assertIn('title', returned_data)\n        self.assertEqual(data['title'], returned_data['title'])\n        self.assertIn('body', returned_data)\n        self.assertEqual(data['body'], 
returned_data['body'])\n","repo_name":"william57m/mix-answer","sub_path":"server/tests/api/questions/test_put.py","file_name":"test_put.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"4"} +{"seq_id":"21014159948","text":"from datetime import timedelta\n\nimport pytz\nfrom flask import current_app as app\n\nfrom detectors import DetectionTarget\nfrom detectors import dtm\nfrom models import TaskTable\nfrom tasks import TaskHandlerBase\nfrom tasks import TaskProgress\nfrom tasks.running import RunningTaskHandler\nfrom utils.scan import get_safe_url\nfrom utils.scan import validate_host\n\n\nclass PendingTaskHandler(TaskHandlerBase):\n    def __init__(self):\n        super().__init__(TaskProgress.PENDING.name)\n\n    def add(self, scan):\n        app.logger.info(\"Try to enqueue into {}: scan={}\".format(self.progress, scan))\n        detector = dtm.load_detector(scan[\"detection_module\"], None)\n\n        if detector.TARGET_TYPE == DetectionTarget.HOST.value:\n            validate_host(scan[\"target\"])\n        elif detector.TARGET_TYPE == DetectionTarget.URL.value:\n            scan[\"target\"] = get_safe_url(scan[\"target\"])\n\n        # Avoid concurrent scanning for the same target\n        task_query = TaskTable.select().where(TaskTable.target == scan[\"target\"])\n        if task_query.count() > 0:\n            app.logger.info(\n                \"Abandoned to enqueue scan={} because another scan for '{}' is still running\".format(\n                    scan[\"id\"], scan[\"target\"]\n                )\n            )\n            return None\n\n        session = detector.create()\n        task = {\n            \"audit_id\": scan[\"audit_id\"],\n            \"scan_id\": scan[\"id\"],\n            \"scan_uuid\": scan[\"uuid\"],\n            \"target\": scan[\"target\"],\n            \"scheduled_at\": scan[\"scheduled_at\"],\n            \"max_duration\": scan[\"max_duration\"],\n            \"detection_module\": scan[\"detection_module\"],\n            \"detection_mode\": scan[\"detection_mode\"],\n            \"session\": session,\n            \"progress\": TaskProgress.PENDING.name,\n        }\n        task = TaskTable(**task)\n        task.save()\n        app.logger.info(\"Enqueued into {} successfully: scan={}\".format(self.progress, scan[\"id\"]))\n        return task\n\n    def process(self, task):\n        # Cancel if detector stays pending for a long time\n        created_at = task[\"created_at\"].replace(tzinfo=pytz.utc)\n        if self.now > (created_at + timedelta(hours=app.config[\"SCAN_MAX_PENDING_DURATION_IN_HOUR\"])):\n            raise Exception(\"Cancelled: detector stayed pending for a long time\")\n\n        # Check if detector is ready to scan\n        detector = dtm.load_detector(task[\"detection_module\"], task[\"session\"])\n        if detector.is_ready():\n            # Enqueue the task to running queue\n            RunningTaskHandler().add(task)\n","repo_name":"nishimunea/NT-D","sub_path":"core/tasks/pending.py","file_name":"pending.py","file_ext":"py","file_size_in_byte":2571,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"} +{"seq_id":"9203346425","text":"import matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom ..style.formats import _thousand_sep\nfrom ..style.style import params\nfrom ..style.titles import _titles\nfrom ..style.template import _header, _footer\nfrom ..utils.utils import _limiter, _scaler\nfrom ..style.sizer import _sizer\n\n\ndef scat(data,\n         x,\n         y,\n         hue=None,\n         size=None,\n         palette='default',\n         style='astetik',\n         dpi=72,\n         title='',\n         sub_title='',\n         x_label='',\n         y_label='',\n         legend=True,\n         x_scale='linear',\n         y_scale='linear',\n         x_limit=None,\n         y_limit=None,\n         outliers=False,\n         save=False):\n\n    '''SCATTER PLOT\n\n    Well actually, in comparison to a traditional scatter plot, there\n    are 
some limitations here. Observations may overlap, and sizing is\n    possible. 'x' and 'hue' should not be continuous variables\n    ('size' can be stepped). If you want a basic scatter plot, use regs() and\n    also see swarm() and strip() depending on your data.\n\n    1. USE\n    ======\n    p = scat(data=df,\n             x='Class',\n             y='Fare',\n             hue='Survived',\n             size='Rand',\n             palette='default',\n             style='astetik')\n\n    2. PARAMETERS\n    =============\n    2.1 INPUT PARAMETERS\n    --------------------\n    data :: pandas dataframe\n\n    x :: x-axis data\n\n    y :: y-axis data\n\n    hue :: color highlight (categorical or boolean)\n\n    size :: the size of the dots in the plot (continuous or stepped)\n\n    --------------------\n    2.2. PLOT PARAMETERS\n    --------------------\n    None\n\n    ----------------------\n    2.3. COMMON PARAMETERS\n    ----------------------\n    palette :: One of the hand-crafted palettes:\n                    'default'\n                    'colorblind'\n                    'blue_to_red'\n                    'blue_to_green'\n                    'red_to_green'\n                    'green_to_red'\n                    'violet_to_blue'\n                    'brown_to_green'\n                    'green_to_marine'\n\n                Or use any cmap, seaborn or matplotlib\n                color or palette code, or hex value.\n\n    style :: Use one of the three core styles:\n                    'astetik'     # white\n                    '538'         # grey\n                    'solarized'   # sepia\n\n              Or alternatively use any matplotlib or seaborn\n              style definition.\n\n    dpi :: the resolution of the plot (int value)\n\n    title :: the title of the plot (string value)\n\n    sub_title :: a secondary title to be shown below the title\n\n    x_label :: string value for x-axis label\n\n    y_label :: string value for y-axis label\n\n    x_scale :: 'linear' or 'log' or 'symlog'\n\n    y_scale :: 'linear' or 'log' or 'symlog'\n\n    x_limit :: int or list with two ints\n\n    y_limit :: int or list with two ints\n\n    outliers :: Remove outliers using either 'zscore' or 'iqr'\n\n    '''\n\n    # PLOT SPECIFIC START >>>\n    if hue != None:\n        n = len(data[hue].unique())\n    else:\n        n = 1\n        legend = False\n\n    if size == None:\n        size = 8\n    else:\n        size = _sizer(data[size])\n    # <<< PLOT SPECIFIC ENDS\n\n    # HEADER STARTS >>>\n    palette = _header(palette, style, n_colors=n, dpi=dpi)  # NOTE: y exception\n    # <<< HEADER ENDS\n\n    # # # # # # PLOT CODE STARTS # # # # # #\n    p, ax = plt.subplots(figsize=(params()['fig_width'],\n                                  params()['fig_height']))\n    p = sns.stripplot(data=data,\n                      x=x,\n                      y=y,\n                      hue=hue,\n                      palette=palette,\n                      linewidth=1,\n                      size=size)\n    # # # # # # PLOT CODE ENDS # # # # # #\n\n    # SCALING AND LIMITS STARTS >>>\n    if x_scale != 'linear' or y_scale != 'linear':\n        _scaler(p, x_scale, y_scale)\n\n    if x_limit != None or y_limit != None:\n        _limiter(data=data, x=x, y=y, x_limit=x_limit, y_limit=y_limit)\n\n    # START OF TITLES >>>\n    _titles(title, sub_title=sub_title)\n    _thousand_sep(p, ax, data, x, y)\n    _footer(p, x_label, y_label, legend, n, save)\n","repo_name":"autonomio/astetik","sub_path":"astetik/plots/scat.py","file_name":"scat.py","file_ext":"py","file_size_in_byte":4086,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"4"} +{"seq_id":"30802042659","text":"\"\"\"\nAward Budget Cuts\n\nThe awards committee of your alma mater (i.e. your college/university) asked for your assistance with a budget allocation problem they’re facing. \nOriginally, the committee planned to give N research grants this year. However, due to spending cutbacks, the budget was reduced to newBudget dollars \nand now they need to reallocate the grants. The committee made a decision that they’d like to impact as few grant recipients as possible by applying \na maximum cap on all grants. 
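(Illustrative walk-through, added as an aside and not part of the original prompt text: with the example input below, the grants sort to [2, 50, 100, 120, 1000] against newBudget = 190; paying the 2 in full leaves 188 for the remaining four grants, and capping at the next grant would cost 50*4 = 200 > 188, so the cap settles at 188/4 = 47.)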
Every grant initially planned to be higher than cap will now be exactly cap dollars. \nGrants less than or equal to cap, obviously, won’t be impacted.\n\nGiven an array grantsArray of the original grants and the reduced budget newBudget, \nwrite a function findGrantsCap that finds in the most efficient manner a cap such that the least number of recipients is impacted \nand that the new budget constraint is met (i.e. sum of the N reallocated grants equals to newBudget).\n\n\ninput: grantsArray = [2, 100, 50, 120, 1000], newBudget = 190\n\noutput: 47 # and given this cap the new grants array would be\n           # [2, 47, 47, 47, 47]. Notice that the sum of the\n           # new grants is indeed 190\n \n\"\"\"\n\n\n# O(nlogn) time\n# O(1) space \ndef find_grants_cap(grantsArray, newBudget): \n    n = len(grantsArray)\n    grantsArray.sort()\n    \n    # Initialize variables to track how much of the budget is left\n    # and how many grants left to cover\n    amount_budget_left = float(newBudget)\n    count_grants_left = n\n    for i in range(n):\n        money_req = grantsArray[i] * count_grants_left\n        if money_req >= amount_budget_left:\n            # Case 1: we'd need more money than we have left\n            # meaning we need to set the cap at this point\n            cap = amount_budget_left / count_grants_left\n            return cap\n        # Case 2: we have more money left to allocate\n        # so we don't set the cap yet\n        amount_budget_left -= grantsArray[i]\n        count_grants_left -= 1\n    \n    return newBudget\n","repo_name":"ritakalach/pramp-solutions","sub_path":"code/award_budget_cuts.py","file_name":"award_budget_cuts.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"52"} +{"seq_id":"4630260715","text":"# Usage: python merged2output.py [state] [Options]\n# 'state' is a lowercase name of a US State\n# Options:\n#     None      -> create the .idx, .json, .novert.json files\n#     -idx      -> create the state's .idx file\n#     -json     -> create the state's .json file\n#     -novert   -> create the state's .novert.json file\n#     -readable -> create a .idx.json that contains the data that is encoded in the .idx\n#                  (will also recreate the .idx file)\n#     -all      -> create all 4 file types\n\n\nimport pickle\nimport io\nimport json\nimport struct\nimport pandas as pd\nimport geopandas\nimport logging\nimport os\nimport sys\nimport hashlib\nimport time\nimport csv\nimport zlib\nfrom shapely.geometry import mapping\n\nfrom util import (\n    # Constants\n    STATEPARSER_CACHE_LOCATION,\n    OUTPUT_PREFIX,\n    OUTPUT_IDX_LOCATION,\n    OUTPUT_JSON_LOCATION,\n    MAGIC_NUMBER,\n    STATEKEY_LOCATION,\n    LOGMODE,\n\n    # Functions\n    parseState\n)\nMERGED_DF_INPUT = STATEPARSER_CACHE_LOCATION + '{state}.state.pk'\nSHP_OUTPUT = OUTPUT_PREFIX + '{state}/shp/'\n\nARGUMENTS = set(['-idx', '-json', '-novert', '-readable', '-districts', '-shp'])\n\n# .idx data formats\n\"\"\"\nKey:\nUsed:\n    > -> big endian\n    B -> unsigned char -> 1 byte\n    I -> unsigned int -> 4 bytes\n    Q -> u-long long -> 8 bytes\nOthers:\n    h -> short -> 2 bytes\n    i -> int -> 4 bytes\n    l -> long -> 4 or 8 bytes\n    q -> long long -> 8 bytes\n    d -> double -> 8 bytes\n    < -> little endian\n\"\"\"\nENDIAN = '>'\nHEADER_F = ENDIAN + 'IQQBBII' # unused legacy format; the real header is HEADER1_F + HEADER2_F (18 bytes)\nNODE_RECORD_F = ENDIAN + 'II' # 8 bytes\nNODE_ID_F = ENDIAN + 'I' # 4 bytes\nAREA_F = ENDIAN + 'I' # 4 bytes\nNEIGHBOR_F = NODE_ID_F # 4 bytes\nDEMOGRAPHICS_F = ENDIAN + 'IIIIII' # 24 bytes\n\n#Used to break up header for checksum calculation\nHEADER1_F = ENDIAN + 'II' # Just magic num, checksum doesn't need reformatting packing \nHEADER2_F = ENDIAN + 'BBII'\n\n\ndef 
readLastArtifact(state: str):\n    \"Load the previous artifact into memory\"\n    with io.open(MERGED_DF_INPUT.format(state=state), 'rb') as handle:\n        payload = pickle.load(handle)\n    return payload\n\ndef initializeOutput(state):\n    \"Create the output directory defined in util.py if it doesn't exist\"\n\n    logging.info(\"Initializing Output\")\n    if not os.path.isdir(OUTPUT_PREFIX):\n        logging.info(f\"Creating {OUTPUT_PREFIX}\")\n        os.mkdir(OUTPUT_PREFIX)\n    \n    outloc = OUTPUT_PREFIX + f\"{state}/\"\n    if not os.path.isdir(outloc):\n        logging.info(f\"Creating {outloc}\")\n        os.mkdir(outloc)\n\ndef getNeighbors(df):\n    \"Returns a 2D list that stores a list of neighbors for each precinct\"\n    geo = df['geometry'].tolist()\n    \n    neighbors = [[] for i in range(len(geo))]\n    for i in range(len(geo)):\n        for j in range(i):\n            if geo[i].touches(geo[j]):\n                neighbors[i].append(j)\n                neighbors[j].append(i)\n    \n    return neighbors\n\ndef getPolyCoords(geo):\n    \"Returns a list holding each POLYGON's exterior ring as a tuple of (x,y) coords in the form ((x1,y1),...,(xn,yn))\"\n    coordsList = []\n    for i in range(len(geo)):\n        coordsList.append(mapping(geo[i])[\"coordinates\"][0])\n    return coordsList\n\n#Unused\ndef getVertexStructList(vertList):\n    \"Returns a list of byte structs that each contain a coordinate (x,y)\"\n    vertices = []\n    for v in vertList:\n        vertices.append(struct.pack(VERTEX_F, float(v[0]), float(v[1])))\n    return vertices\n\ndef getNeighborStructList(neighborsList):\n    \"Returns a list of byte structs that each contain a neighbor GEOID\"\n    neighbors = [struct.pack(NEIGHBOR_F, int(n)) for n in neighborsList]\n    return neighbors\n\ndef packDemograpchics(prec):\n    \"Returns a packed byte struct (plus a readable list) of the demographic data for the precinct\"\n    # Sum otherpop and add data in correct order\n    otherpop = prec['otherPop'] + prec['pacisPop'] + prec['multiPop']\n    packed = struct.pack(DEMOGRAPHICS_F, int(prec['totalPop']),\n                                         int(prec['blackPop']),\n                                         int(prec['nativeAPop']),\n                                         int(prec['asianPop']),\n                                         int(prec['whitePop']),\n                                         int(otherpop))\n    readable = [int(prec['totalPop']),\n                int(prec['blackPop']),\n                int(prec['nativeAPop']),\n                int(prec['asianPop']),\n                int(prec['whitePop']),\n                int(otherpop)]\n\n    return packed, readable\n\ndef calcNodeSize(numN):\n    \"Returns the size of the node record in bytes\"\n    IDSize = struct.calcsize(NODE_ID_F)\n    areaSize = struct.calcsize(AREA_F)\n    neighborsSize = struct.calcsize(NEIGHBOR_F) * numN\n    demoSize = struct.calcsize(DEMOGRAPHICS_F)\n    return IDSize + areaSize + neighborsSize + demoSize\n\ndef calcCheckSum(state):\n    \"Calculates a checksum to be included in the data header\"\n    prev = 0\n    with open(OUTPUT_IDX_LOCATION.format(state=state)+'.temp', 'rb') as idx:\n        while True:\n            data = idx.read(2**20)\n            if not data:\n                break\n            prev = zlib.crc32(data, prev)\n\n        idx.close()\n\n    return prev\n\ndef getStateMeta(state):\n    state = state[:1].upper() + state[1:]\n\n    stateKeys = csv.reader(open(STATEKEY_LOCATION))\n    for row in stateKeys:\n        if state == row[2]:\n            return row[1], int(row[4]), int(row[0])\n\n    return None\n\ndef getTimeDiff(start):\n    return round(time.time()-start, 1)\n\ndef toIdx(df, state: str, stCode: str, numDistricts: int, readable=False):\n    \"Formats and outputs a .idx from the data in the dataframe\"\n    # Get lists of neighbors for each precinct\n    neighborsLists = getNeighbors(df)\n\n    # Used to store records for printing later\n    nodeRecords = []\n    nodes = []\n\n    # In case the user wants readable output for testing\n    readableRecs = []\n    readableNodes = []\n\n    # To keep track of position of node records, cumulative 
length of previous records\n    nodePos = 0\n\n    for index, precinct in df.iterrows():\n        # Pack node #[index]'s data\n\n        # Create and Store Node Record\n        numNeighbors = len(neighborsLists[index])\n        nodeRecord = struct.pack(NODE_RECORD_F, numNeighbors, nodePos)\n        nodeRecords.append(nodeRecord)\n\n        # Pack node data\n        nodeID = struct.pack(NODE_ID_F, int(index))\n        area = struct.pack(AREA_F, precinct.land + precinct.water)\n\n        # neighbor_id #1 - neighbor_id #n\n        neighborsPacked = getNeighborStructList(neighborsLists[index])\n\n        # demographics\n        demoPacked, readableDemo = packDemograpchics(precinct)\n\n        # Create and Store Node\n        node = [nodeID] + [area] + neighborsPacked + [demoPacked]\n        nodes.append(node)\n\n        \n\n        if (readable):\n            readableRecs.append((int(index), numNeighbors, nodePos))\n            readableNodes.append((int(index), \n                                  precinct.land + precinct.water,\n                                  neighborsLists[index],\n                                  readableDemo))\n\n        # recalculate nodePos for next record\n        nodePos = nodePos + calcNodeSize(numNeighbors)\n\n    #END OF FORLOOP\n\n    # Create second half of State Header, missing magic_number, checksum\n    numNodes = len(df)\n    header2 = struct.pack(HEADER2_F, ord(stCode[0]), ord(stCode[1]), numNodes, numDistricts)\n\n    # Output Struct Byte data to idx file\n    tempOut = open(OUTPUT_IDX_LOCATION.format(state=state)+'.temp', 'wb')\n    tempTotal = 0\n    tempTotal += tempOut.write(header2)\n    for record in nodeRecords:\n        tempTotal += tempOut.write(record)\n    for node in nodes: # will be list of entries (id, area, neighbor1, ..., demographics)\n        for data in node:\n            tempTotal += tempOut.write(data)\n    tempOut.close()\n\n    # Calculate and pack checksum\n    checkSum = calcCheckSum(state)\n    header1 = struct.pack(HEADER1_F, MAGIC_NUMBER, checkSum)\n\n    # Write Out new header including magic_num and checksum\n    idxOut = open(OUTPUT_IDX_LOCATION.format(state=state), 'wb')\n    idxTotal = 0\n    idxTotal += idxOut.write(header1)\n\n    #write out everything else that was stored in temp\n    with open(OUTPUT_IDX_LOCATION.format(state=state)+'.temp', 'rb') as tempIn:\n        data = tempIn.read()\n        idxTotal += idxOut.write(data)\n        tempIn.close()\n\n    logging.info(f\"Finished writing {idxTotal} bytes to {state}.idx\")\n\n    # Remove temp\n    os.remove(OUTPUT_IDX_LOCATION.format(state=state)+'.temp')\n\n    # print readable .idx.json\n    if(readable):\n        logging.info(f\"Writing to \" + OUTPUT_IDX_LOCATION.format(state=state) + '.json')\n        written = readableIDX(state, checkSum, stCode, numNodes, numDistricts, readableRecs, readableNodes)\n        logging.info(f\"Finished writing {written} bytes to {state}.idx.json\")\n    \n\ndef readableIDX(state, checkSum, stCode, numNodes, numDistricts, nodeRecords, nodesList):\n    records = []\n    for rec in nodeRecords:\n        record = {\n            \"nodeID\": rec[0],\n            \"numNeighbors\": rec[1],\n            \"nodePos\": rec[2]\n        }\n        records.append(record)\n    nodes = []\n    for n in nodesList:\n        node = {\n            \"nodeID\": n[0],\n            \"area\": n[1],\n            \"neighbors\": n[2],\n            \"demographics\": {\n                \"totalPop\": n[3][0],\n                \"blackPop\": n[3][1],\n                \"nativeAPop\": n[3][2],\n                \"asianPoP\": n[3][3],\n                \"whitePop\": n[3][4],\n                \"otherPop\": n[3][5]\n            }\n        }\n        nodes.append(node)\n    header = {\n        \"magic_num\": hex(MAGIC_NUMBER),\n        \"checkSum\": hex(checkSum),\n        \"stCode\": stCode,\n        \"numNodes\": numNodes,\n        \"numDistricts\": numDistricts,\n        \"node_records\": records,\n        \"nodes\": nodes\n    }\n\n    with open(OUTPUT_IDX_LOCATION.format(state=state)+'.json', \"w\") as outfile:\n        return outfile.write(json.dumps(header, indent = 4))\n\n\ndef toJSON(df, state: str, stCode: str, maxDistricts: int, fips: int, 
includeV=True):\n\n    # Convert each precinct's POLYGON into a list of (x,y) coordinates\n    coordLists = getPolyCoords(df.geometry)\n\n    precincts = []\n    for index, prec in df.iterrows():\n        precName = prec['name']\n        precID = index\n\n        vertices = []\n        if includeV:\n            for v in coordLists[index]:\n                coord = {\n                    \"lat\": float(v[1]),\n                    \"lng\": float(v[0])\n                }\n                vertices.append(coord)\n\n        precinctEntry = {\n            \"name\": precName,\n            \"id\": precID,\n            \"vertices\": vertices,\n        }\n        precincts.append(precinctEntry)\n\n    dictionary = {\n        \"state\": stCode,\n        \"maxDistricts\": maxDistricts,\n        \"fips\": fips,\n        \"precincts\": precincts\n    }\n\n    json_loc = OUTPUT_JSON_LOCATION.format(state=state)\n    if not includeV:\n        json_loc = json_loc[:-5]+'.novert.json'\n\n    with open(json_loc, \"w\") as outfile:\n        return outfile.write(json.dumps(dictionary, indent = 4))\n\ndef checkArgs(args):\n    \"Checks that all arguments passed in are valid, returns only the valid ones in a set\"\n    clean = []\n    if args == None or len(args) == 0:\n        return None\n    for arg in args:\n        if arg == '-all':\n            return set([arg])\n        if arg not in ARGUMENTS:\n            print(\"Unknown argument: \" + arg)\n        else:\n            clean.append(arg)\n    if len(clean) == 0:\n        return None\n    return set(clean) \n\ndef toJSONDict(df, state, stCode):\n    mapping = []\n    for index, prec in df.iterrows():\n        # mapping.append(\n        #     {\"Precinct:\": int(index), \"District\": prec['district']}\n        # )\n        mapping.append([int(index), prec['district']])\n    output = {\n        \"state\": stCode,\n        \"map\": mapping\n    }\n    districtsLoc = OUTPUT_JSON_LOCATION.format(state=state)[:-5]+'.districts.json'\n    with open(districtsLoc, \"w\") as outfile:\n        return outfile.write(json.dumps(output, indent = 4))\n\n\ndef toSHP(df, state):\n    shpDir = SHP_OUTPUT.format(state=state)\n    if not os.path.isdir(shpDir):\n        logging.info(f\"Creating {shpDir}\")\n        os.mkdir(shpDir)\n    geodf = geopandas.GeoDataFrame(df, geometry='geometry')\n    geodf.to_file((SHP_OUTPUT + '{state}.shp').format(state=state))\n    \n    \ndef main(args):\n    \"Creates the output .idx and json files from the cleaned and merged dataframe\"\n    startTime = time.time()\n\n    # Get state\n    state = args[0]\n    logging.info(f\"Outputting data for state: \" + state)\n    \n\n    # Check args\n    args = checkArgs(set(args[1:]))\n\n    # Get metadata\n    stCode, numDistricts, fips = getStateMeta(state)\n    logging.info(f\"Retrieved state {state}\")\n\n    # Load in merged data\n    logging.info(f\"Loading in the artifact: \" + MERGED_DF_INPUT.format(state=state))\n    df = readLastArtifact(state)\n    logging.info(f\"Successfully loaded artifact\")\n\n    #initialize output directory\n    initializeOutput(state)\n\n    # Output to .shp file\n    if (args != None and ('-shp' in args or '-all' in args)):\n        toSHP(df, state)\n\n    # Output to .idx file\n    if (args != None and ('-all' in args or '-readable' in args)):\n        logging.info(f\"Writing to \" + OUTPUT_IDX_LOCATION.format(state=state))\n        written = toIdx(df, state, stCode, numDistricts, True)\n    elif (args == None or '-idx' in args):\n        logging.info(f\"Writing to \" + OUTPUT_IDX_LOCATION.format(state=state))\n        written = toIdx(df, state, stCode, numDistricts)\n    \n\n    # Output to .JSON file\n    if (args == None or '-json' in args or '-all' in args):\n        logging.info(f\"Writing to \" + OUTPUT_JSON_LOCATION.format(state=state))\n        written = toJSON(df, state, stCode, numDistricts, fips)\n        logging.info(f\"Finished writing {written} bytes to {state}.json\")\n\n    if (args == None or '-novert' in args or '-all' in args):\n        # Change where to write to\n        logging.info(f\"Writing to \" + 
OUTPUT_JSON_LOCATION.format(state=state)[:-5]+'.novert.json')\n        written = toJSON(df, state, stCode, numDistricts, fips, False)\n        logging.info(f\"Finished writing {written} bytes to {state}.novert.json\")\n\n    if (args == None or '-districts' in args or '-all' in args):\n        logging.info(f\"Writing to \" + OUTPUT_JSON_LOCATION.format(state=state)[:-5]+'.districts.json')\n        written = toJSONDict(df, state, stCode)\n        logging.info(f\"Finished writing {written} bytes to {state}.districts.json\")\n\n    logging.info(f\"Finished writing {state} output in {getTimeDiff(startTime)} seconds\\n\\n\")\n\n\nif __name__ == \"__main__\":\n    logging.basicConfig(filename='merged2output.log', level=logging.INFO, filemode=LOGMODE)\n    main(sys.argv[1:] if len(sys.argv) >= 2 else None)\n\n","repo_name":"project-rakan/bladecaller","sub_path":"gis2idx/merged2output.py","file_name":"merged2output.py","file_ext":"py","file_size_in_byte":14823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26859089081","text":"import numpy as np\r\nimport numba as nb\r\nimport pylab as pl\r\nimport cv2\r\nimport ediff\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\n\r\np_low, p_high = -0.1, 0.1\r\nsize = 128\r\nnp_dtype = np.float32\r\n\r\npattern_list = np.array((\r\n    (1, 0, 0),\r\n    (0, 1, 0),\r\n    (0, 0, 1),\r\n    (0, 0, 0),\r\n    (1, 1, 1),\r\n), dtype = np_dtype)\r\n'''\r\nkernel = np.array((\r\n    (0, 0, 7),\r\n    (3, 5, 1),\r\n), dtype = np_dtype)\r\nc_h_krnl = 0\r\nc_w_krnl = 1\r\nkernel /= np.sum(kernel)\r\n'''\r\nkernel = np.array((\r\n    (0, 0, 0, 7, 5),\r\n    (3, 5, 7, 5, 3),\r\n    (1, 3, 5, 3, 1),\r\n), dtype = np_dtype)\r\nkernel /= np.sum(kernel)\r\nc_h_krnl = 0\r\nc_w_krnl = 2\r\n\r\nkernel *= 0.5\r\nprint(\"Load Image...\")\r\nh, w, c = size, size, 3\r\nassert c == 3\r\n\r\nprint(\"Generate noise_tex...\")\r\n#t_filter = sp.firwin(9, 0.5 - 1e-5, window = \"nuttall\", pass_zero = False, scale = True, nyq = 1.0)\r\nnoise_tex = np.random.uniform(low = p_low, high = p_high, size = (h, w, c)).astype(np_dtype)\r\n#p_tex = conv_2d(p_tex, t_filter)\r\n\r\nprint(\"Add noise...\")\r\n#img += noise_tex\r\n\r\nprint(\"Do...\")\r\nkernel = kernel.repeat(3).reshape(*kernel.shape, 3)\r\n\r\n@nb.njit(parallel=True, fastmath=True)\r\ndef do():\r\n    out = np.zeros((256 // 4, 256 // 4, len(pattern_list)), dtype = np_dtype)\r\n    for ii in nb.prange(256 // 4):\r\n        i = ii * 4\r\n        for jj in nb.prange(256 // 4):\r\n            j = jj * 4\r\n            v = i / 255\r\n            vv = j / 255\r\n            iv = np.array([v, vv, 0], dtype = np_dtype)\r\n            blk = np.empty((size, size, 3), dtype = np_dtype)\r\n            blk[:] = (iv)\r\n            print(i)\r\n            img_pattern_idx = ediff.do_error_diffusion_pattern_mse_pidx(blk, kernel, c_h_krnl, c_w_krnl, pattern_list)\r\n            for i_pattern in range(len(pattern_list)):\r\n                out[ii, jj, i_pattern] = np.mean(img_pattern_idx == i_pattern)\r\n    return out\r\nout = do()\r\n\r\nX, Y = np.meshgrid(np.arange(0, 256, 4), np.arange(0, 256, 4))\r\nfor i, color in enumerate(pattern_list):\r\n    fig = pl.figure()\r\n    ax = fig.gca(projection = '3d')\r\n    pl.title(str(color))\r\n    ax.plot_surface(X, Y, out[:, :, i])\r\n#pl.legend()\r\npl.show()","repo_name":"tuxzz/hico-online-toolkit","sub_path":"ediff-dev/v2/ediff_style.py","file_name":"ediff_style.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72623466085","text":"class Solution:\n    def islandPerimeter(self, grid: List[List[int]]) -> int:\n        \n        M, N, result = len(grid), len(grid[0]), 0\n        \n        for 
i in range(M):\n            for j in range(N):\n                if grid[i][j] == 1:\n                    result += sum([\n                        grid[i+1][j] == 0 if 0 <= i+1 < M else 1,\n                        grid[i-1][j] == 0 if 0 <= i-1 < M else 1,\n                        grid[i][j+1] == 0 if 0 <= j+1 < N else 1,\n                        grid[i][j-1] == 0 if 0 <= j-1 < N else 1,\n                    ])\n        \n        return result\n    \n    \n    \n    \n\"\"\"\nGreedy -> find first island\n\nDFS w/ visited to stop duplicate stuff\n\ncheck if a coordinate is a perimeter or not\n    -> i.e., count the number of zeros the coordinate is touching \n\"\"\"","repo_name":"josharnoldjosh/LeetCodeSolutions","sub_path":"island-perimeter/island-perimeter.py","file_name":"island-perimeter.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17617690829","text":"from telepot import Bot, exception\nfrom telepot.loop import MessageLoop\nimport cv2\n\n\nclass Buffer:\n\n    def __init__(self, _type, image):\n        self._type = _type\n        self._image = image\n\n    def read(self):\n        s, img = cv2.imencode(self._type, self._image)\n        return img.tobytes()\n\n\nclass Telegram:\n\n    def __init__(self, configs: dict):\n        self.bot = Bot(configs['token'])\n        self._default_chat_id = configs['default_chat_id']\n        self._accepted_names = configs['accepted_names']\n        self._bot_name = configs['bot_name']\n\n        self.last_message = None\n        MessageLoop(self.bot, self._handle).run_as_thread()\n\n    def _handle(self, msg):\n        if msg:\n            chat_id = msg['chat']['id']\n            first_name = msg['from']['first_name']\n            message = str(msg.get('text', ''))\n            first_world = message[:len(self._bot_name)] if len(message) > len(self._bot_name) else ''\n\n            print(f\"[-] ({chat_id}) >> {first_name} sent: {message}\")\n\n            if first_world.lower() == self._bot_name:\n                message = message[len(self._bot_name):]\n                print(f'message to monica >> {message}')\n                self.last_message = message.strip().lower()\n\n    def send_photo(self, image, name: str = 'photo.jpg', _type: str = '.JPG', chat_id: int = None):\n        if not chat_id:\n            chat_id = self._default_chat_id\n        self.bot.sendPhoto(chat_id, (name, Buffer(_type=_type, image=image)))\n\n    def send_message(self, text: str, chat_id: int = None):\n        if not chat_id:\n            chat_id = self._default_chat_id\n        try:\n            self.bot.sendMessage(chat_id, text)\n        except exception.TelegramError:\n            print(f'[-] Chat not found: {chat_id}')\n\n    def send_bool_question(self, question: str, chat_id: int = None):\n        attempts = 3\n        self.last_message = None\n        self.send_message(text=question, chat_id=chat_id)\n        while attempts > 0:\n            if self.last_message:\n                attempts -= 1\n                if self.last_message in ['sim', 'pode']:\n                    return True\n                elif self.last_message in ['nao', 'nao pode']:\n                    return False\n                else:\n                    self.send_message(text=\"Por favor, responda com 'sim' ou 'nao'.\", chat_id=chat_id)\n                    self.last_message = None\n        return False\n","repo_name":"caio-cacador/OpenDoorWithFaceRecognition","sub_path":"OpenDoorWithFaceRecognition/telegram/telegram.py","file_name":"telegram.py","file_ext":"py","file_size_in_byte":2420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5318236289","text":"#!/usr/bin/python3\n\"\"\"Create a Class Square\"\"\"\n\n\nclass Square:\n\n    \"\"\"A class that defines a square\"\"\"\n\n    def __init__(self, size=0):\n\n        \"\"\"__init__ method\n\n        Args:\n            size (int): Description of size\n        \"\"\"\n        if size is not None:\n            try:\n                if type(size) is not int:\n                    raise TypeError\n                if size < 0:\n                    raise ValueError\n                self._Square__size = size\n            except TypeError:\n                print(\"size must be an integer\")\n            
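            # Illustrative usage sketch (an addition, not from the original file),
            # assuming the class is exercised directly:
            #   Square(3).area()  -> 9
            #   Square("3")       -> prints "size must be an integer"; __size stays unset
            #   Square(-1)        -> prints "size must be >= 0"; __size stays unset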
except ValueError:\n print(\"size must be >= 0\")\n\n def area(self):\n return self._Square__size * self._Square__size\n","repo_name":"aicha652/alx-higher_level_programming","sub_path":"0x06-python-classes/3-square.py","file_name":"3-square.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21664039936","text":"from django.shortcuts import render, redirect\r\nfrom . forms import ContactForm\r\nfrom django.contrib import messages\r\n\r\n# Create your views here.\r\ndef contact(request):\r\n context = {'title' : \"Africa Disability Alliance\"}\r\n\r\n \r\n form = ContactForm()\r\n if request.method == 'POST':\r\n # create a form instance and populate it with data from the request:\r\n form = ContactForm(request.POST)\r\n # check whether it's valid:\r\n if form.is_valid():\r\n # subject = form.cleaned_data['subject']\r\n # email = form.cleaned_data['email']\r\n # name = form.cleaned_data['name']\r\n # message = form.cleaned_data['message']\r\n form.save()\r\n\r\n \r\n\r\n # process the data in form.cleaned_data as required\r\n # ...\r\n # redirect to a new URL:\r\n messages.success(request,f\"Your information has been submitted\")\r\n return redirect('contact')\r\n\r\n # if a GET (or any other method) we'll create a blank form\r\n else:\r\n form = ContactForm()\r\n # context['form'] = ContactForm()\r\n\r\n # return render(request, 'name.html', {'form': form})\r\n \r\n return render(request,\"contact/contact.html\",{'form': form})","repo_name":"Carrington-dev/disability_test","sub_path":"contact/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32759222191","text":"import pygame as pg\nfrom math import cos, sin, ceil, pi, log10\nfrom src.models.legs import Legs\nfrom src.util.physics import collide, dist_between, angles_between\nfrom src.util.settings import HEIGHT, MODEL_COLORS, OUT_OF_BOUNDS, WIDTH, MAX_SIZE, MIN_SIZE\n\nclass Creature:\n def __init__(self, num_parts, pos, size, max_parts, num_pair_legs, leg_length):\n self.num_parts = num_parts\n self.head = pos\n self.z_pos = pos[2]\n self.skeleton = []\n self.size = size\n self.max_parts = max_parts\n self.legs = Legs(num_pair_legs=num_pair_legs, \n leg_length=leg_length, )\n self.build_skeleton(pos)\n self.give_legs()\n \n def build_skeleton(self, pos, a=0, upright=False):\n self.skeleton = []\n for i in range(self.num_parts):\n self.skeleton.append([pos[0]-(i+1)*2*self.size, pos[1], pos[2], a])\n \n if upright:\n self.upright()\n \n def give_wings(self):\n self.legs.transform_leg(self.legs.free_leg(), 'wing', 1)\n\n def give_arms(self):\n self.legs.transform_leg(self.legs.free_leg(), 'arm', 1)\n\n def give_legs(self):\n if self.legs.num_pair_legs == 0:\n return\n if self.legs.num_pair_legs>self.num_parts:\n return\n \n ratio_body_to_legs = ceil(self.num_parts/(self.legs.num_pair_legs+1))\n\n if self.legs.num_pair_legs==self.num_parts:\n ratio_body_to_legs = 1\n \n self.legs.attached_segments = []\n for i in range(len(self.skeleton)):\n if i%ratio_body_to_legs==0:\n self.legs.attached_segments.append(i)\n self.legs.leg_types.append({\n 'type': 'leg',\n 'level': 1,\n })\n self.legs.build_legs(self.skeleton[i])\n if self.legs.num_legs()==self.legs.num_pair_legs:\n break\n \n self.upright()\n\n def update_legs(self):\n if self.legs.num_pair_legs == 0:\n return\n if self.legs.num_pair_legs>self.num_parts:\n 
return\n        \n        ratio_body_to_legs = self.num_parts/self.legs.num_pair_legs\n\n        # first add new legs\n        for i in range(self.legs.num_pair_legs-len(self.legs.attached_segments)):\n            self.legs.attached_segments.insert(0, 0)\n            self.legs.leg_types.insert(0, {\n                'type': 'leg',\n                'level': 1,\n            })\n\n        # then update how they are connected\n        for i in range(self.legs.num_pair_legs):\n            self.legs.attached_segments[i] = int(i*ratio_body_to_legs)\n            self.legs.build_legs(self.skeleton[self.legs.attached_segments[i]])\n\n        self.upright()\n\n    def change_body(self, change_in_size):\n        self.size+=change_in_size\n        if self.size > MAX_SIZE:\n            self.size = MIN_SIZE\n            self.num_parts+=1\n            new_pos = [self.head[0], self.head[1], self.z_pos]\n            if self.num_parts > self.max_parts:\n                self.num_parts = self.legs.num_pair_legs\n                self.build_skeleton(new_pos, upright=True)\n                self.update_legs() # TODO: change to a different one \n                return round(log10(self.max_parts*MAX_SIZE))\n            \n            self.build_skeleton(new_pos, upright=True)\n        return 0\n\n    def increase_body_potential(self):\n        self.max_parts+=1\n\n    def change_legs(self, type):\n        if type == 'new':\n            self.legs.num_pair_legs += 1\n            self.update_legs()\n            return\n        \n        existing_leg_index = -1\n        for i in range(len(self.legs.leg_types)-1, -1, -1):\n            if self.legs.leg_types[i]['type'] == 'leg' and self.legs.leg_types[i]['level'] < 3:\n                existing_leg_index = i\n                break\n        \n        if existing_leg_index != -1:\n            self.legs.leg_types[existing_leg_index]['level'] += 1\n\n        self.upright()\n\n    def render(self, screen, camera):\n\n        # if is_moving:\n        #     t = pg.time.get_ticks()\n        #     self.wiggle(t)\n\n        x, y = camera.transform_to_screen(self.head[0:3])\n        if x>WIDTH+OUT_OF_BOUNDS or x<-OUT_OF_BOUNDS or y>HEIGHT+OUT_OF_BOUNDS or y<-OUT_OF_BOUNDS:\n            return False \n        pg.draw.circle(screen, MODEL_COLORS['head'], (x, y), self.size)\n        for i in range(self.num_parts):\n            x, y = camera.transform_to_screen(self.skeleton[i][0:3])\n            pg.draw.circle(screen, MODEL_COLORS['skeleton'], (x, y), self.size)\n            pg.draw.circle(screen, MODEL_COLORS['hurt_box'], (x, y), self.size, 1)\n\n        # if is_moving:\n        #     self.dewiggle(t)\n\n        self.legs.draw(screen, self.skeleton, camera)\n        return True\n\n    def move(self, pos, effects, is_moving):\n        self.head = pos\n        if self.skeleton:\n            dist = dist_between(self.skeleton[0], self.head)\n            angle = angles_between(self.skeleton[0], self.head)['z']\n            self.skeleton[0][0]+=(dist-2*self.size)*cos(angle)\n            self.skeleton[0][1]+=(dist-2*self.size)*sin(angle)\n            self.skeleton[0][3] = angle\n\n            for i in range(1, len(self.skeleton)):\n                dist = dist_between(self.skeleton[i], self.skeleton[i-1])\n                angle = angles_between(self.skeleton[i], self.skeleton[i-1])['z']\n                self.skeleton[i][0]+=(dist-2*self.size)*cos(angle)\n                self.skeleton[i][1]+=(dist-2*self.size)*sin(angle)\n                self.skeleton[i][3] = angle\n\n        self.legs.move_feet(self.skeleton, effects)\n\n        if is_moving:\n            self.wiggle(pg.time.get_ticks())\n\n    def wiggle(self, t):\n        wiggle_mag = 0.25\n        if self.skeleton:\n            start = self.legs.get_torso_start()\n            period = (len(self.skeleton) - start)\n            for i in range(start, len(self.skeleton)):\n                perp_offset = wiggle_mag*sin(pi/period*i)*cos(t/100)\n                self.skeleton[i][0]+=perp_offset*cos(self.skeleton[i][3]+pi/2)\n                self.skeleton[i][1]+=perp_offset*sin(self.skeleton[i][3]+pi/2)\n\n    def upright(self):\n        torso_segment = self.legs.get_torso_start()\n        for i in range(torso_segment, self.num_parts, 1):\n            self.skeleton[i][2] = self.z_pos\n        for i in range(torso_segment, -1, -1):\n            self.skeleton[i][2]=self.z_pos+self.size*2*(torso_segment-i)\n        
self.head[2]=self.z_pos+self.size*2*(torso_segment+1)\n\n def collide(self, hurt_boxes):\n\n hit_box = [self.head[0], self.head[1], self.head[2], self.size]\n for hurt_box in hurt_boxes:\n if collide(hit_box, hurt_box):\n return True\n\n for i in range(len(self.skeleton)):\n segment = self.skeleton[i]\n hit_box = [segment[0], segment[1], segment[2], self.size]\n for hurt_box in hurt_boxes:\n if collide(hit_box, hurt_box):\n return True\n","repo_name":"HuMangoPP/hello_universe","sub_path":"src/models/creature.py","file_name":"creature.py","file_ext":"py","file_size_in_byte":7069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40783220411","text":"import mpi4py\nmpi4py.rc.initialize = False\nmpi4py.rc.finalize = False\nfrom mpi4py import MPI\n\nstart_time = MPI.Wtime()\nMPI.Init()\ncomm = MPI.COMM_WORLD\nsize = comm.Get_size()\nrank = comm.Get_rank()\n\nsum = 0\ncal_number = int(1000/size)\nstart_value = rank * cal_number\nfor i in range(1, cal_number+1):\n sum = sum + start_value + i\ntotal_sum = comm.gather(sum, root=0)\nif rank==0:\n total = 0\n for num in total_sum:\n total = total + num\n print(\"Sum: {}\".format(total))\nMPI.Finalize()\nend_time = MPI.Wtime()\nprint(\"Id:{}, Time useage:{}\".format(rank, end_time - start_time))\n\n","repo_name":"Happy-Grass/CFD","sub_path":"codes/mpi_sum.py","file_name":"mpi_sum.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"21209365384","text":"from PIL import Image\nfrom PIL import ImageDraw\nfrom PIL import ImageFont\nimport cProfile\nfrom qldsmashparser import *\nimport datetime\n\n\nTOP_PADDING = 10\nLEFT_PADDING = 10\nROW_PADDING = 5\nCOLUMN_PADDING = 10\nLINE_WIDTH = 2\nCELL_PADDING = 2\nLINE_COLOUR = 'black'\nHEADING_COLOUR = 'black'\nCELL_COLOUR = 'black'\n\ndef autosize_font(width, height, text):\n point_size = 1\n font = ImageFont.truetype('Ubuntu-Medium.ttf', size=point_size)\n size = font.getsize(text)\n while size[0] < width and size[1] < height:\n font = ImageFont.truetype('Ubuntu-Medium.ttf', size=point_size)\n size = font.getsize(text)\n point_size += 1\n return font\n\ndef draw_text_vertically(source, text, font, position, fill):\n size = font.getsize(text)\n temp = Image.new('RGBA', size)\n draw = ImageDraw.Draw(temp)\n draw.text((0, 0), text, fill, font)\n rotated = temp.rotate(90, expand=True)\n source.paste(rotated, position, rotated)\n return rotated.size\n\ndef generate_colour_for_number(value, scale_upper_bound=100):\n \"\"\"\n This will just be a scale between red and green and\n non-numeric values being gray because I said so\n This'd be a whole lot easier if I could use the HSV colour space tbh\n \"\"\"\n try:\n number = float(value)\n except ValueError:\n return (224, 224, 224, 255)\n scale = number / scale_upper_bound\n red = int(255 - (255 * scale))\n green = int(255 * scale)\n blue = 0\n return (red, green, blue, 255)\n\ndef draw_table(table, colour_scale=None):\n \"\"\"\n Yeah I know, I need to break this up a bit into smaller pieces\n Anyway, this actually draws the thing, and table better be\n a dict where the values are other dicts\n If you use colour_scale, it should be a number, it's for making colours\n in the background of cells that scale along with the numerical value\n of that cell, so if your values are 0-5 put 5 and if they're 0-100 put\n 100 and I can't be bothered testing for any numbers being negative and\n what that would do\n \"\"\"\n 
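    # Worked example (added sketch): generate_colour_for_number above interpolates
    # a cell value linearly from red to green, truncating with int(), so with
    # colour_scale=100: 0 -> (255, 0, 0, 255), 50 -> (127, 127, 0, 255),
    # 100 -> (0, 255, 0, 255), and any non-numeric cell -> grey (224, 224, 224, 255).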
\n img = Image.new('RGBA', (5000, 5000), 'white')\n canvas = ImageDraw.Draw(img)\n heading_font = ImageFont.truetype('Ubuntu-Medium.ttf', size=20)\n \n left_border = 0\n box_height = 0\n row_line_offsets = []\n #Draw lines across the image at this Y position (offset\n #from where the top heading ends) (later)\n row_positions = {}\n row_heights = {}\n for key in table.keys():\n size = canvas.textsize(key, heading_font)\n if size[0] > left_border:\n left_border = size[0]\n row_heights[key] = (ROW_PADDING * 3) + size[1]\n box_height += ROW_PADDING\n row_positions[key] = box_height\n box_height += (ROW_PADDING * 2) + size[1]\n row_line_offsets.append(box_height)\n left_border += LEFT_PADDING + COLUMN_PADDING\n\n top_border = 0\n for key, value in table.items():\n for inner_key, inner_value in value.items():\n size = heading_font.getsize(inner_key)\n if size[0] > top_border:\n #Using the text width since it will be rotated\n top_border = size[0]\n top_border += TOP_PADDING + ROW_PADDING\n\n canvas.line([\n (left_border, 0),\n (left_border, box_height + top_border - ROW_PADDING)\n ], LINE_COLOUR, LINE_WIDTH)\n\n column_positions = {}\n last_column_position = left_border\n for key, value in table.items():\n for inner_key, inner_value in value.items():\n size = heading_font.getsize(inner_key)\n if inner_key in column_positions:\n position = column_positions[inner_key]\n else: \n column_positions[inner_key] = position = last_column_position\n draw_text_vertically(img, inner_key, heading_font,\n (last_column_position, TOP_PADDING), HEADING_COLOUR)\n last_column_position += size[1]\n position += size[1] + COLUMN_PADDING\n canvas.line([\n (position, 0), \n (position, box_height + top_border - ROW_PADDING)\n ], LINE_COLOUR, LINE_WIDTH)\n last_column_position = position\n try:\n text = str(round(float(inner_value), 2))\n #I can't be stuffed importing the decimal package to remove trailing zeros\n text = text.rstrip('0').rstrip('.') if '.' 
in text else text\n            except ValueError:\n                text = inner_value\n            text_font = autosize_font((size[1] + COLUMN_PADDING - (LINE_WIDTH * 1.5))-(CELL_PADDING * 2),\n                row_heights[key] - (CELL_PADDING * 2) - (LINE_WIDTH * 1.5) - ROW_PADDING, text)\n            text_x = column_positions[inner_key] + CELL_PADDING\n            if (text_font.getsize(text)[0] + CELL_PADDING) < size[1]:\n                text_x += size[1] - (text_font.getsize(text)[0] + CELL_PADDING)\n            text_position = (text_x, row_positions[key] + top_border + CELL_PADDING)\n            if colour_scale is not None:\n                background_x = column_positions[inner_key] + LINE_WIDTH\n                background_y = ((row_positions[key] + top_border) - CELL_PADDING) - LINE_WIDTH\n                background_width = ((size[1] + COLUMN_PADDING) - LINE_WIDTH * 1.5)\n                background_height = row_heights[key]\n                background_box = [(background_x, background_y), \n                    (background_x + background_width, background_y + background_height)]\n                background_colour = generate_colour_for_number(inner_value, colour_scale)\n                canvas.rectangle(background_box, background_colour)\n            canvas.text(text_position, text, CELL_COLOUR, text_font)\n        canvas.text((0, row_positions[key] + top_border), key, HEADING_COLOUR, heading_font)\n\n    for offset in row_line_offsets:\n        canvas.line([\n            (0, top_border + offset),\n            (last_column_position, top_border + offset)\n        ], LINE_COLOUR, LINE_WIDTH)\n\n    canvas.line([(0, top_border), (last_column_position, top_border)], LINE_COLOUR, LINE_WIDTH)\n\n    return img.crop((0, 0, last_column_position, box_height + top_border - ROW_PADDING))\n\ndef main():\n    #PR season starts at RotA 19 apparently (7 Dec 2016)\n    table = get_act_matchup_table('set_win_rate', datetime.date(2016, 12, 7))\n    image = draw_table(table, 100)\n    image.save('output_win_rate.png')\n\nif __name__ == \"__main__\":\n    #main()\n    cProfile.run('main()')","repo_name":"Miss-Inputs/qldsmash-parser","sub_path":"qldsmashparser/draw_table.py","file_name":"draw_table.py","file_ext":"py","file_size_in_byte":6600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16461706454","text":"from data import db_session, users, cart, items_db\n\nfrom flask import Flask, render_template, redirect, request\nfrom flask_login import LoginManager, login_user, logout_user, login_required, current_user\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField\nfrom wtforms.validators import DataRequired\n\nimport sqlite3\nimport io\nimport requests\nimport os\n\n\n# login form\nclass LoginForm(FlaskForm):\n    email = StringField('Почта', validators=[DataRequired()])\n    password = PasswordField('Пароль', validators=[DataRequired()])\n    remember_me = BooleanField('Запомнить меня')\n    submit = SubmitField('Войти')\n\n\n# registration form\nclass RegisterForm(FlaskForm):\n    email = StringField('Почта', validators=[DataRequired()])\n    password = PasswordField('Пароль', validators=[DataRequired()])\n    password_again = PasswordField('Повторите пароль', validators=[DataRequired()])\n    name = StringField('Имя пользователя', validators=[DataRequired()])\n    about = TextAreaField(\"Немного о себе\")\n    submit = SubmitField('Войти')\n\n\n# main application code\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'yandexlyceum_secret_key'\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\n\n\n# fetch data from the database\nconnection = sqlite3.connect(\"db/data_base.db\")\ncur = connection.cursor()\ncategoriez = cur.execute(\"\"\"SELECT * FROM Categories\"\"\").fetchall()\nitemz = cur.execute(\"\"\"SELECT * FROM 
Items\"\"\").fetchall()\n\n\n# обновление информации о предметах\nitems = dict()\ndef reload_items():\n global items\n connection = sqlite3.connect(\"db/data_base.db\")\n cur = connection.cursor()\n for category in categoriez:\n items[category[1]] = cur.execute(\"\"\"SELECT * FROM Items WHERE Category IN (SELECT id FROM Categories WHERE Name = ?)\"\"\", (category[1], )).fetchall()\n\n\nreload_items()\n\n\n# изменение кол-ва предметов в бд\ndef inc_item(name, c):\n session = db_session.create_session()\n i = session.query(items_db.Item).filter(items_db.Item.name == name).first()\n if i:\n if i.count+c>=0:\n i.count += c\n session.commit()\n return True\n else:\n session.commit()\n return False\n\n\n# изменение кол-ва предметов в корзине\ndef inc_likes(name, c):\n session = db_session.create_session()\n likes = session.query(cart.Likes).filter(cart.Likes.item_name == name, cart.Likes.user == current_user).first()\n if not likes:\n if c<=0:\n return\n likes = cart.Likes()\n likes.item_name = name\n likes.count = c\n current_user.likes.append(likes) \n session.merge(current_user)\n inc_item(name, -c)\n elif -c >= likes.count:\n connection_2 = sqlite3.connect(\"db/data_base.db\")\n cur_2 = connection_2.cursor()\n n = cur_2.execute(\"\"\"SELECT count, item_name FROM likes WHERE id = ?\"\"\", (likes.id, )).fetchone()\n if n:\n inc_item(n[1], n[0])\n cur_2.execute(\"\"\"DELETE FROM likes WHERE id = ?\"\"\", (likes.id, ))\n connection_2.commit()\n else:\n if inc_item(name, -c):\n likes.count += c \n session.commit()\n\ncategories = []\nfor category in categoriez:\n categories.append([category[2], category[1]])\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n session = db_session.create_session()\n return session.query(users.User).get(user_id)\n\n\n@app.route('/')\ndef root():\n return render_template('home.html', categories=categoriez)\n\n\n# личный кабинет\n@app.route('/my')\ndef my():\n reload_items()\n connection_1 = sqlite3.connect(\"db/data_base.db\")\n cur1 = connection_1.cursor()\n us = cur1.execute(\"\"\"SELECT * FROM Users\"\"\").fetchall() \n me = dict()\n for user in us:\n me[user[0]] = cur1.execute(\"\"\"SELECT * FROM Likes WHERE user_id = ?\"\"\", (user[0],)).fetchall()\n summ = cur1.execute(\"\"\"SELECT sum(likes.count * price) as summ FROM likes, Items WHERE user_id = ? and name = item_name\"\"\", (user[0],)).fetchone()\n try:\n return render_template('my.html', items=itemz, me=me[current_user.id], categories=categoriez, summ=f'{int(summ[0])} руб. 
{int((summ[0] - int(summ[0]))*100)} коп.')\n    except Exception:\n        return render_template('my.html', items=itemz, me=me[current_user.id], categories=categoriez, summ='0')\n\n# log in\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n    form = LoginForm()\n    if form.validate_on_submit():\n        session = db_session.create_session()\n        user = session.query(users.User).filter(users.User.email == form.email.data).first()\n        if user and user.check_password(form.password.data):\n            login_user(user, remember=form.remember_me.data)\n            return redirect(\"/\")\n        return render_template('login.html', message=\"Неправильный логин или пароль\", form=form)\n    return render_template('login.html', title='Авторизация', form=form)\n\n\n# add an item to the cart\n@app.route('/add/<cat>/<name>', methods=['GET', 'POST'])\ndef add(cat, name):\n    # session = db_session.create_session()\n    # likes = session.query(l.Likes).filter(l.Likes.item_name == pic, l.Likes.user == current_user).first()\n    # if not likes:\n    #     likes = l.Likes()\n    #     likes.item_name = pic\n    #     likes.count = 1\n    #     current_user.likes.append(likes) \n    #     session.merge(current_user)\n    # else:\n    #     likes.count += 1 \n    # session.commit()\n    # inc_item(pic, -1)\n    inc_likes(name, 1)\n    return redirect(f'/item/{cat}/{name}')\n\n\n# decrease an item's count in the cart by 1\n@app.route('/dec/<name>', methods=['GET', 'POST'])\n@login_required\ndef dec(name):\n    inc_likes(name, -1)\n    return redirect('/my')\n\n\n# increase an item's count in the cart by 1\n@app.route('/inc/<name>', methods=['GET', 'POST'])\n@login_required\ndef inc(name):\n    inc_likes(name, 1)\n    return redirect('/my')\n\n\n# remove an item from the cart\n@app.route('/delete/<name>/<int:c>', methods=['GET', 'POST'])\ndef delete(name, c):\n    inc_likes(name, -c)\n    return redirect('/my')\n\n\n# log out\n@app.route('/logout')\n@login_required\ndef logout():\n    logout_user()\n    return redirect(\"/\")\n\n\n# home page\n@app.route('/home')\ndef home():\n    return render_template('home.html', categories=categoriez)\n\n\n# category page\n@app.route('/category/<p>')\ndef category(p):\n    return render_template('category.html', items=items[p], p=p)\n\n\n# item page\n@app.route('/item/<p>/<pic>')\ndef item(p, pic):\n    reload_items()\n    return render_template('item.html', items=items[p], p=pic, category=p)\n\n\n# registration\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n    form = RegisterForm()\n    if form.validate_on_submit():\n        if form.password.data != form.password_again.data:\n            return render_template('register.html', title='Регистрация', form=form, message=\"Пароли не совпадают\")\n        session = db_session.create_session()\n        if session.query(users.User).filter(users.User.email == form.email.data).first():\n            return render_template('register.html', title='Регистрация', form=form, message=\"Такой пользователь уже есть\")\n        user = users.User(\n            name=form.name.data,\n            email=form.email.data,\n            about=form.about.data\n        )\n        user.set_password(form.password.data)\n        session.add(user)\n        session.commit()\n        return redirect('/login')\n    return render_template('register.html', title='Регистрация', form=form)\n\n\nif __name__ == '__main__':\n    db_session.global_init(\"db/data_base.db\")\n\n    # Heroku\n\n    port = int(os.environ.get(\"PORT\", 5000))\n    app.run(host='0.0.0.0', port=port)\n\n\n    # local server\n    \n    # app.run(port=8080, host='127.0.0.1', debug=True)","repo_name":"Tematikys/store","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70570937125","text":"import sys\ninput = sys.stdin.readline\n\nn = int(input())\ninputs = list(map(int, input().split()))\nstack = [0]\noutputs = [-1 for _ in range(n)]\n\nfor i in range(1, n):\n    # pop the stack while the current value is greater\n    while stack and inputs[stack[-1]] < inputs[i]:\n        outputs[stack[-1]] = inputs[i]\n        stack.pop()\n    stack.append(i)\n\nfor i in outputs:\n    print(i, end=' ')\n","repo_name":"jeanP-tech/Algorithms","sub_path":"[17298] 오큰수.py","file_name":"[17298] 오큰수.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12300790335","text":"from common import chrome_proxy_measurements as measurements\nfrom telemetry import benchmark\n\n\nclass ChromeProxyBenchmark(benchmark.Benchmark):\n  @classmethod\n  def AddCommandLineArgs(cls, parser):\n    parser.add_option(\n        '--extra-chrome-proxy-via-header',\n        type='string', dest=\"extra_header\",\n        help='Adds an expected Via header for the Chrome-Proxy tests.')\n\n  @classmethod\n  def ProcessCommandLineArgs(cls, parser, args):\n    if args.extra_header:\n      measurements.ChromeProxyValidation.extra_via_header = args.extra_header\n\n","repo_name":"kiwibrowser/src","sub_path":"tools/chrome_proxy/common/chrome_proxy_benchmark.py","file_name":"chrome_proxy_benchmark.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":2475,"dataset":"github-code","pt":"52"} +{"seq_id":"23763254159","text":"#!/usr/bin/env python3\n\ndef bitcheck(bit):\n    '''Takes a bitwise flag and checks it for strandedness. \\\nAssumes read is mapped (otherwise returns None) and data are single-stranded. 
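(Added illustration of the bitwise checks below: bitcheck(0) -> "+", bitcheck(16) -> "-", bitcheck(4) -> None since bit 4 marks an unmapped read, and bitcheck(83) -> "-" because 83 = 64+16+2+1 carries the 16 bit.)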
\\\nReturns \"+\" or \"-\" depending on strand.'''\n # Check if read is mapped, if not, return None\n if (bit &4) == 4:\n return None\n # First define strand as + strand\n strand = \"+\"\n # Check if bit flag 16 is true, if so, read is on - strand, so change\n if ((bit & 16) == 16):\n strand = \"-\"\n return strand\n","repo_name":"UO-BGMP/deduper-louislamont","sub_path":"bitwise function.py","file_name":"bitwise function.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26521483319","text":"import warnings\nfrom Model.Normal_model import ClassificationParameterTuning\nfrom Model.Normal_model import RgressionParameterTuning\nfrom sklearn import datasets\n\n\n\ndef run_master(model,features,labels):\n if model=='Classification':\n classfier=ClassificationParameterTuning.Modelchoice(features,labels)\n classfier.classfierChoice()\n elif model=='Regression':\n regressor=RgressionParameterTuning.ModelChoice(features,labels)\n regressor.regressorChoice()\n else:\n print(\"please choice the correct model\")\n\n\nif __name__ == '__main__':\n warnings.filterwarnings('ignore')\n\n dataTemp = datasets.load_iris()\n features = dataTemp.data\n label = dataTemp.target\n\n run_master('Classification',features,label)\n","repo_name":"duanluyun/Rec_Recall_Rerank_Model","sub_path":"Example/run_normal_model.py","file_name":"run_normal_model.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"15950256845","text":"# URLs\n# file manually added\n# your_project_name_here/apps/your_app_name_here/urls.py\nfrom django.conf.urls import url\nfrom . import views\n\n# Here we are going to list ONLY the list of URL - Regex --\n\nurlpatterns = [\n url(r'^forma$', views.forma),\n url(r'^hello$', views.sayhello),\n url(r'bye$', views.saygoodbye),\n # would only match localhost:8000/bears\n url(r'^bears$', views.one_method),\n # would match localhost:8000/bears/23\n url(r'^bears/(?P\\d+)$', views.another_method),\n # would match localhost:8000/bears/pooh/poke\n url(r'^bears/(?P\\w+)/poke$', views.yet_another),\n # would match localhost:8000/17/brown\n url(r'^(?P[0-9]+)/(?P\\w+)$', views.one_more),\n]\n","repo_name":"MineEnriquez/django_projects","sub_path":"helloworld/apps/app_2/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19359457765","text":"# -*- coding: utf-8 -*-\n# @Time  : 2019/3/6 上午9:08\n# @Author : NewmanZhou\n# @Project : news_all\n# @FileName: cnenergynews_gn_sj_spider.py\n\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import Rule\nfrom news_all.spider_models import NewsRCSpider\n\n\nclass CnenergynewsGNSJSpipder(NewsRCSpider):\n name = 'cnenergynews_gn_sj_spider'\n\n # 中国能源报 ==》 国内 ==》时间\n mystart_urls = {\n 'http://www.cnenergynews.cn/yw/': 569, # '国内 时间'\n 'http://www.cnenergynews.cn/yq/sy/': 571, # '石油 时间'\n 'http://www.cnenergynews.cn/xny_183/': 572, # '新能源 时间'\n }\n # http://www.cnenergynews.cn/xwzt/2019lhzt/lhtt/201903/t20190304_753302.html\n rules = (Rule(LinkExtractor(allow=r'cnenergynews.cn/.*?\\d+.html', deny=('video', 'audio'),\n restrict_xpaths='//div[@class=\"main4_left\"]/div[position()=2]'),\n callback='parse_item', follow=False),\n )\n\n def parse_item(self, response):\n xp = response.xpath\n try:\n title = 
xp(\"//div[@class='xltitle']/text()\").extract()[0]\n pubtime = xp(\"//div[@class='xltimer']/span[@class='xltimerl']/text()\").extract()[0].strip()\n origin_name = xp(\"//div[@class='xltimer']/span[@class='xltimerl']/span[@class='laiyuan']/span/text()\").extract_first('')\n cv = xp(\"//div[@class='xlcontent']\")[0]\n content, media, video, cover = self.content_clean(cv, need_video=False)\n except:\n return self.produce_debugitem(response, \"xpath error\")\n\n return self.produce_item(\n response=response,\n title=title,\n pubtime=pubtime,\n origin_name=origin_name,\n content=content,\n media=media\n )\n","repo_name":"Pintrue/news_all","sub_path":"news_all/spiders_four/cnenergynews_gn_sj_spider.py","file_name":"cnenergynews_gn_sj_spider.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"74412373925","text":"import uuid\n\nimport gcs_utils\nfrom kubeflow import fairing\n\nCONTAINER_REGISTRY = 'kangwoo'\n\nnamespace = 'admin'\njob_name = f'tensorflow-mnist-gcs-job-{uuid.uuid4().hex[:4]}'\n\ncommand = [\"python\", \"tensorflow_mnist.py\", \"--model_path\", \"gs://kfp-bucket/mnist/model\"]\noutput_map = {\n \"Dockerfile\": \"Dockerfile\",\n \"tensorflow_mnist.py\": \"tensorflow_mnist.py\"\n}\nfairing.config.set_preprocessor('python', command=command, path_prefix=\"/app\", output_map=output_map)\n\nfairing.config.set_builder('docker', registry=CONTAINER_REGISTRY, image_name=\"tensorflow-mnist\",\n dockerfile_path=\"Dockerfile\")\n\nfairing.config.set_deployer('job', namespace=namespace, job_name=job_name,\n pod_spec_mutators=[\n gcs_utils.gcp_credentials(secret_name='gcp-secret')\n ],\n cleanup=False, stream_log=True)\n\nfairing.config.run()\n","repo_name":"kangwoo/kubeflow-introduction","sub_path":"09-serving/kfserving/storage/gcs/fairing_local_docker.py","file_name":"fairing_local_docker.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"41467970578","text":"from typing import Callable\n\nfrom PySide6 import QtWidgets\nfrom sas.qtgui.Perspectives.ParticleEditor.UI.AxisButtonsUI import Ui_AxisSelection\nfrom sas.qtgui.Perspectives.ParticleEditor.UI.PlaneButtonsUI import Ui_PlaneSelection\n\nx_angles = (90, 0)\ny_angles = (0, 90)\nz_angles = (0, 0)\n\n\nclass PlaneButtons(QtWidgets.QWidget, Ui_PlaneSelection):\n \"\"\" XY, XZ, YZ plane selection buttons, sets angles \"\"\"\n\n def __init__(self, set_angles_function: Callable[[float, float], None]):\n super().__init__()\n self.setupUi(self)\n\n self.setAngles = set_angles_function\n\n self.selectXY.clicked.connect(lambda: self.setAngles(*z_angles))\n self.selectYZ.clicked.connect(lambda: self.setAngles(*x_angles))\n self.selectXZ.clicked.connect(lambda: self.setAngles(*y_angles))\n\n\n\nclass AxisButtons(QtWidgets.QWidget, Ui_AxisSelection):\n \"\"\" X, Y, Z axis selection buttons, sets angles \"\"\"\n\n def __init__(self, set_angles_function: Callable[[float, float], None]):\n super().__init__()\n self.setupUi(self)\n\n self.setAngles = set_angles_function\n\n self.selectX.clicked.connect(lambda: self.setAngles(*x_angles))\n self.selectY.clicked.connect(lambda: self.setAngles(*y_angles))\n self.selectZ.clicked.connect(lambda: 
self.setAngles(*z_angles))\n\n","repo_name":"SasView/sasview","sub_path":"src/sas/qtgui/Perspectives/ParticleEditor/ViewerButtons.py","file_name":"ViewerButtons.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"52"} +{"seq_id":"41642352524","text":"\nfrom random import randint\n\ndef my_ri(a, b=None):\n if not b:\n b = a\n a = 0\n return int(random(a, b+1))\n\n\n \nfor i in range(10):\n println(\"randomint: {}\".format(randint(0,3)))\n println(\"my random int: {}\".format(my_ri(3)))\n \n","repo_name":"villares/py.processing-play","sub_path":"random/random_int/random_int.pyde","file_name":"random_int.pyde","file_ext":"pyde","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"52"} +{"seq_id":"25852663234","text":"#!/usr/bin/env python\n\nfrom django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django import forms\nfrom django.forms.widgets import Input\n\n\n\nclass TinyColorPickerWidget(Input):\n\tinput_type = 'text'\n\ttemplate_name = 'tinycolorpicker/widget.html'\n\n\tdef __init__(self, attrs=None, image=None, variant=None):\n\t\tsuper(TinyColorPickerWidget, self).__init__(attrs=attrs)\n\t\tself.image = image or getattr(settings, 'TINYCOLORPICKER_IMAGE', 'tinycolorpicker/img/text-color.png')\n\t\tself.variant = variant or getattr(settings, 'TINYCOLORPICKER_VARIANT', 'vanilla')\n\n\n\tdef get_context(self, name, value, attrs):\n\t\tcontext = super(TinyColorPickerWidget, self).get_context(name, value, attrs)\n\t\tcontext['widget']['image'] = self.image\n\t\tcontext['widget']['variant'] = self.variant\n\t\treturn context\n\n\n\t@property\n\tdef media(self):\n\t\tif self.variant == 'vanilla':\n\t\t\tif settings.DEBUG:\n\t\t\t\tjs = ['tinycolorpicker/js/tinycolorpicker.js',]\n\t\t\telse:\n\t\t\t\tjs = ['tinycolorpicker/js/tinycolorpicker.min.js',]\n\t\telif self.variant == 'jquery':\n\t\t\tif settings.DEBUG:\n\t\t\t\tjs = ['tinycolorpicker/js/jquery.tinycolorpicker.js',]\n\t\t\telse:\n\t\t\t\tjs = ['tinycolorpicker/js/jquery.tinycolorpicker.min.js',]\n\t\telse:\n\t\t\traise ImproperlyConfigured(\"TINYCOLORPICKER_VARIANT must be one of 'vanilla' or 'jquery'\")\n\n\t\tcss = ['tinycolorpicker/css/tinycolorpicker.css',]\n\t\treturn forms.Media(css={'all': css}, js=js)\n","repo_name":"perdixsw/django-tinycolorpicker","sub_path":"tinycolorpicker/widgets.py","file_name":"widgets.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72943958884","text":"import torch\nimport torch.nn as nn\nimport torchvision.datasets as dsets\nfrom torch.autograd import Variable\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\n\ntorch.set_default_tensor_type(\"torch.DoubleTensor\")\n\n#Setting Parameters\ninput = 1024\nhidden = 1024\nclassNum = 9\nnumEpochs = 15\nbatchSIZE = 128\nlearningRate = 0.01\n\n''' --------------------------------------------------------------------------------------------'''\n''' Setup the train data, validation data, train labels and validation labels '''\n'''---------------------------------------------------------------------------------------------'''\n\n#Setup train dataset\nX = np.load(\"X.npy\")\nY = np.load(\"Y.npy\")\n\n\nX = X.astype(np.int64)\nY = Y.astype(np.int64)\n\nX = X[:,np.newaxis,:,:]\n\n# Split the train data and valid data\ntrainData, validData, 
trainLabel, validLabel = train_test_split(X, Y, test_size=.2)\n\n# Covert the numpy array to tensor\ntorchXtrain = torch.from_numpy(trainData).type(torch.DoubleTensor)\ntorchYtrain = torch.from_numpy(trainLabel)\n\ntorchXvalid = torch.from_numpy(validData).type(torch.DoubleTensor)\ntorchYvalid = torch.from_numpy(validLabel)\n\n#Combine the data and labels\ntrain = torch.utils.data.TensorDataset(torchXtrain,torchYtrain)\nvalid = torch.utils.data.TensorDataset(torchXvalid,torchYvalid)\n\n#Setup the loader for batch training and validation\ntrainLoader = torch.utils.data.DataLoader(train, batch_size = batchSIZE, shuffle = True)\nvalidLoader = torch.utils.data.DataLoader(valid, batch_size = batchSIZE, shuffle = True)\n\n\n''' --------------------------------------------------------------------------------------------'''\n''' The frame of MLP network '''\n'''---------------------------------------------------------------------------------------------'''\n# MLP Model\nclass MLP(nn.Module):\n def __init__(self, input, hidden, classNum):\n super(MLP, self).__init__()\n self.Fc1 = nn.Linear(input, hidden)\n self.relu = nn.ReLU() #activation function:Relu\n self.Fc2 = nn.Linear(hidden, classNum)\n\n def forward(self, x):\n out = self.Fc1(x)\n out = self.relu(out)\n out = self.Fc2(out)\n return out\n\n\nmlp = MLP(input, hidden, classNum)\n\n''' --------------------------------------------------------------------------------------------'''\n''' Train the MLP network '''\n'''---------------------------------------------------------------------------------------------'''\n\n# Train the Model\nfor epoch in range(numEpochs):\n lossFc = nn.CrossEntropyLoss() # calculate the loss\n optimizer = torch.optim.Adam(mlp.parameters(), lr=learningRate) # Adam optimizer\n for x, (trainData, labels) in enumerate(trainLoader):\n\n trainData = Variable(trainData.view(-1, 32*32))\n labels = Variable(labels-1)\n\n optimizer.zero_grad()\n outputs = mlp(trainData)\n loss = lossFc(outputs, labels)\n loss.backward() #backpropagation\n optimizer.step()\n #print iteration times as well as the loss\n if (x + 1) % 5 == 0:\n print('Epoch: ', epoch + 1, '/', numEpochs, ', Progress: ', x + 1, '/', 65, ', Loss: ', np.array(loss.data[0]))\n\n''' --------------------------------------------------------------------------------------------'''\n''' Validation and accuracy '''\n'''---------------------------------------------------------------------------------------------'''\n\n#Validation function\nnumCorrect = 0\nnumTotal = 0\nfor testData, labels in validLoader:\n testData = Variable(testData.view(-1, 32 * 32))\n outputs = mlp(testData)\n _, predicted = torch.max(outputs.data, 1)\n ones = torch.ones(predicted.shape).type(torch.int64)\n predicted = predicted+ones\n numTotal += labels.size(0)\n numCorrect += (predicted.cpu() == labels).sum()\n\nprint('Validation accuracy of the model: %d %%' % (100 * numCorrect / numTotal))\n\n''' --------------------------------------------------------------------------------------------'''\n''' Save the trained parameter '''\n'''---------------------------------------------------------------------------------------------'''\n#save mlp model\ntorch.save(mlp.state_dict(), 'mlp.pkl')\n\n","repo_name":"cyx01293/EEL5840-Fundamentals-of-Machine-Learning","sub_path":"project01/mlp_train.py","file_name":"mlp_train.py","file_ext":"py","file_size_in_byte":4450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"39668497769","text":"import 
sqlite3\n\nconn=sqlite3.connect(r'C:\\Users\\dell\\Desktop\\dbeaver\\weed')\n\n\ncursor=conn.cursor()\n\n\ncursor.execute('SELECT * FROM weeds')\n\nresults=cursor.fetchall()\n\n\n\nfor row in results:\n print(row)\n\n\n\ncursor.close()\nconn.close()\n","repo_name":"ghanshyam20/some_stuffs","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2572784452","text":"import os\nimport time\nfrom datetime import datetime\nimport requests\nimport urllib\nimport gc\nfrom requests.adapters import HTTPAdapter\nfrom urllib3 import Retry\nfrom bs4 import BeautifulSoup\n\nSITE_URL = \"https://www.leopold.co.kr/Shop/Item.php?ItId=1512545919\"\nTELE_API_URL = \"https://api.telegram.org/bot{0}/sendMessage?chat_id={1}&text={2}\"\nDIR = r\"./\"\nTOKEN_FILE_NAME = r\"TELEGRAM_TOKEN.txt\"\nUSER_LIST_FILE_NAME = r\"USER_LIST.txt\"\nCHECK_INTERVAL_IN_SECONDS = 15 * 60 # 15 minutes\nINTERVAL_AFTER_SENT_IN_SECONDS = 24 * 60 * 60 # one day\n\ndef send_to_telegram(token, text_to_send, user_id_list):\n parsed_text = urllib.parse.quote_plus(text_to_send)\n for chat_id in user_id_list:\n requests.get(TELE_API_URL.format(token, chat_id, parsed_text))\n\ndef check_lines(s, token, sent_flag, user_id_list):\n # get response\n try:\n site_response = s.get(SITE_URL)\n\n site = BeautifulSoup(site_response.content, 'html.parser')\n\n text = site.find('table').find_all('tr')[1].text\n\n if \"품절\" in text:\n return False\n\n if not sent_flag:\n text_to_send = '=' * 25 + '\\n' + \"막 용두 알리미\\n\\n\\n 막 용두 재고 찼음!!!\\n\\n\" + '=' * 25 + '\\n\\n' + SITE_URL + '\\n'\n for _ in range(5):\n send_to_telegram(token, text_to_send, user_id_list)\n return True\n except:\n pass\n\n return False\n\nif __name__ == \"__main__\":\n with requests.Session() as s:\n # fetch telegram user tokens\n user_id_list = []\n\n with open(DIR + USER_LIST_FILE_NAME, \"r\") as file:\n for line in file.readlines():\n user_id_list.append(line.strip())\n\n with open(DIR + TOKEN_FILE_NAME, \"r\") as file:\n token = file.readline().strip()\n\n sent_flag = False\n\n while True:\n sent_flag = check_lines(s, token, sent_flag, user_id_list)\n\n if sent_flag:\n time.sleep(INTERVAL_AFTER_SENT_IN_SECONDS)\n else:\n time.sleep(CHECK_INTERVAL_IN_SECONDS)\n","repo_name":"KRMing/LeopoldStabilizerNotifier","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32515623679","text":"import rospy\nimport threading\nfrom .observer import Observer\n\n\nclass PeriodicObserver(Observer):\n def __init__(self, rate, **kwargs):\n super(PeriodicObserver, self).__init__(**kwargs)\n if rate is None or rate <= 0:\n raise ValueError(\"invalid rate\")\n self._rate = rospy.Rate(rate)\n self._worker = threading.Thread(target=self._dispatch_event, name=\"current_goal_observer\")\n self._worker.start()\n\n def join(self):\n self._worker.join()\n\n def _dispatch_event(self):\n while not rospy.is_shutdown():\n self._call_event(msg=None)\n try:\n self._rate.sleep()\n except rospy.ROSInterruptException as e:\n rospy.logdebug(\"PeriodicObserver: 
{}\".format(e))\n","repo_name":"ymd-stella/stella_nav","sub_path":"stella_nav_observer/src/stella_nav_observer/periodic_observer.py","file_name":"periodic_observer.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"18538136375","text":"# 안녕 S2\nimport sys\n\ninput = sys.stdin.readline\nn = int(input())\nL = [0] + list(map(int, input().split()))\nJ = [0] + list(map(int, input().split()))\ndp = [[0] * 101 for i in range(n + 1)]\n\nfor i in range(1, n + 1):\n for j in range(1, 101): # 체력이 1~100인 경우\n if L[i] <= j: # 현재 체력으로 현재 사람을 만날 수 있는 경우\n # i-1번째 dp에서 L[i]만큼 체력을 빠졌을 때 J[i]만큼 기쁨을 선택할 경우와 비교\n dp[i][j] = max(dp[i - 1][j], dp[i - 1][j - L[i]] + J[i])\n else: # 사람을 만나지 않음\n dp[i][j] = dp[i - 1][j]\n\nprint(dp[n][99])\n","repo_name":"kkm0406/AlgorithmBOJ","sub_path":"다이나믹 프로그래밍/1535.py","file_name":"1535.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26950250397","text":"import nltk\nimport operator\nimport numpy\nfrom processing import X_train, Y_train, X_test\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\n\n\nstopwords = set(nltk.corpus.stopwords.words(\"english\"))\nextra_stopwords = [\".\", \",\", \":\", \";\", \"``\", \"''\", \"'\", \"%\", \"-\", \"$\", \"(\", \")\"]\nfor char in extra_stopwords:\n stopwords.add(char)\n\n# Initialize lemmatizer\nlemmatizer = nltk.stem.WordNetLemmatizer()\n\n# vocabulary = []\n\n# function to get most n frequent words in the full dataset, which include all 5 categories, excluding stopwords\ndef get_frequent_words(article_list, n):\n frequency_dic = {}\n # print(len(article_list))\n # print(article_list.loc[0])\n # print(article_list.loc[1])\n for i in article_list.index:\n article = article_list.loc[i, \"content\"]\n # print(article)\n for sentence in nltk.tokenize.sent_tokenize(article):\n for token in nltk.tokenize.word_tokenize(sentence):\n word = lemmatizer.lemmatize(token).lower()\n if word in stopwords: continue\n if word in frequency_dic:\n frequency_dic[word] += 1\n else:\n frequency_dic[word] = 1\n sorted_list = sorted(frequency_dic.items(), key=operator.itemgetter(1), reverse=True)\n # print(sorted_list)\n if len(sorted_list) <= n:\n print(\"no {} words in list, the number of words is: {}\".format(n, len(sorted_list)))\n return sorted_list\n else:\n return sorted_list[:n]\n\n\n# function to get vector according to vocabulary from articles\n# feature 1 refers to get the number n most frequent words from all categories\ndef get_feature1_vector(vocabulary, article):\n vector = numpy.zeros(len(vocabulary))\n words = []\n for sentence in nltk.tokenize.sent_tokenize(article):\n for token in nltk.tokenize.word_tokenize(sentence):\n words.append(lemmatizer.lemmatize(token).lower())\n\n for i, word in enumerate(vocabulary):\n if word in words:\n vector[i] = words.count(word)\n return vector\n\n\n# Function to get train data for feature 1\n# feature 1 refers to get the number n most frequent words from all categories\ndef get_feature1_train_data(n):\n vocabulary = []\n\n words = get_frequent_words(X_train, n)\n for word, frequency in words:\n if word not in vocabulary:\n vocabulary.append(word)\n\n X_vector = []\n Y_vector = []\n for i in X_train.index:\n X_vector.append(get_feature1_vector(vocabulary, X_train.loc[i, \"content\"]))\n Y_vector.append(Y_train.loc[i])\n return X_vector, Y_vector, vocabulary\n\n\ndef 
get_feature2_vector(vocabulary, article):\n vector = numpy.zeros(len(vocabulary))\n words = []\n # get token from the first line of each article as title\n for token in nltk.tokenize.word_tokenize(nltk.tokenize.sent_tokenize(article)[0]):\n words.append(lemmatizer.lemmatize(token).lower())\n for i, word in enumerate(vocabulary):\n if word in words:\n vector[i] = words.count(word)\n return vector\n\ndef get_feature2_train_data(n):\n title_vocabulary = []\n for i in X_train.index:\n article = X_train.loc[i, \"content\"]\n # get token from the first line of each article as title\n for token in nltk.tokenize.word_tokenize(nltk.tokenize.sent_tokenize(article)[0]):\n word = lemmatizer.lemmatize(token).lower()\n if word in stopwords: continue\n if word not in title_vocabulary: title_vocabulary.append(word)\n\n X_vector = []\n Y_vector = []\n for i in X_train.index:\n X_vector.append(get_feature2_vector(title_vocabulary, X_train.loc[i, \"content\"]))\n Y_vector.append(Y_train.loc[i])\n return X_vector, Y_vector, title_vocabulary\n\n\n# feature3: TF-IDF\ntfidf_vector = TfidfVectorizer()\n# learn vocabulary and idf from training set\ntfidf_vector.fit(X_train[\"content\"])\n\n# transform train and test input documents to document-term matrix\n# Uses the vocabulary and document frequencies (df) learned by fit\ntfidf_X_train = tfidf_vector.transform(X_train[\"content\"])\ntfidf_X_test = tfidf_vector.transform(X_test[\"content\"])\n# print(tfidf_X_train)\n# print(tfidf_X_test)","repo_name":"lyzsk/cmt316-courseworks","sub_path":"coursework1/part2/featureengineer.py","file_name":"featureengineer.py","file_ext":"py","file_size_in_byte":4203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17238901349","text":"import json\nimport logging\nimport urllib\nfrom google.appengine.api import urlfetch\nfrom methods.unique import get_temporary_user, USER_AGENT\n\nBASE_URL = 'http://empatika-doubleb.appspot.com'\nBASE_URL_TEST = 'http://empatika-doubleb-test.appspot.com'\n\n\ndef __get_base_url(company):\n if company.test_server:\n return BASE_URL_TEST\n else:\n return BASE_URL\n\n\ndef _get_request(company, path, params=None, log_response=True):\n url = '%s%s' % (__get_base_url(company), path)\n if params:\n url = '%s?%s' % (url, urllib.urlencode(params))\n logging.info(url)\n response = urlfetch.fetch(url, method='GET', deadline=60, headers={'User-Agent': get_temporary_user()[USER_AGENT]})\n logging.info(response.status_code)\n response = json.loads(response.content)\n if log_response:\n logging.info(response)\n return response\n\n\ndef _post_request(company, path, params=None, payload=None, log_response=True):\n url = '%s%s' % (__get_base_url(company), path)\n if params:\n url = '%s?%s' % (url, urllib.urlencode(params))\n logging.info(url)\n if payload:\n payload = {k: unicode(v).encode('utf-8') for k, v in payload.iteritems()}\n payload = urllib.urlencode(payload)\n logging.info('payload = %s' % payload)\n response = urlfetch.fetch(url, method='POST', payload=payload, deadline=60,\n headers={'User-Agent': get_temporary_user()[USER_AGENT]})\n logging.info(response.status_code)\n response = json.loads(response.content)\n if log_response:\n logging.info(response)\n return response\n\n\ndef get_doubleb_venues(company):\n path = '/api/venues.php'\n return _get_request(company, path)\n\n\ndef get_doubleb_payment_types(company):\n path = '/api/payment/payment_types.php'\n return _get_request(company, path)\n\n\ndef get_doubleb_menu(company, client=None):\n 
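# (illustrative sketch, not part of the original file: every public helper in this
# module funnels through _get_request/_post_request above, which build the URL as
# '<base><path>?<urlencoded params>'; 'company' and 'client' are the objects the
# module already expects, and the URLs shown are just what _get_request would build)
#   get_doubleb_venues(company)        # GET <base>/api/venues.php
#   get_doubleb_menu(company, client)  # GET <base>/api/menu.php?client_id=<id>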
path = '/api/menu.php'\n    params = {\n        'client_id': client.key.id() if client else None\n    }\n    return _get_request(company, path, params=params)\n\n\ndef post_doubleb_registration(company, client):\n    path = '/api/register'\n    payload = {\n        'client_id': client.key.id() if client else None\n    }\n    return _post_request(company, path, payload=payload)\n\n\ndef post_doubleb_check_order(company, client, venue, items, payment, delivery_time):\n    path = '/api/check_order'\n    payload = {\n        'client_id': client.key.id() if client else None,\n        'venue_id': venue.key.id(),\n        'payment': json.dumps(payment),\n        'delivery_time': delivery_time,\n        'items': json.dumps(items),\n    }\n    return _post_request(company, path, payload=payload)\n\n\ndef get_order_id(company):\n    path = '/api/order_register.php'\n    return _get_request(company, path)['order_id']\n\n\ndef post_doubleb_place_order(company, order, auto_client, doubleb_client, venue, items, payment, delivery_time):\n    path = '/api/order.php'\n    payload = {\n        'order': json.dumps({\n            'order_id': get_order_id(company),\n            'venue_id': venue.key.id(),\n            'comment': order.comment,\n            'device_type': order.device_type,\n            'delivery_time': delivery_time,\n            'total_sum': order.total_sum,\n            'client': {\n                'id': doubleb_client.key.id(),\n                'name': '%s %s' % (auto_client.name, auto_client.surname),\n                'phone': auto_client.tel,\n                'email': auto_client.email\n            },\n            'payment': payment,\n            'items': items\n        })\n    }\n    return _post_request(company, path, payload=payload)\n","repo_name":"lopatinsky/automation-gae","sub_path":"methods/proxy/doubleb/requests.py","file_name":"requests.py","file_ext":"py","file_size_in_byte":3586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"219521518","text":"# coding:utf-8\n\nimport socket\n\nhost = ''\nport = 2001\nserver = socket.socket()\n\nserver.bind((host, port))\n\nwhile True:\n    server.listen(5)\n    connection,addr = server.accept()\n    request_size = 1024\n    request_str = b''\n    while True:\n        chunk = connection.recv(request_size)\n        request_str += chunk\n        if len(chunk) < request_size:\n            break\n\n    print('request:{}'.format(request_str.decode('utf-8')))\n\n    reponse_str = b'<html><body><h1>Hello,zppc welcome you!</h1></body></html>'\n    connection.sendall(reponse_str)\n    connection.close()\n","repo_name":"zppc/pytest","sub_path":"http1.py","file_name":"http1.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"29422312070","text":"import json\nimport requests\n\nclass SimpleCrawler:\n    init_url = \"https://zhuanlan.zhihu.com/api/columns/pythoneer/followers\"\n    offset = 0\n\n    def crawl(self, params=None):\n        # A User-Agent must be set, otherwise Zhihu's server treats the request as invalid\n        headers = {\n            \"Host\": \"zhuanlan.zhihu.com\",\n            \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64)\"\n                          \"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36\"\n        }\n\n        response = requests.get(self.init_url, headers=headers, params=params)\n        print(response.url)\n        data = response.json()\n        # 7000 is the total number of followers\n        # load more pages, calling recursively\n        while self.offset < 7000:\n            self.parse(data)\n            self.offset += 20\n            params = {\"limit\": 20, \"offset\": self.offset}\n            self.crawl(params)\n    \n    def parse(self, data):\n        # save to a file in json format\n        with open(\"followers.json\", \"a\", encoding=\"utf-8\") as f:\n            for item in data:\n                f.write(json.dumps(item))\n                f.write('\\n')\n\nif __name__ == '__main__':\n    SimpleCrawler().crawl()\n\n\"\"\"\n1. Installing Python packages:\n\tpip install requests\n   If both python2 and python3 are installed and you want the package for python3:\n   python3 -m pip install requests\n\n2. requests documentation (Chinese):\n   http://docs.python-requests.org/zh_CN/latest/user/quickstart.html\n\n3. if __name__ == '__main__':\n   A Python file can be used in two ways:\n   first, it can be executed directly as a script;\n   second, it can be imported into another Python script and called (module reuse).\n   The point of if __name__ == '__main__': is to control what runs in these two cases:\n   the code under if __name__ == '__main__': only runs in the first case (when the file is executed directly as a script),\n   and it is not executed when the file is imported into another script.\n\"\"\"","repo_name":"linxiaoru/python-in-action","sub_path":"examples/advanced/crawler/simple_crawler_using_ requests.py","file_name":"simple_crawler_using_ requests.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
+{"seq_id":"44041910618","text":"import sys\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\n\ninputFile = \"HLA-A-0201_assays.csv\"\noutputFile = \"9-lenght-IC50-nM.txt\"\nenergiesFile = \"energies_{}\".format(outputFile)\nfoldxIE = \"/home/pepamengual/neoantigenos/big_benchmark/foldxIE\"\npydockIE = \"/home/pepamengual/neoantigenos/big_benchmark/pydockIE\"\nthreshold = 20\nthresholdpy = 40\n\npolarity = {'A':'0.83','R':'0.83','N':'0.09','D':'0.64','C':'1.48','E':'0.65','Q':'0','G':'0.1','H':'1.1','I':'3.07','L':'2.52','K':'1.6','M':'1.4','F':'2.75','P':'2.7','S':'0.14','T':'0.54','W':'0.31','Y':'2.97','V':'1.79'}\nheavyatoms = {'A':'6','R':'12','N':'9','D':'9','C':'7','E':'10','Q':'10','G':'5','H':'11','I':'9','L':'9','K':'10','M':'9','F':'12','P':'8','S':'7','T':'8','W':'15','Y':'13','V':'9'}\n#order = [\"Negative\", \"Positive-Low\", \"Positive\", \"Positive-Intermediate\", \"Positive-High\"]\n#colors = ['blue','cyan','green','orange','red']\norder = [\"Negative\", \"Positive-Low\", \"Positive-Intermediate\", \"Positive-High\"]\ncolors = ['blue','cyan','orange','red']\n#order = [\"Positive\"]\n#colors = [\"green\"]\n\npolarityClassifier = {'A':'A','R':'P','N':'P','D':'P','C':'A','E':'P','Q':'P','G':'A','H':'P','I':'A','L':'A','K':'P','M':'A','F':'A','P':'A','S':'P','T':'P','W':'A','Y':'A','V':'A'}\n\nR = 1.9872036e-3\nT = 300\n\ndef DBfilter():\n    d = {}\n    with open(inputFile, 'r') as f:\n        for line in f:\n            h_line = line.split(\",\")\n            peptide = h_line[1]\n            method = h_line[2]\n            assay = h_line[3]\n            units = h_line[4]\n            qualitativeValue = h_line[5]\n            quantitativeValue = h_line[6]\n            allele = h_line[7].rstrip()\n            if len(peptide) == 9:\n                if quantitativeValue:\n                    if assay == \"half maximal inhibitory concentration (IC50)\":\n                        if units == \"nM\":\n                            if not peptide in d:\n                                content = \"{} {}\".format(quantitativeValue, qualitativeValue)\n                                d[peptide] = content\n                            else:\n                                del d[peptide]\n    with open(outputFile, 'w') as f:\n        for key, value in d.items():\n            f.write(\"{} {}\".format(key, value) + '\\n')\n\ndef energyCalculator(outputFile, foldxIE, pydockIE):\n    lEnergies = []\n    with open(outputFile, 'r') as f:\n        for line in f:\n            peptide = line.split(\" \")[0]\n            peptide = peptide.rstrip()\n            polarityPeptide = 0.0\n            heavyatomsPeptide = 0\n            polarityKeyResidues = float(polarity[peptide[1]]) + float(polarity[peptide[8]])\n            ic50 = line.split(\" \")[1]\n            ic50 = float(ic50)\n            ic50 /= 1e9\n            G = R * T * (np.log(ic50))\n            G = round(G, 3)\n            qualitativeValue = line.split(\" \")[2].rstrip('\\n')\n            \n            locationEnergyFoldxIE = \"{}/Summary_{}_AC.fxout\".format(foldxIE, peptide)\n            with open(locationEnergyFoldxIE, 'r') as ffoldx:\n                last_line = ffoldx.readlines()[-1]\n                energyFoldx = last_line.split(\"\\t\")[5]\n                energyFoldx = round(float(energyFoldx), 3)\n\n            locationEnergyPydock = \"{}/{}.ene\".format(pydockIE, peptide)\n            with open(locationEnergyPydock, 'r') as fpydock:    \n                last_line = fpydock.readlines()[-1].rstrip(\"\\n\")\n                energyPydock = last_line.split()[4]\n                energyPydock = round(float(energyPydock), 3)\n            \n            results = \"{} {} {} {} {}\".format(peptide, qualitativeValue, G, energyFoldx, energyPydock)\n            lEnergies.append(results)\n\n    with open(energiesFile, \"w\") as f:\n        f.write(\"Peptide qualitativeValue ΔGexp FoldxBindingEnergy PydockBindingEnergy\\n\")\n        for k in lEnergies:\n            f.write(k + '\\n')\n\n\ndef plotter():\n    di = {\"x\": [], \"y\": [], \"p\": []} #FoldX binding energies lower than -15 Kcal/mol\n    di2 = {\"x\": [], \"y\": [], \"p\": []} #FoldX binding energies higher than -15 Kcal/mol\n    di3 = {\"x\": [], \"y\": [], \"p\": []} #FoldX binding energies lower than -15 Kcal/mol AND apolar at position 2 or 9\n    di4 = {\"x\": [], \"y\": [], \"p\": []} #FoldX binding energies lower than -15 Kcal/mol AND apolar at positions 2 and 9\n    di5 = {\"x\": [], \"y\": [], \"p\": []} #Double polar\n    di6 = {\"x\": [], \"y\": [], \"p\": []} #Double apolar\n    di7 = {\"x\": [], \"y\": [], \"p\": []} #Some polar\n    \n    dipy = {\"x\": [], \"y\": [], \"p\": []}\n    di2py = {\"x\": [], \"y\": [], \"p\": []}\n    di3py = {\"x\": [], \"y\": [], \"p\": []}\n    di4py = {\"x\": [], \"y\": [], \"p\": []}\n    di5py = {\"x\": [], \"y\": [], \"p\": []}\n    di6py = {\"x\": [], \"y\": [], \"p\": []}\n    di7py = {\"x\": [], \"y\": [], \"p\": []}\n\n    ed = {} # energy discarded\n    ea = {} # energy accepted\n    dp = {} # double polar\n    da = {} # double apolar\n    sp = {} # some polar\n\n    edpy = {} # energy discarded\n    eapy = {} # energy accepted\n    dppy = {} # double polar\n    dapy = {} # double apolar\n    sppy = {} # some polar\n\n\n    countdpNoHigh = 0\n    countdpHigh = 0\n    countdpNegative = 0\n    countdpPositive = 0\n\n    countdpNoHighpy = 0\n    countdpHighpy = 0\n    countdpNegativepy = 0\n    countdpPositivepy = 0\n\n\n    for num, name in enumerate(order):\n        with open(energiesFile, \"r\") as f:\n            next(f)\n            for line in f:\n                h_line = line.split(\" \")\n                peptide = h_line[0]\n                qualitativeValue = h_line[1]\n                experimentalEnergy = float(h_line[2])\n                foldXBindingEnergy = float(h_line[3])\n                PydockBindingEnergy = float(h_line[4])\n                if qualitativeValue == name:\n                    if foldXBindingEnergy <= -threshold: #FOLDX\n                        if not qualitativeValue in ea:\n                            ea[qualitativeValue] = 1\n                        else:\n                            ea[qualitativeValue] += 1\n                        di[\"p\"].append(qualitativeValue)\n                        di[\"x\"].append(experimentalEnergy)\n                        di[\"y\"].append(foldXBindingEnergy)\n                        if polarityClassifier[peptide[1]] == \"P\":\n                            di3[\"p\"].append(qualitativeValue)\n                            di3[\"x\"].append(experimentalEnergy)\n                            di3[\"y\"].append(foldXBindingEnergy)\n                            if polarityClassifier[peptide[8]] == \"P\": # double polar\n                                di5[\"p\"].append(qualitativeValue)\n                                di5[\"x\"].append(experimentalEnergy)\n                                di5[\"y\"].append(foldXBindingEnergy)\n                                if not qualitativeValue in dp:\n                                    dp[qualitativeValue] = 1\n                                else:\n                                    dp[qualitativeValue] += 1\n                        if polarityClassifier[peptide[1]] == \"A\":\n                            di4[\"p\"].append(qualitativeValue)\n                            di4[\"x\"].append(experimentalEnergy)\n                            di4[\"y\"].append(foldXBindingEnergy)\n                            if polarityClassifier[peptide[8]] == \"A\": #double apolar\n                                di6[\"p\"].append(qualitativeValue)\n                                di6[\"x\"].append(experimentalEnergy)\n                                di6[\"y\"].append(foldXBindingEnergy)\n                                if not qualitativeValue in da:\n                                    da[qualitativeValue] = 1\n                                else:\n                                    da[qualitativeValue] += 1\n                                if qualitativeValue == \"Positive-High\":\n                                    countdpHigh += 1\n                                else:\n                                    countdpNoHigh += 1\n                                if qualitativeValue == \"Negative\":\n                                    countdpNegative += 1\n                                if \"Positive\" in qualitativeValue:\n                                    countdpPositive += 1\n                        if (polarityClassifier[peptide[1]] == \"P\") or (polarityClassifier[peptide[8]] == \"P\"): #some polar\n                            di7[\"p\"].append(qualitativeValue)\n                            di7[\"x\"].append(experimentalEnergy)\n                            di7[\"y\"].append(foldXBindingEnergy)\n                            if not qualitativeValue in sp:\n                                sp[qualitativeValue] = 1\n                            else:\n                                sp[qualitativeValue] += 1\n                    else:\n                        di2[\"p\"].append(qualitativeValue)\n                        di2[\"x\"].append(experimentalEnergy)\n                        di2[\"y\"].append(foldXBindingEnergy)\n                        if not qualitativeValue in ed:\n                            ed[qualitativeValue] = 1\n                        else:\n                            ed[qualitativeValue] += 1\n                    \n                    if PydockBindingEnergy <= -thresholdpy: #PYDOCK\n                        if not qualitativeValue in eapy:\n                            eapy[qualitativeValue] = 1\n                        else:\n                            eapy[qualitativeValue] += 1\n                        dipy[\"p\"].append(qualitativeValue)\n                        dipy[\"x\"].append(experimentalEnergy)\n                        dipy[\"y\"].append(PydockBindingEnergy)\n                        if polarityClassifier[peptide[1]] == \"P\":\n                            di3py[\"p\"].append(qualitativeValue)\n                            di3py[\"x\"].append(experimentalEnergy)\n                            di3py[\"y\"].append(PydockBindingEnergy)\n                            if polarityClassifier[peptide[8]] == \"P\": # double polar\n                                di5py[\"p\"].append(qualitativeValue)\n                                di5py[\"x\"].append(experimentalEnergy)\n                                di5py[\"y\"].append(PydockBindingEnergy)\n                                if not qualitativeValue in dppy:\n                                    dppy[qualitativeValue] = 1\n                                else:\n                                    dppy[qualitativeValue] += 1\n                        if polarityClassifier[peptide[1]] == \"A\":\n                            di4py[\"p\"].append(qualitativeValue)\n                            di4py[\"x\"].append(experimentalEnergy)\n                            di4py[\"y\"].append(PydockBindingEnergy)\n                            if polarityClassifier[peptide[8]] == \"A\": #double apolar\n                                di6py[\"p\"].append(qualitativeValue)\n                                di6py[\"x\"].append(experimentalEnergy)\n                                di6py[\"y\"].append(PydockBindingEnergy)\n                                if not qualitativeValue in dapy:\n                                    dapy[qualitativeValue] = 1\n                                else:\n                                    dapy[qualitativeValue] += 1\n                                if qualitativeValue == \"Positive-High\":\n                                    countdpHighpy += 1\n                                else:\n                                    countdpNoHighpy += 1\n                                if qualitativeValue == \"Negative\":\n                                    countdpNegativepy += 1\n                                if \"Positive\" in qualitativeValue:\n                                    countdpPositivepy += 1\n                        if (polarityClassifier[peptide[1]] == \"P\") or (polarityClassifier[peptide[8]] == \"P\"): #some polar\n                            di7py[\"p\"].append(qualitativeValue)\n                            di7py[\"x\"].append(experimentalEnergy)\n                            di7py[\"y\"].append(PydockBindingEnergy)\n                            if not qualitativeValue in sppy:\n                                
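# (sketch, not in the original source: every tally in this function repeats the
# same if/else pattern; dict.get would collapse each one to a single line, e.g.)
#   sppy[qualitativeValue] = sppy.get(qualitativeValue, 0) + 1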
sppy[qualitativeValue] = 1\n                            else:\n                                sppy[qualitativeValue] += 1\n                    else:\n                        di2py[\"p\"].append(qualitativeValue)\n                        di2py[\"x\"].append(experimentalEnergy)\n                        di2py[\"y\"].append(PydockBindingEnergy)\n                        if not qualitativeValue in edpy:\n                            edpy[qualitativeValue] = 1\n                        else:\n                            edpy[qualitativeValue] += 1\n\n    print(\"Threshold: \", threshold)\n    print(\"Energy Accepted: \", ea)\n    print(\"Energy Discarded: \", ed)\n    print(\"Double Polar: \", dp)\n    print(\"Double Apolar: \", da)\n    print(\"Some Polar: \", sp)\n    print(\"Double Polar High Ratio: \", round(countdpHigh/countdpNoHigh, 3))\n    print(\"Positive / Negative Ratio: \", round(countdpPositive/countdpNegative, 3))\n\n    \n\n    sns.scatterplot(x=\"x\", y=\"y\", hue=\"p\", s=20, palette=colors, data=di6, alpha=1)\n    sns.scatterplot(x=\"x\", y=\"y\", hue=\"p\", s=20, palette=colors, data=di2, alpha=0.1, legend=False)\n    plt.plot([-threshold, -2], [-threshold, -threshold], linewidth=2, color=\"black\")\n    plt.xlabel(\"ΔGexp (Kcal/mol)\")\n    plt.ylabel(\"FoldX Interaction Energy (Kcal/mol)\")\n    plt.ylim(-30,10)\n    plt.xlim(-15,-2)\n    plt.savefig(\"scatter_9-lenght_exp_foldx_doubleapolar_{}.png\".format(threshold))\n    plt.show()\n\n    \n    print(\"Threshold: \", thresholdpy)\n    print(\"Energy Accepted: \", eapy)\n    print(\"Energy Discarded: \", edpy)\n    print(\"Double Polar: \", dppy)\n    print(\"Double Apolar: \", dapy)\n    print(\"Some Polar: \", sppy)\n    print(\"Double Polar High Ratio: \", round(countdpHighpy/countdpNoHighpy, 3))\n    print(\"Positive / Negative Ratio: \", round(countdpPositivepy/countdpNegativepy, 3))\n\n    sns.scatterplot(x=\"x\", y=\"y\", hue=\"p\", s=20, palette=colors, data=di6py, alpha=1)\n    sns.scatterplot(x=\"x\", y=\"y\", hue=\"p\", s=20, palette=colors, data=di2py, alpha=0.1, legend=False)\n    plt.plot([-thresholdpy, -2], [-thresholdpy, -thresholdpy], linewidth=2, color=\"black\")\n    plt.xlabel(\"ΔGexp (Kcal/mol)\")\n    plt.ylabel(\"Pydock Interaction Energy (Kcal/mol)\")\n    plt.ylim(-70,10)\n    plt.xlim(-15,-2)\n    plt.savefig(\"scatter_9-lenght_exp_foldx_doubleapolar_{}_pydock.png\".format(threshold))\n    plt.show()\n\n\ndef main():\n    DBfilter()\n    energyCalculator(outputFile, foldxIE, pydockIE)\n    plotter()\n\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"cescgina/neoantigens","sub_path":"energy.py","file_name":"energy.py","file_ext":"py","file_size_in_byte":14391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"35491118941","text":"from cms.plugin_base import CMSPluginBase\nfrom cms.plugin_pool import plugin_pool\nfrom django.utils.translation import gettext as _\n\nfrom djangocms_leaflet.models import Map, Marker\n\n\n@plugin_pool.register_plugin\nclass MapPublisher(CMSPluginBase):\n    \"\"\"Leaflet Map\"\"\"\n    model = Map\n    module = 'Leaflet Map'\n    name = _('Map')\n    render_template = 'djangocms_leaflet/map.html'\n    allow_children = True\n    child_classes = ['MarkerPublisher']\n\n    def render(self, context, instance, placeholder):\n        context.update({'instance': instance})\n        return context\n\n\n@plugin_pool.register_plugin\nclass MarkerPublisher(CMSPluginBase):\n    \"\"\"Marker for Leaflet Map\"\"\"\n    model = Marker\n    module = 'Leaflet Map'\n    name = _('Marker')\n    render_template = 'djangocms_leaflet/marker.html'\n    require_parent = True\n    parent_classes = ['MapPublisher']\n    allow_children = False\n\n    def render(self, context, instance, placeholder):\n        context.update({'instance': instance})\n        return context\n","repo_name":"MacLake/djangocms-leaflet","sub_path":"src/djangocms_leaflet/cms_plugins.py","file_name":"cms_plugins.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"29550479808","text":"from .serializers import ReturnSerializer\nfrom rest_framework import viewsets\nfrom .models import Return, Product\nfrom rest_framework.response import Response\nfrom datetime import date\nfrom rest_framework.decorators import api_view\nfrom django.shortcuts import get_object_or_404\nimport requests\nfrom rest_framework import status\nfrom .validators import validator_check_content_return, validator_check_return_is_summarized, validator_check_basket_id\n\n\nclass ReturnViewSet(viewsets.ModelViewSet):\n    # queryset = Product.objects.all()\n    serializer_class = ReturnSerializer\n\n    def get_queryset(self):\n        _return = Return.objects.all()\n        return _return\n\n    def list(self, request, *args, **kwargs):\n        queryset = self.get_queryset()\n        serializer = ReturnSerializer(queryset, many=True)\n        return Response(serializer.data)\n\n    def retrieve(self, request, *args, **kwargs):\n        instance = self.get_object()\n        serializer = ReturnSerializer(instance)\n        return Response(serializer.data)\n\n    def create(self, request, *args, **kwargs):\n        validator_check_basket_id(request.query_params['basket_id'])\n        my_return = Return.objects.create(\n            date=date.isoformat(date.today()),\n            sum=0,\n            basket_id=request.query_params['basket_id']\n        )\n        serializer = ReturnSerializer(my_return, many=False)\n        return Response(serializer.data)\n\n    def update(self, request, *args, **kwargs):\n        return Response('Cannot update return')\n\n    def destroy(self, request, *args, **kwargs):\n        return Response('Cannot delete return')\n\n\ndef change_return_value(my_return, quantity, product_from_response, operation):\n    previous_sum_return = float(my_return.sum)\n    products_sum = float(quantity) * float(product_from_response['price'])\n    if operation == 'add':\n        actual_sum_return = previous_sum_return + products_sum\n    elif operation == 'delete':\n        actual_sum_return = previous_sum_return - products_sum\n    else:\n        return Response('Wrong action', status=status.HTTP_401_UNAUTHORIZED)\n    my_return.sum = actual_sum_return\n    my_return.save()\n    return my_return\n\n\n@api_view(['POST'])\ndef add_product_to_return(request, return_id, product_id):\n\n    my_return = get_object_or_404(Return, id=return_id)\n    validator_check_return_is_summarized(my_return)\n    response = requests.get(f'http://127.0.0.1:8001/api/products/{product_id}/')\n    if response.status_code == 404:\n        return Response('Product not found', status=status.HTTP_404_NOT_FOUND)\n\n    product_from_response = response.json()\n    quantity = request.data['quantity']\n\n    my_return_response = requests.get(f'http://127.0.0.1:8004/api/returns/{return_id}/')\n    product_in_return = my_return_response.json()\n\n    for product in product_in_return['products']:\n        if product['product_id'] == product_id:\n            product_to_update = Product.objects.get(id=product['id'])\n            to_add = int(product_to_update.quantity) + int(quantity)\n            product_to_update.quantity = int(to_add)\n            product_to_update.save()\n            break\n\n    else:\n        Product.objects.create(\n            product_id=int(product_from_response['id']),\n            quantity=int(quantity),\n            my_return=my_return\n        )\n\n    change_return_value(my_return, quantity, product_from_response, 'add')\n\n    serializer = ReturnSerializer(my_return, many=False)\n    return Response(serializer.data)\n\n\n@api_view(['POST'])\ndef delete_product_from_return(request, return_id, product_id):\n\n    my_return = get_object_or_404(Return, id=return_id)\n    validator_check_return_is_summarized(my_return)\n    response = requests.get(f'http://127.0.0.1:8001/api/products/{product_id}/')\n    product_from_response = response.json()\n    quantity = request.data['quantity']\n\n    my_return_response = requests.get(f'http://127.0.0.1:8004/api/returns/{return_id}/')\n    product_in_return = my_return_response.json()\n\n    access = False\n\n    for product in product_in_return['products']:\n        if product['product_id'] == product_id:\n            access = True\n            product_to_update = Product.objects.get(id=product['id'])\n            to_update = int(product_to_update.quantity) - int(quantity)\n            if to_update > 0:\n                product_to_update.quantity = int(to_update)\n                product_to_update.save()\n            elif to_update < 0:\n                return Response('Cannot remove more product than you have', status=status.HTTP_401_UNAUTHORIZED)\n            elif to_update == 0:\n                product_to_update.delete()\n            else:\n                return Response('Wrong value', status=status.HTTP_401_UNAUTHORIZED)\n            break\n\n    if not access:\n        return Response('Cannot remove product which is not in the return', status=status.HTTP_401_UNAUTHORIZED)\n\n    change_return_value(my_return, quantity, product_from_response, 'delete')\n\n    serializer = ReturnSerializer(my_return, many=False)\n    return Response(serializer.data)\n\n\ndef change_product_value(product_id, quantity):\n    product_response = requests.get(f'http://127.0.0.1:8001/api/products/{product_id}/')\n    my_product = product_response.json()\n    update_product_quantity = my_product['quantity'] + quantity\n    requests.put(f'http://127.0.0.1:8001/api/products/{product_id}/', data={\n        'name': '',\n        'description': '',\n        'price': '',\n        'quantity': update_product_quantity\n    })\n\n\n@api_view(['GET'])\ndef summarize_return(request, return_id):\n    my_return = get_object_or_404(Return, id=return_id)\n    validator_check_return_is_summarized(my_return)\n    validator_check_content_return(return_id)\n\n    return_response = requests.get(f'http://127.0.0.1:8004/api/returns/{return_id}/')\n    my_return_json = return_response.json()\n    product_in_return = my_return_json['products']\n\n    basket_in_return = my_return_json['basket_id']\n    basket_response = requests.get(f'http://127.0.0.1:8002/api/baskets/{basket_in_return}/')\n    basket_json = basket_response.json()\n\n    product_in_basket = basket_json['products']\n\n    fault = True\n    for basket_product in product_in_basket:\n        for return_product in product_in_return:\n            if basket_product['product_id'] == return_product['product_id']:\n                if basket_product['quantity'] < return_product['quantity']:\n                    break\n                fault = False\n                break\n\n    if fault:\n        raise Exception('You cannot return these products')\n\n    for product in my_return_json['products']:\n        change_product_value(product['product_id'], product['quantity'])\n\n    my_return.summarized = True\n    my_return.save()\n    serializer = ReturnSerializer(my_return, many=False)\n    return Response(serializer.data)","repo_name":"justyna-eevee/MyShopReturnsApp","sub_path":"ReturnsApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"28404200779","text":"\nclass Account():\n\t'''\n\tcreate an account with an account owner and\n\tinitial money, capable of depositing and\n\t(conditionally) withdrawing from the account\n\t'''\n\n\tdef __init__(self, name, amount=0):\n\t\tself.owner = name\n\t\tself.amount = amount\n\n\tdef deposit(self, add_amount):\n\t\tself.amount += add_amount\n\t\treturn f'Deposit Accepted'\n\n\tdef withdraw(self, withdraw_amount):\n\t\ttmp = self.amount - withdraw_amount\n\t\tif tmp < 0:\n\t\t\treturn f'Funds Unavailable'\n\t\telse:\n\t\t\tself.amount = tmp\n\t\t\treturn f'Withdrawal Accepted'\n\tdef __str__(self):\n\t\t'''\n\t\toverride the default representation (the object memory address);\n\t\tprint() uses __str__\n\t\t'''\n\t\treturn f'Account owner: {self.owner}\\nAccount balance: ${self.amount}'\n\n\n\tdef __del__(self):\n\t\tprint('Account of {} with content ${} was deleted'.format(self.owner, self.amount))\n\nif __name__ == '__main__':\n\n\tacct = Account('Anoosheh',100)\n\tprint(acct)\n\t#Account owner: Anoosheh\n\t#Account balance: $100\n\tprint(acct.owner, 'Anoosheh')\n\tprint(acct.amount, '100')\n\tprint(acct.deposit(50), 'Deposit Accepted')\n\tprint(acct.withdraw(75), 'Withdrawal Accepted')\n\tprint(acct.withdraw(500), 'Funds Unavailable!')\n\tdel acct","repo_name":"ania4data/HTML_SQL_Webscraping","sub_path":"Python/excercise/oop3_j_udemy.py","file_name":"oop3_j_udemy.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"2856585011","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\nimport requests\nimport json\nimport datetime as dt\nfrom os import getenv\nfrom pprint import pprint\nfrom walrus import Database\n\nfrom airflow.models import DAG\nfrom airflow.operators.python_operator import PythonOperator\n\n\nNEWS_GRABBER_CONNECTION_URI = getenv('NEWS_GRABBER_CONNECTION_URI', 'redis://redis:6379/1')\n\ndefault_args = {\n    'owner': 'airflow',\n    'start_date': dt.datetime(2020, 9, 8),\n    'retries': 10,\n    'retry_delay': dt.timedelta(minutes=1),\n    'depends_on_past': False,\n}\n\n\ndef start_grab_news():\n    db = Database().from_url(NEWS_GRABBER_CONNECTION_URI)\n    pprint(f'Connection URI = {NEWS_GRABBER_CONNECTION_URI}')\n    result = db.Set('sources').unionstore('queue_rss_load_tasks')\n    return str(result)\n\n\ndef grab_rss():\n    db = Database().from_url(NEWS_GRABBER_CONNECTION_URI)\n    pprint(f'Connection URI = {NEWS_GRABBER_CONNECTION_URI}')\n    queue_rss_load_tasks = db.Set('queue_rss_load_tasks')\n    queue_rss_parse_tasks = db.Set('queue_rss_parse_tasks')\n\n    rss_load_task = queue_rss_load_tasks.pop()\n    i = 0\n    while rss_load_task:\n        rss = json.loads(rss_load_task)\n        agency = rss['agency']\n        url = rss['url']\n        pprint(f'Loading from <{agency}>: <{url}>')\n        response = requests.get(url)\n        pprint(f'Loaded from <{agency}>: <{url}> with code {response.status_code}')\n        response_hash = hash(response.text)\n        pprint(f'Response hash: {response_hash}')\n        queue_rss_parse_tasks.add(json.dumps({'agency': agency, 'url': url, 'rss': response.text}))\n        rss_load_task = queue_rss_load_tasks.pop()\n        i += 1\n    return f'Loaded {i} sources'\n\n\ndef parse_rss():\n    db = Database().from_url(NEWS_GRABBER_CONNECTION_URI)\n    pprint(f'Connection URI = {NEWS_GRABBER_CONNECTION_URI}')\n    queue_rss_parse_tasks = db.Set('queue_rss_parse_tasks')\n    queue_rss_parse_task = queue_rss_parse_tasks.pop()\n    while queue_rss_parse_task:\n        queue_rss_parse_task = queue_rss_parse_tasks.pop()\n\n\ndag = DAG(dag_id='grab_news', default_args=default_args, schedule_interval=dt.timedelta(minutes=5))\n\n\nstart_grab_news_task = PythonOperator(\n    task_id='start_grab_rss',\n    python_callable=start_grab_news,\n    dag=dag\n)\n\nfor i in range(5):\n    grab_rss_task = PythonOperator(\n        task_id=f'grab_rss_{i}',\n        python_callable=grab_rss,\n        dag=dag\n    )\n    parse_rss_task = PythonOperator(\n        task_id=f'parse_rss_{i}',\n        
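# (sketch, not part of the original DAG: the enclosing loop builds five parallel
# grab->parse chains that all fan out from start_grab_rss and compete for work on
# the shared Redis sets, so the resulting task graph is equivalent to)
#   start_grab_rss >> grab_rss_i >> parse_rss_i   # for i in 0..4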
python_callable=parse_rss,\n dag=dag\n )\n\n start_grab_news_task >> grab_rss_task >> parse_rss_task\n","repo_name":"taras-z/web-grabber","sub_path":"dags/grab_rss.py","file_name":"grab_rss.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7843263794","text":"# Project Euler Problem 9\n\n\ndef tripletFind(limit):\n triplets = []\n for x in range(1, limit):\n xx = x * x\n y = x + 1\n z = y + 1\n\n while (z <= limit):\n zz = xx + (y * y)\n while(z * z < zz):\n z += 1\n if z * z == zz and z <= limit:\n triplets.append((x, y, z))\n y += 1\n\n return triplets\n\nfor triplet in tripletFind(1000):\n if sum(triplet) == 1000:\n print(triplet[0] * triplet[1] * triplet[2])\n","repo_name":"sgeller98/project_euler","sub_path":"Solved/Problem_009.py","file_name":"Problem_009.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28777010654","text":"import cv2\nimport numpy as np\n\n\nfrom numba import njit\n\n\n@njit\ndef region_crescimento( image, seed=None):\n rows, cols = image.shape[:2]\n xc, yc = seed\n segmented = np.zeros_like(image)\n segmented[xc, yc] = 255\n current_found = 0\n previous_point = 1\n while previous_point != current_found:\n previous_point = current_found\n current_found = 0\n for row in range(rows):\n for col in range(cols):\n if segmented[row, col] == 255:\n if image[row-1, col-1] < 127:\n segmented[row-1, col-1] = 255\n current_found += 1\n if image[row-1, col] < 127:\n segmented[row - 1, col] = 255\n current_found += 1\n if image[row-1, col + 1] < 127:\n segmented[row - 1, col + 1] = 255\n current_found += 1\n if image[row, col-1] < 127:\n segmented[row, col - 1] = 255\n current_found += 1\n if image[row, col+1] < 127:\n segmented[row - 1, col + 1] = 255\n current_found += 1\n if image[row+1, col-1] < 127:\n segmented[row + 1, col - 1] = 255\n current_found += 1\n if image[row+1, col] < 127:\n segmented[row - 1, col - 1] = 255\n current_found += 1\n if image[row+1, col+1] < 127:\n segmented[row + 1, col + 1] = 255\n current_found += 1\n return segmented\n\n\nif __name__ == '__main__':\n image = cv2.imread(\"image (1).jpg\")\n grayscale = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n segmented_image = region_crescimento(grayscale, seed=(int(grayscale.shape[0]/2), int(grayscale.shape[1]/2)))\n","repo_name":"Pedrcavalc/Exercicios-em-Python-openCV","sub_path":"20/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4382815687","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n__author__ = \"hsz\"\n\n\"\"\"\n如果a + b + c = 1000,且 a2 + b2 = c2 (a,b,c为自然数),如何求出所有a,b,c可能的组合\n\"\"\"\n\nimport time\n\n# start_time = time.time()\n#\n# # 注意是三重循环\n#\n# for a in range(0, 1001):\n# for b in range(0, 1001):\n# for c in range(0, 1001):\n# if a + b + c == 1000 and a ^ 2 + b ^ 2 == c ^ 2:\n# print(\"a,b,c:%d,%d,%d\" % (a, b, c))\n#\n# end_time = time.time()\n#\n# print(end_time - start_time)\n# print(\"complete\")\nstart_time = time.time()\nfor a in range(0, 1001):\n for b in range(0, 1001 - a):\n c = 1000 - a - b\n if a ^ 2 + b ^ 2 == c ^ 2:\n if a + b + c == 1000 and a ^ 2 + b ^ 2 == c ^ 2:\n print(\"a,b,c:%d,%d,%d\" % (a, b, c))\n\nend_time = time.time()\n\nprint(end_time - 
start_time)\nprint(\"complete\")\n","repo_name":"Thousandhack/algorithms_training","sub_path":"review_sort/train_test/19_a_b_c_demo.py","file_name":"19_a_b_c_demo.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7376656480","text":"#!/usr/bin/python3\n\nimport hashlib\nimport datetime\nimport export_pb2 # https://github.com/google/exposure-notifications-server/blob/main/internal/pb/export/export.proto\nimport json\nimport os\nimport urllib.parse\nimport urllib.request\nimport zipfile\n\ncache_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'cache'))\nos.makedirs(cache_dir, exist_ok=True)\n\ncdn_prefix = 'https://covid19radar-jpn-prod.azureedge.net/c19r/'\nlist_urls = [\n ('list1.json', cdn_prefix + '440/list.json'),\n ('list2.json', cdn_prefix + '441/list.json'),\n]\n\nnow = datetime.datetime.now()\n\ndef cached_fetch(name, url):\n path = os.path.join(cache_dir, name)\n cache_hit = False\n try:\n rv = os.stat(path)\n cached = datetime.datetime.fromtimestamp(rv.st_mtime)\n cache_hit = now < cached + datetime.timedelta(hours=1)\n except FileNotFoundError as e:\n pass\n\n if not cache_hit:\n urllib.request.urlretrieve(url, path)\n return path\n\nfile_urls = []\nfor name, url in list_urls:\n with open(cached_fetch(name, url)) as fd:\n for item in json.load(fd):\n mtime = datetime.datetime.fromtimestamp(item['created'] / 1000.0)\n if mtime > now - datetime.timedelta(days=3):\n file_urls.append(item['url'])\n\n# i = 0\nfor url in file_urls:\n # i += 1\n name = hashlib.sha256(url.encode()).hexdigest()[:8]\n # name = os.path.basename(urllib.parse.urlparse(url).path)\n # print(name)\n with zipfile.ZipFile(cached_fetch(name, url)) as zip:\n with zip.open('export.bin') as bin:\n assert(bin.read(16) == b'EK Export v1 ')\n teke = export_pb2.TemporaryExposureKeyExport.FromString(bin.read())\n # print(teke)\n for key in teke.keys:\n # print(' key_data: {}'.format(key.key_data))\n ts = key.rolling_start_interval_number * 10 * 60\n # dur = key.rolling_period * 10\n print('{}: {}'.format(key.key_data.hex(), datetime.datetime.fromtimestamp(ts)))\n # if key.HasField('report_type'):\n # name = export_pb2.TemporaryExposureKey.ReportType.Name(key.report_type)\n # print(' report_type: {}'.format(name))\n # print(key)\n","repo_name":"tzik/cocoa-dump","sub_path":"dump.py","file_name":"dump.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"30949220543","text":"import typing\nfrom enum import Enum\n\nimport strawberry\nfrom strawberry.field import convert_args\n\n\ndef test_simple_types():\n args = {\"integer\": 1, \"string\": \"abc\", \"float\": 1.2}\n\n annotations = {\"integer\": int, \"string\": str, \"float\": float}\n\n assert convert_args(args, annotations) == {\n \"integer\": 1,\n \"string\": \"abc\",\n \"float\": 1.2,\n }\n\n\ndef test_list():\n args = {\n \"integer_list\": [1, 2],\n \"string_list\": [\"abc\", \"cde\"],\n \"float_list\": [1.2, 2.3],\n }\n\n annotations = {\n \"integer_list\": typing.List[int],\n \"string_list\": typing.List[str],\n \"float_list\": typing.List[float],\n }\n\n assert convert_args(args, annotations) == {\n \"integer_list\": [1, 2],\n \"string_list\": [\"abc\", \"cde\"],\n \"float_list\": [1.2, 2.3],\n }\n\n\ndef test_input_types():\n @strawberry.input\n class MyInput:\n abc: str\n say_hello_to: str\n was: int = strawberry.field(name=\"having\", 
is_input=True)\n fun: str = strawberry.field(is_input=True)\n\n args = {\n \"input\": {\"abc\": \"example\", \"sayHelloTo\": \"Patrick\", \"having\": 10, \"fun\": \"yes\"}\n }\n\n annotations = {\"input\": MyInput}\n\n assert convert_args(args, annotations) == {\n \"input\": MyInput(abc=\"example\", say_hello_to=\"Patrick\", was=10, fun=\"yes\")\n }\n\n\ndef test_optional_input_types():\n @strawberry.input\n class MyInput:\n abc: str\n\n args = {\"input\": {\"abc\": \"example\"}}\n\n annotations = {\"input\": typing.Optional[MyInput]}\n\n assert convert_args(args, annotations) == {\"input\": MyInput(abc=\"example\")}\n\n\ndef test_list_of_input_types():\n @strawberry.input\n class MyInput:\n abc: str\n\n args = {\"input_list\": [{\"abc\": \"example\"}]}\n\n annotations = {\"input_list\": typing.List[MyInput]}\n\n assert convert_args(args, annotations) == {\"input_list\": [MyInput(abc=\"example\")]}\n\n\ndef test_optional_list_of_input_types():\n @strawberry.input\n class MyInput:\n abc: str\n\n args = {\"input_list\": [{\"abc\": \"example\"}]}\n\n annotations = {\"input_list\": typing.Optional[typing.List[MyInput]]}\n\n assert convert_args(args, annotations) == {\"input_list\": [MyInput(abc=\"example\")]}\n\n\ndef test_nested_input_types():\n @strawberry.enum\n class ChangeType(Enum):\n MAJOR = \"major\"\n MINOR = \"minor\"\n PATCH = \"patch\"\n\n @strawberry.input\n class ReleaseInfo:\n change_type: ChangeType\n changelog: str\n\n @strawberry.enum\n class ReleaseFileStatus(Enum):\n MISSING = \"missing\"\n INVALID = \"invalid\"\n OK = \"ok\"\n\n @strawberry.input\n class AddReleaseFileCommentInput:\n pr_number: int\n status: ReleaseFileStatus\n release_info: typing.Optional[ReleaseInfo]\n\n args = {\n \"input\": {\n \"prNumber\": 12,\n \"status\": ReleaseFileStatus.OK.value,\n \"releaseInfo\": {\n \"changeType\": ChangeType.MAJOR.value,\n \"changelog\": \"example\",\n },\n }\n }\n\n annotations = {\"input\": AddReleaseFileCommentInput}\n\n assert convert_args(args, annotations) == {\n \"input\": AddReleaseFileCommentInput(\n pr_number=12,\n status=ReleaseFileStatus.OK,\n release_info=ReleaseInfo(change_type=ChangeType.MAJOR, changelog=\"example\"),\n )\n }\n\n args = {\n \"input\": {\n \"prNumber\": 12,\n \"status\": ReleaseFileStatus.OK.value,\n \"releaseInfo\": None,\n }\n }\n\n annotations = {\"input\": AddReleaseFileCommentInput}\n\n assert convert_args(args, annotations) == {\n \"input\": AddReleaseFileCommentInput(\n pr_number=12, status=ReleaseFileStatus.OK, release_info=None\n )\n }\n","repo_name":"sofia100/strawberry","sub_path":"tests/test_arguments_converter.py","file_name":"test_arguments_converter.py","file_ext":"py","file_size_in_byte":3749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"9099935043","text":"import numpy as np\nfrom PIL import Image\n\nclass Stereo :\n '''\n Stereo 기본 베이스다. 이 구현에서는 left 이미지와 right 이미지를 알고 있다고 가정한다.\n 물론 몰라도 된다. 
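    A minimal usage sketch, assuming a rectified pair exists at the hypothetical\n    paths 'left.png' and 'right.png' (rectification is what lets run() below search\n    along a single row instead of the whole image):\n\n        stereo = Stereo('left.png', 'right.png', kernel_size=9)\n        stereo.run()\n        stereo.save('disparity.png')\n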
이 구현에서는 몰라도 작동되게 되어있다.\n 그렇지만 모른다면 search space가 매우 커진다.\n \n 먼저 left image와 right image, kernel size를 초기화해준다.\n 그리고 stereo 작동시 필요한 half of kernel size, row_size, col_size를 초기화해준다.\n\n 출력해야되는 값인 disparity도 미리 준비해둔다.\n '''\n def __init__(self, left_img_path, right_img_path, kernel_size) :\n \n left_img = Image.open(left_img_path).convert('L')\n self.left_img = np.asarray(left_img)\n\n right_img = Image.open(right_img_path).convert('L')\n self.right_img = np.asarray(right_img)\n\n self.kernel_size = kernel_size\n self.kernel_size_half = int(self.kernel_size / 2)\n\n print(self.left_img.shape)\n self.row_size, self.col_size = self.left_img.shape \n \n self.disparity = np.zeros((self.row_size, self.col_size), np.uint8)\n\n '''\n row를 기준으로 epipolar 라인을 설정한다. 그리고 row의 각 colunm을 중심으로 하는 패치가 있다고 생각한다.\n left 이미지의 patch를 따온다(center 값을 전달한다.)\n 그리고 그 패치를 right 이미지의 패치와 비교한다.\n ssd가 가장 작을 때의 right image colunm 값과 지금 비교의 기준인 left image colunm 값을 뺀 patch라는 값을 disparity에 저장한다.\n '''\n def run(self) :\n for row in range(self.kernel_size_half, self.row_size - self.kernel_size_half): \n print(\"\\rProcessing.. %d%% complete\"%(row / (self.row_size - self.kernel_size_half) * 100), end=\"\", flush=True) \n \n for col in range(self.kernel_size_half, self.col_size - self.kernel_size_half):\n min_ssd = 987654321\n best_patch = 0\n \n for patch in range(100): \n ssd = self.cal_ssd((row, col), patch)\n if ssd < min_ssd:\n min_ssd = ssd\n best_patch = patch\n \n self.disparity[row, col] = (best_patch / 100)* 255\n\n def save(self, file_name) :\n Image.fromarray(self.disparity).save(file_name)\n\n '''\n ssd를 계산합니다. ssd를 이용하는 것이 편하다.\n ncc도 도전해보았는데 이미지를 [-1, 1]로 정규화하는 과정이 필요해 번거롭다.\n 정규화 안하고 ncc를 사용하면...결과가 좋지 않았다.\n '''\n def cal_ssd(self, center, patch) :\n ssd = 0 \n \n for u in range(-self.kernel_size_half, self.kernel_size_half):\n for v in range(-self.kernel_size_half, self.kernel_size_half):\n temp_ssd = int(self.left_img[center[0]+u, center[1]+v]) - int(self.right_img[center[0]+u, (center[1]+v) - patch]) \n ssd += (temp_ssd ** 2)\n\n return ssd\n '''\n ncc를 계산합니다. 
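    For reference, a properly normalised NCC is zero-mean: without subtracting each\n    patch mean, the raw dot product is dominated by overall patch brightness, which\n    matches the poor results described here. A sketch, assuming equally sized NumPy\n    float patches p and q:\n\n        def zncc(p, q):\n            p = p - p.mean()\n            q = q - q.mean()\n            return (p * q).sum() / (np.sqrt((p * p).sum() * (q * q).sum()) + 1e-8)\n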
ssd를 이용하는 것이 편하다.\n '''\n def cal_ncc(self, center, patch) :\n ncc = 0 \n \n for u in range(-self.kernel_size_half, self.kernel_size_half):\n for v in range(-self.kernel_size_half, self.kernel_size_half):\n ncc += int(self.left_img[center[0]+u, center[1]+v]) * int(self.right_img[center[0]+u, (center[1]+v) - patch]) \n \n\n return 1 - ncc\n\nif __name__ == '__main__':\n stereo = Stereo('img/im2.png', 'img/im6.png', 12)\n\n stereo.run()\n stereo.save('version_new3.png')","repo_name":"yugeeklab/stereo","sub_path":"stereo.py","file_name":"stereo.py","file_ext":"py","file_size_in_byte":3801,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30666135703","text":"# -*- coding: utf-8 -*-\n\nimport zipfile\nimport tarfile\nimport os\nimport re\nfrom tnmlearn.other import paths\n\n\ndef extract_file(filePath, to_directory):\n if filePath.endswith('.zip'):\n opener, mode = zipfile.ZipFile, 'r'\n elif filePath.endswith('.tar.gz') or filePath.endswith('.tgz'):\n opener, mode = tarfile.open, 'r:gz'\n elif filePath.endswith('.tar.bz2') or filePath.endswith('.tbz'):\n opener, mode = tarfile.open, 'r:bz2'\n else: \n return\n\n os.makedirs(to_directory, exist_ok=True)\n file = opener(filePath, mode)\n try: file.extractall(to_directory)\n finally: file.close()\n \n\ndef split_dog_cat_image_files(traindir):\n catdir = os.path.join(traindir, 'cat')\n dogdir = os.path.join(traindir, 'dog')\n os.makedirs(catdir, exist_ok=True)\n os.makedirs(dogdir, exist_ok=True)\n \n imagepaths = [(f, os.path.basename(f)) for f in paths.list_images(traindir)]\n imagepaths = [(f, os.path.join(dogdir if n.startswith('dog') else catdir, n)) \n for (f, n) in imagepaths]\n \n for (f, fn) in imagepaths:\n os.rename(f, fn)\n\n\ndef split_17flowers(traindir):\n for dir_id in range(17):\n os.makedirs(os.path.join(traindir, 'dir_'+str(dir_id)), exist_ok=True)\n \n imagepaths = [(f, os.path.basename(f)) for f in paths.list_images(traindir)]\n imagepaths = [(f, os.path.join(traindir, 'dir_'+str((int(i)-1)//80), n)) \n for (f, n) in imagepaths \n for i in re.findall('(\\d{4})', n)]\n \n for (f, fn) in imagepaths:\n os.rename(f, fn)\n\n","repo_name":"t2wain/machine-learning","sub_path":"tnmlearn/datasets/dog_cat_dataset.py","file_name":"dog_cat_dataset.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16009829407","text":"import subprocess\nimport os\nfrom subprocess import CompletedProcess\nfrom .logger import logger\n\nclass Command:\n def __init__(self, process: CompletedProcess) -> None:\n \"\"\"\n Command should never be initialized using Command(...); instead, always use classmethod Command.run(...)\n :param process: a completed process\n \"\"\"\n self.process = process\n self.output = self.process.stdout\n self.error = self.process.stderr\n \n def __getitem__(self, item):\n if item == 0:\n return self.process\n elif item == 1:\n return self.output\n elif item == 2: \n return self.error\n else:\n raise IndexError(\"Object only has three indices: \\n[0] process:subprocess.CompletedProcess\\n[1] output:str\\n[2] error:str\")\n\n def __str__(self) -> str:\n return subprocess.list2cmdline(self.process.args)\n \n def recover_command(self) -> 'list[str]':\n \"\"\"return the list of arguments send to the executable; use str(Output) to recover the full command-line as a string\n\n :return: argument list used in a command\n :rtype: list[str]\n \"\"\"\n r:list[str] = 
self.process.args\n return r\n\n @classmethod\n def run(cls, cmd:str, executable:str='/bin/bash', cwd:str=os.getcwd(), suppress_log = False):\n if not suppress_log:\n logger.debug(f\"\"\"attempting to execute \"{cmd}\" using \"{executable}\" in \"{cwd}\" \"\"\")\n if os.access(executable, os.X_OK):\n if os.path.isdir(cwd):\n try:\n process = subprocess.run(cmd, shell=True, executable=executable, cwd=cwd, capture_output=True, text=True)\n except Exception as e:\n logger.warning(f\"an exception occurred while trying to execute command: {e}\")\n else:\n if not suppress_log:\n logger.info(f\"process will be returned as a Command object in index 0\")\n if process.stdout and not suppress_log:\n logger.info(f\"BEGIN OUTPUT FROM COMMAND: \\n{process.stdout}\")\n logger.info(f\"END OUTPUT FROM COMMAND\")\n logger.info(f\"output will be returned as an Output object in index 1\")\n if process.stderr and not suppress_log:\n logger.error(f\"BEGIN OUTPUT FROM COMMAND: \\n{process.stderr}\")\n logger.error(f\"END OUTPUT FROM COMMAND\")\n logger.info(f\"error will be returned as an Output object in index 2\")\n return cls(process)\n \n else:\n logger.error(f\"'{cwd}' is not a directory\")\n raise RuntimeError()\n else:\n logger.error(f\"'{executable}' is not an executable\")\n raise RuntimeError()","repo_name":"The-Nicholas-R-Barrow-Company-LLC/PyMacApp","sub_path":"pymacapp/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":2899,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"52"} +{"seq_id":"13004585621","text":"from troposphere import Join, Ref, Sub, Tag, Tags\nfrom troposphere.ec2 import SecurityGroup, SecurityGroupRule\n\nfrom . import (\n USE_DOKKU,\n USE_EB,\n USE_EC2,\n USE_ECS,\n USE_EKS,\n USE_GOVCLOUD,\n USE_NAT_GATEWAY\n)\nfrom .common import administrator_ip_address\nfrom .template import template\nfrom .vpc import vpc\n\nif not USE_EKS:\n # EKS manages its own ELBs, so this stack doesn't have one\n load_balancer_security_group = SecurityGroup(\n \"LoadBalancerSecurityGroup\",\n template=template,\n GroupDescription=\"Web load balancer security group.\",\n VpcId=Ref(vpc),\n SecurityGroupIngress=[\n # allow incoming traffic from the public internet to the load balancer\n # on ports 80 and 443\n SecurityGroupRule(\n IpProtocol=\"tcp\",\n FromPort=port,\n ToPort=port,\n CidrIp=\"0.0.0.0/0\",\n ) for port in [\"80\", \"443\"]\n ],\n Tags=Tags(\n Name=Join(\"-\", [Ref(\"AWS::StackName\"), \"elb\"]),\n ),\n )\n\n # allow traffic from the load balancer subnets to the web workers\n if USE_ECS or USE_EC2:\n # if using ECS or EC2, allow traffic to the configured WebWorkerPort\n web_worker_ports = [Ref(\"WebWorkerPort\")]\n elif USE_GOVCLOUD:\n # if using GovCloud (real EC2 instances), allow traffic to the configured\n # WebWorkerPort and port 443\n web_worker_ports = [Ref(\"WebWorkerPort\"), \"443\"]\n else:\n # otherwise, if using Elastic Beanstalk, allow traffic only to EB's default\n # web worker port (80)\n web_worker_ports = [\"80\"]\n\n # HTTP from web load balancer\n ingress_rules = [SecurityGroupRule(\n IpProtocol=\"tcp\",\n FromPort=port,\n ToPort=port,\n SourceSecurityGroupId=Ref(load_balancer_security_group),\n ) for port in web_worker_ports]\n\n # Health check\n if not USE_EB and not USE_DOKKU:\n ingress_rules.append(SecurityGroupRule(\n IpProtocol=\"tcp\",\n FromPort=Ref(\"WebWorkerHealthCheckPort\"),\n ToPort=Ref(\"WebWorkerHealthCheckPort\"),\n Description=\"ELB Health Check\", # SecurityGroupRule doesn't support a 
Description attribute\n SourceSecurityGroupId=Ref(load_balancer_security_group),\n ))\n\n if not USE_NAT_GATEWAY:\n # Allow direct administrator access via SSH.\n ingress_rules.append(SecurityGroupRule(\n IpProtocol=\"tcp\",\n FromPort=\"22\",\n ToPort=\"22\",\n Description=\"Administrator SSH Access\",\n CidrIp=administrator_ip_address,\n ))\nelse:\n ingress_rules = []\n\ncontainer_security_group = SecurityGroup(\n # NOTE: If creating an EKS cluster, eks.py will modify this security group.\n 'ContainerSecurityGroup',\n template=template,\n GroupDescription=\"Container security group.\",\n VpcId=Ref(vpc),\n SecurityGroupIngress=ingress_rules,\n Tags=Tags(\n Tag(\"Name\", Join(\"-\", [Ref(\"AWS::StackName\"), \"container\"])),\n *(\n [Tag(Sub(\"kubernetes.io/cluster/${EksCluster}\"), \"owned\")]\n if USE_EKS\n else []\n ),\n ),\n)\n","repo_name":"caktus/aws-web-stacks","sub_path":"stack/security_groups.py","file_name":"security_groups.py","file_ext":"py","file_size_in_byte":3187,"program_lang":"python","lang":"en","doc_type":"code","stars":100,"dataset":"github-code","pt":"52"} +{"seq_id":"36386426164","text":"from django.shortcuts import render, HttpResponseRedirect, reverse\nfrom django.contrib.auth.decorators import login_required\nfrom twitterclone.tweets.models import Tweet\nfrom twitterclone.tweets.forms import AddTweetForm\nimport re\nfrom twitterclone.notifications.models import Notifications\nfrom twitterclone.twitterusers.models import TwitterUser\n\n\ndef view_tweet(request, id):\n html = 'tweet.html'\n data = Tweet.objects.filter(id=id)\n return render(request, html, {'data': data})\n\n\n@login_required\ndef viewhomepage(request):\n html = 'index.html'\n following = request.user.twitteruser.following.all()\n data = Tweet.objects.filter(\n tweet_author__in=following).order_by('-post_date')\n return render(request, html, {'data': data})\n\n\n@login_required\ndef make_tweets(request):\n html = 'add_tweet.html'\n if request.method == 'POST':\n form = AddTweetForm(request.POST)\n if form.is_valid():\n data = form.cleaned_data\n tweet = Tweet.objects.create(\n tweet_author=request.user.twitteruser,\n body=data['body']\n )\n if '@' in data['body']:\n usernames = re.findall(r'@(\\w+)', data['body'])\n for username in usernames:\n twitteruser = TwitterUser.objects.get(\n user__username=username)\n Notifications.objects.create(\n tweet=tweet,\n twitter_user=twitteruser\n )\n return HttpResponseRedirect(reverse('homepage'))\n form = AddTweetForm()\n return render(request, html, {'form': form})\n","repo_name":"mwilliamson21/Twitterclone1","sub_path":"twitterclone/tweets/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5261132788","text":"import os\nimport sys\nimport torch\nimport timm\nimport torchvision.transforms as T\nfrom torchvision.models.resnet import resnet18\nimport torch.nn as nn\n\n\nclass EmbeddingModel:\n \"\"\"Class representing the embedding model\n\n :param train_dir: Training directory containing the saved weights of the embedding model\n :param dataset: Name of the dataset\n\n :ivar train_dir: Training directory\n :ivar dataset: Name of the dataset\n :ivar args: Training arguments\n :ivar device: Device\n :ivar emb_model: Embedding model\n \"\"\"\n def __init__(self, train_dir, dataset):\n self.train_dir = train_dir\n self.dataset = dataset.lower()\n if self.dataset == 'cifar100':\n self.args = {'dataset': self.dataset, 'fe_model': 
'efficientnet_b1', 'num_classes': 20, 'batch': 800}\n elif self.dataset == 'nih':\n self.args = {'dataset': self.dataset, 'fe_model': 'resnet18', 'num_classes': 2, 'batch': 800}\n else:\n print(f'Dataset {self.dataset} not implemented')\n sys.exit()\n self.device = get_device()\n self.emb_model = self.get_emb_model(os.getcwd())\n\n def get_emb_model(self, wkdir):\n \"\"\"Initialize base model\n\n :param wkdir:\n :return: model\n \"\"\"\n if self.dataset == 'cifar100':\n # load model\n model = timm.create_model(self.args['fe_model'], pretrained=True, num_classes=self.args['num_classes'])\n model = self.load_emb_net_from_checkpoint(model, wkdir)\n model = torch.nn.Sequential(*list(model.children())[:-1])\n elif self.dataset == 'nih':\n model = Resnet(self.args['num_classes'], self.train_dir)\n print('Loaded Model', self.args['fe_model'])\n model = to_device(model, self.device)\n return model\n\n def get_embedding(self, batch):\n \"\"\"Get embedding from images\n\n :param batch: Batch of images\n :return: Feature vectors\n \"\"\"\n if self.dataset == 'cifar100':\n batch = T.Resize((224, 224))(batch)\n self.emb_model.eval()\n batch_features = self.emb_model(batch)\n return batch_features\n\n def get_emb_net_dir(self, wkdir):\n \"\"\"Get training directory of the embedding net\n\n :param wkdir: Working directory\n :return: base_cnn_dir\n \"\"\"\n args_base = {'model': self.args['fe_model'],\n 'num_classes': self.args['num_classes'],\n 'batch': 64}\n\n base_cnn_dir = get_train_dir(wkdir, args_base, 'base_net')\n return base_cnn_dir\n\n def load_emb_net_from_checkpoint(self, emb_model, wkdir, mode='best'):\n \"\"\"Load base model weights from checkpoint\n\n :param emb_model: Initialized base model\n :param wkdir: Working directory\n :param mode: Checkpoint to load (best or latest)\n :return: base model\n \"\"\"\n # get checkpoint\n cp_dir = self.get_emb_net_dir(wkdir) + 'checkpoints/checkpoint.' 
+ mode\n try:\n # load state dict from checkpoint\n checkpoint = torch.load(cp_dir)\n emb_model.load_state_dict(checkpoint['model_state_dict'])\n print('Found base net checkpoint at', cp_dir)\n except FileNotFoundError:\n print('No base net Checkpoint found at', cp_dir)\n sys.exit()\n\n # freeze base model layers\n for param in emb_model.parameters():\n param.requires_grad = False\n\n return emb_model\n\n\ndef get_device():\n \"\"\"Get active device\n\n :return: device\n \"\"\"\n if torch.cuda.is_available():\n return torch.device(\"cuda\")\n return torch.device(\"cpu\")\n\n\ndef to_device(data, device):\n \"\"\"Load to device\n\n :param data: Data\n :param device: Device\n :return: Data loaded to device\n \"\"\"\n if isinstance(data, (list, tuple)):\n return [to_device(x, device) for x in data]\n return data.to(device, non_blocking=True)\n\n\ndef concat_args(args, mode):\n \"\"\"Concatenate args to string\n\n :param args: Args\n :param mode: Mode\n :return: String\n \"\"\"\n args_string = mode + '@'\n for key in args:\n if key not in ['batch', 'epochs', 'input_shape']:\n args_string += str(key) + '-' + str(args[key]) + '-'\n return args_string[:-1]\n\n\ndef get_train_dir(wkdir, args, mode):\n \"\"\"Get or create training directory\n\n :param wkdir: Working directory\n :param mode: Mode\n :param args: Args\n \"\"\"\n\n path = wkdir + '/CIFAR100/' + concat_args(args, mode) + '/'\n try:\n os.mkdir(path)\n except:\n pass\n return path\n\n\nclass Resnet(torch.nn.Module):\n def __init__(self, num_classes, train_dir):\n super().__init__()\n self.num_classes = num_classes\n self.resnet = resnet18(pretrained=True)\n\n try:\n print('load Resnet-18 checkpoint')\n print(self.load_my_state_dict(\n torch.load(\n train_dir + \"/NIH/emb_net@dataset-nih-model-resnet18-num_classes-2/checkpoints/checkpoint.pretrain\"),\n strict=False))\n except KeyError:\n print('load Resnet-18 pretrained on ImageNet')\n\n self.resnet.fc = nn.Linear(self.resnet.fc.in_features, num_classes)\n\n\n def load_my_state_dict(self, state_dict, strict=True):\n pretrained_dict = {k: v for k, v in state_dict.items() if 'fc' not in k}\n self.resnet.load_state_dict(pretrained_dict, strict=strict)\n\n def forward(self, x, return_features=True):\n x = self.resnet.conv1(x)\n x = self.resnet.bn1(x)\n x = self.resnet.relu(x)\n x = self.resnet.maxpool(x)\n x = self.resnet.layer1(x)\n x = self.resnet.layer2(x)\n x = self.resnet.layer3(x)\n x = self.resnet.layer4(x)\n x = self.resnet.avgpool(x)\n x = torch.flatten(x, 1)\n features = torch.flatten(x, 1)\n if return_features:\n return features\n else:\n out = self.resnet.fc(features)\n out = nn.Softmax(dim=1)(out)\n return out\n\n","repo_name":"ptrckhmmr/learning-to-defer-with-limited-expert-predictions","sub_path":"Embedding-Semi-Supervised/feature_extractor/embedding_model.py","file_name":"embedding_model.py","file_ext":"py","file_size_in_byte":6057,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"52"} +{"seq_id":"28190442712","text":"## @package wboarder.data.Board\n#\n# Contains the Board class\n#\n# \\author Steven Maio\n\nfrom src.wboarder.data.Point import Point\n\n\n## A board object representing the data in the image\n#\nclass Board:\n\n ## Constructs an empty board with **num_rows** rows and **num_cols** columns.\n # \n # Initializes an empty board, in which the default value for every entry\n # is 0.\n #\n # @param num_cols the number of columns in the board\n # @param num_rows the number of rows in the board\n # @param default_value the 
default value to assign to each row\n def __init__(self, num_rows : int, num_cols : int, default_value : object):\n self._num_rows = num_rows\n self._num_cols = num_cols\n points = []\n for i in range(num_rows):\n rows = []\n for j in range(num_cols):\n rows.append(default_value)\n points.append(rows)\n self._points = points\n\n ## Returns the number of rows on the board\n #\n # @return an integer value indicating the total number of rows\n def getNumRows(self) -> int:\n return self._num_rows\n\n ## Returns the number of rows on the board\n #\n # @return an integer value indicating the total number of columns\n def getNumCols(self) -> int:\n return self._num_cols\n\n ## Accessor method for the board. Returns the value at location (row, col)\n # on the board.\n #\n # @param row the row number of the entry\n # @param col the column number of the entry\n # @return the float value stored at row,col\n def get(self, point : Point) -> object:\n row = point.getRow()\n col = point.getCol()\n if 0 <= row < self._num_rows and 0 <= col < self._num_cols:\n return self._points[row][col]\n else:\n raise ValueError(\"illegal value for row or col\")\n\n ## Mutator method for the board. Sets the value at location (row, col) to\n # value \n #\n # @param row the row number of the entry being modified\n # @param col the column number of the entry being modified\n # @param value the new value of the entry being modified\n def set(self, row : int, col : int, value : object) -> None:\n if 0 <= row < self._num_rows and 0 <= col < self._num_cols:\n self._points[row][col] = value\n else:\n raise ValueError(\"illegal value for row or col\")\n","repo_name":"StevenMaio/wdrawer","sub_path":"src/wboarder/data/Board.py","file_name":"Board.py","file_ext":"py","file_size_in_byte":2407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18664821750","text":"import argparse\r\nfrom net import Net\r\nimport os\r\nimport time\r\nfrom thop import profile\r\nimport torch\r\n\r\nos.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'\r\nparser = argparse.ArgumentParser(description=\"PyTorch BasicIRSTD Parameter and FLOPs\")\r\nparser.add_argument(\"--model_names\", default=['ACM', 'ALCNet', 'DNANet', 'ISNet', 'RISTDnet', 'UIUNet', 'U-Net', 'RDIAN', 'ISTDU-Net'], type=list, \r\n help=\"model_name: 'ACM', 'ALCNet', 'DNANet', 'ISNet', 'RISTDnet', 'UIUNet', 'U-Net', 'RDIAN', 'ISTDU-Net'\")\r\n\r\nglobal opt\r\nopt = parser.parse_args()\r\n\r\nif __name__ == '__main__':\r\n opt.f = open('./params_' + (time.ctime()).replace(' ', '_') + '.txt', 'w')\r\n input_img = torch.rand(1,1,256,256).cuda()\r\n for model_name in opt.model_names:\r\n net = Net(model_name, mode='test').cuda() \r\n flops, params = profile(net, inputs=(input_img, ))\r\n print(model_name)\r\n print('Params: %2fM' % (params/1e6))\r\n print('FLOPs: %2fGFLOPs' % (flops/1e9))\r\n opt.f.write(model_name + '\\n')\r\n opt.f.write('Params: %2fM\\n' % (params/1e6))\r\n opt.f.write('FLOPs: %2fGFLOPs\\n' % (flops/1e9))\r\n opt.f.write('\\n')\r\n opt.f.close()\r\n ","repo_name":"XinyiYing/BasicIRSTD","sub_path":"cal_params.py","file_name":"cal_params.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"52"} +{"seq_id":"10512615325","text":"from math import pi\n\n\ndef Fastener_Type(thickness_backplate, thickness_vehiclewall, inner_diameter_fastener, outer_diameter_fastener, youngs_modulus_backplate, youngs_modulus_fastener,\n 
youngs_modulus_vehiclewall):\n\n t2 = thickness_backplate\n t3 = thickness_vehiclewall\n D_fi = inner_diameter_fastener\n D_fo = outer_diameter_fastener\n Eb = youngs_modulus_backplate\n Ef = youngs_modulus_fastener\n Ev = youngs_modulus_vehiclewall\n\n # Determining the typical substitution length\n L_nut = 0.4 * D_fo\n L_shank = t2 + t3\n L_head = 0.4 * D_fo # Cylindrical Head\n L_eng = 0.4 * D_fi # Nut-tightened\n\n # Area of the different parts of the bolt\n A_nut = pi * ((D_fo / 2) ** 2)\n A_shank = pi * ((D_fi / 2) ** 2)\n A_head = pi * ((D_fo / 2) ** 2)\n A_eng = pi * ((D_fi / 2) ** 2)\n\n try:\n # Calculating the delta for the back_plate\n delta_a1 = 4 * t2 / (Eb * pi * ((D_fo ** 2) - (D_fi ** 2)))\n\n # Calculating the delta for the vehicle wall\n delta_a2 = 4 * t3 / (Ev * pi * ((D_fo ** 2) - (D_fi ** 2)))\n\n # Calculating the delta for the bolt\n delta_b = (1 / Ef) * ((L_nut / A_nut) + (L_shank / A_shank) + (L_head / A_head) + (L_eng / A_eng))\n\n # Force ratio\n phi = (delta_a1 + delta_a2) / (delta_b + delta_a1 + delta_a2)\n except ZeroDivisionError:\n phi = None\n\n return phi\n","repo_name":"rmachavariani/wp4","sub_path":"Fastener_type.py","file_name":"Fastener_type.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"22062670952","text":"#!/usr/bin/python\n#-*- coding: utf-8 -*-\n\nimport stock_basic\nimport stock_his\nimport logging\nimport logging.config\n\nlogging.config.fileConfig('logger.conf')\nlogger = logging.getLogger('stock')\n\ndef init_base_data(random_code,_date=None):\n _base = stock_basic.Stock_Basic(random_code)\n df = _base.init_data(_date)\n return df\n\ndef init_stock_data(random_code,_code,_days,_start,_end = None):\n logger.info(\"start deal %s !\"%(_code))\n _his = stock_his.Stock_His(random_code,_code)\n return _his.init_data(_start,_days,_end)\n\nif __name__ == \"__main__\":\n logger.info(\"start stock data prepare !\")\n random_code = \"test\"\n df = init_base_data(random_code)\n _df_index = df.index.values\n for i in range(len(_df_index)):\n _code = _df_index[i]\n _d1 = init_stock_data(random_code,_code,10,'2016-01-01')\n logger.info(\"complete stock data !\")","repo_name":"pulin2004/sample_scrapy","sub_path":"stock/stock_cal.py","file_name":"stock_cal.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"8690234554","text":"from rest_framework import serializers\nfrom .models import Folder, Image\n\nclass ImageDetailSerializer(serializers.ModelSerializer):\n class Meta:\n model = Image\n fields = (\n 'id',\n 'get_original_url',\n 'get_thumbnail_url',\n 'created_at',\n 'updated_at',\n )\n\n\nclass ImageListSerializer(serializers.ModelSerializer):\n class Meta:\n model = Image\n fields = (\n 'id',\n 'get_original_url',\n 'get_thumbnail_url',\n )\n\n\nclass ImageSerializer(serializers.ModelSerializer):\n thumbnail = serializers.ImageField(read_only=True)\n\n class Meta:\n model = Image\n fields = \"__all__\"\n\n\nclass FolderSerializer(serializers.ModelSerializer):\n images_count = serializers.SerializerMethodField(read_only=True)\n\n def get_images_count(self, obj):\n return obj.image_set.count()\n\n class Meta:\n model = Folder\n fields = (\n 'id',\n 'name',\n 'images_count',\n 'created_at',\n )\n\n\nclass FolderDetailSerializer(serializers.ModelSerializer):\n images_count = serializers.SerializerMethodField(read_only=True)\n\n def 
get_images_count(self, obj):\n return obj.image_set.count()\n\n class Meta:\n model = Folder\n fields = (\n 'name',\n 'images_count',\n 'created_at',\n 'updated_at',\n )\n\n\nclass FolderImageSerializer(serializers.ModelSerializer):\n images = serializers.SerializerMethodField()\n\n def get_images_count(self, obj):\n return obj.image_set.count()\n\n def get_images(self, obj):\n instance = obj.image_set.all()\n serializer = ImageListSerializer(instance, many=True)\n return serializer.data\n\n class Meta:\n model = Folder\n fields = (\n 'name',\n 'images',\n 'created_at',\n 'updated_at',\n )\n","repo_name":"Azimjonm2333/SciCollabNet","sub_path":"src/apps/gallery/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"74392828947","text":"import pytest\n\nfrom pre_commit_hooks.check_json import main\nfrom testing.util import get_resource_path\n\n\n@pytest.mark.parametrize(\n ('filename', 'expected_retval'), (\n ('bad_json.notjson', 1),\n ('bad_json_latin1.nonjson', 1),\n ('ok_json.json', 0),\n ),\n)\ndef test_main(capsys, filename, expected_retval):\n ret = main([get_resource_path(filename)])\n assert ret == expected_retval\n if expected_retval == 1:\n stdout, _ = capsys.readouterr()\n assert filename in stdout\n","repo_name":"KitchenStories/pre-commit-hooks","sub_path":"tests/check_json_test.py","file_name":"check_json_test.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"49"} +{"seq_id":"73491932629","text":"import os\nfrom unittest import mock\n\nimport pytest\n\nfrom checklens import create_app, configure_redis\n\n\n@pytest.fixture\ndef app():\n with mock.patch(\"checklens.configure_redis\"):\n with mock.patch.dict(os.environ, {\"CHECK_FIELD_EXISTS\": \"field.child_a.child_b\"}, clear=True):\n app = create_app()\n yield app\n\n\n@pytest.fixture()\ndef client(app):\n return app.test_client()\n\n\n@mock.patch(\"checklens.configure_redis\", mock.MagicMock)\nclass TestAppInitialize:\n def test_normal_validation(self):\n with mock.patch.dict(os.environ, {\"CHECK_FIELD_EXISTS\": \"field.child_a.child_b\"}, clear=True):\n init_app = create_app()\n assert init_app.config[\"validate_field\"] == \"field.child_a.child_b\"\n\n def test_short_validation(self):\n with mock.patch.dict(os.environ, {\"CHECK_FIELD_EXISTS\": \"field.child_a\"}, clear=True):\n with mock.patch(\"sys.exit\") as exit_mock:\n create_app()\n exit_mock.assert_called_once_with(1)\n\n def test_unexist_validation(self):\n with mock.patch.dict(os.environ, {}, clear=True):\n with mock.patch(\"sys.exit\") as exit_mock:\n with pytest.raises(AttributeError):\n create_app()\n exit_mock.assert_called_once_with(1)\n\n def test_invalid_format_validation(self):\n with mock.patch.dict(os.environ, {\"CHECK_FIELD_EXISTS\": \"field_child_a_child_b\"}, clear=True):\n with mock.patch(\"sys.exit\") as exit_mock:\n create_app()\n exit_mock.assert_called_once_with(1)\n\n\nclass TestRedisInit:\n def test_without_host(self):\n with mock.patch.dict(os.environ, {}, clear=True):\n with pytest.raises(SystemExit):\n configure_redis()\n\n def test_connect_to_redis(self):\n with mock.patch.dict(os.environ, {\"REDIS_HOST\": \"localhost\"}, clear=True):\n with mock.patch(\"redis.Redis\") as r:\n configure_redis()\n r.assert_called_with(host=\"localhost\", port=\"6379\", db=\"0\")\n\n def test_custom_redis_params(self):\n with 
mock.patch.dict(os.environ, {\"REDIS_HOST\": \"locloc\", \"REDIS_PORT\": \"1234\", \"REDIS_DB\": \"11\"}, clear=True):\n with mock.patch(\"redis.Redis\") as r:\n configure_redis()\n r.assert_called_with(host=\"locloc\", port=\"1234\", db=\"11\")\n\n\n@pytest.mark.usefixtures(\"app\")\nclass TestChecklensResponses:\n def test_allow_only_post(self, client):\n resp = client.get(\"/\")\n assert resp.status_code == 405\n\n def test_check_key(self, client):\n body = {\"field\": {\"child_a\": {\"child_b\": \"value\"}}}\n resp = client.post(\"/\", json=body)\n assert resp.status_code == 200\n\n def test_check_value(self, client):\n body = {\"field\": {\"child_a\": \"child_b\"}}\n resp = client.post(\"/\", json=body)\n assert resp.status_code == 200\n\n def test_unexist_key(self, client):\n body = {\"field\": {\"child_a\": {\"child_c\": \"value\"}}}\n resp = client.post(\"/\", json=body)\n assert resp.status_code == 403\n assert \"Field with value/key doesn't exist\" in resp.data.decode()\n\n def test_invalid_value(self, client):\n body = {\"field\": {\"child_a\": \"child_c\"}}\n resp = client.post(\"/\", json=body)\n assert resp.status_code == 403\n assert \"Field with value/key doesn't exist\" in resp.data.decode()\n\n def test_invalid_child_a(self, client):\n body = {\"field\": {\"child_c\": \"child_d\"}}\n resp = client.post(\"/\", json=body)\n assert resp.status_code == 403\n assert \"Field child_a doesn't exist in body[field]\" in resp.data.decode()\n\n def test_invalid_field(self, client):\n body = {\"fieldinv\": {\"child_a\": \"child_b\"}}\n resp = client.post(\"/\", json=body)\n assert resp.status_code == 403\n assert \"Field field doesn't exist in body\" in resp.data.decode()\n\n def test_invalid_structure(self, client):\n body = {\"field\": [\"a\", \"b\"]}\n resp = client.post(\"/\", json=body)\n assert resp.status_code == 403\n assert \"Field child_a doesn't exist in body[field]\" in resp.data.decode()\n\n def test_data_not_json(self, client):\n body = \"my_data\"\n resp = client.post(\"/\", data=body)\n assert resp.status_code == 403\n assert \"Body doesn't in JSON format\" in resp.data.decode()\n","repo_name":"gigimon/checklens","sub_path":"tests/test_app.py","file_name":"test_app.py","file_ext":"py","file_size_in_byte":4388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"38530794453","text":"import os\n\nimport tensorflow as tf\n\nfrom tensorflow.keras import datasets, layers, models\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport PIL\nfrom PIL import Image\nimport pathlib\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.models import Sequential\nfrom keras.preprocessing.image import ImageDataGenerator\n\n\ndef load_image(path):\n return cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2GRAY)\n\n\ndef prepare_single_image(img_path: str) -> np.array:\n img = Image.open(img_path)\n img = img.resize(size=(180, 180))\n return np.array(img) / 255.0\n\n\ndef display_image(image):\n plt.imshow(image, 'gray')\n\n\ndef loadnormal():\n train_dir = 'test\\\\normal'\n loadimage(train_dir)\n\n\ndef loadopacity():\n train_dir = 'test\\\\covid'\n loadimage(train_dir)\n\n\ndef loadimage(train_dir):\n for directory, subdirectories, files in os.walk(train_dir):\n for file in files:\n path = os.path.join(directory, file)\n img = load_image(path)\n display_image(img)\n plt.show()\n\n\ndef defineTrainingImages(pathFolder):\n data_dir = 'train'\n validation_data = 'val'\n 
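    # Note: tf.keras.utils.image_dataset_from_directory infers the class labels from\n    # sub-folder names, so 'train' and 'val' are assumed to contain one directory per\n    # class (e.g. covid/ and normal/). A quick sanity check could look like this\n    # sketch (hypothetical, not part of the original script):\n    #   import pathlib\n    #   for split in (data_dir, validation_data):\n    #       assert any(pathlib.Path(split).iterdir()), f'empty dataset dir: {split}'\n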
batch_size = 64\n img_height = 150\n img_width = 150\n\n train_ds = tf.keras.utils.image_dataset_from_directory(\n data_dir,\n seed=42,\n image_size=(img_height, img_width),\n batch_size=batch_size)\n\n val_ds = tf.keras.utils.image_dataset_from_directory(\n validation_data,\n seed=42,\n image_size=(img_height, img_width),\n batch_size=batch_size)\n\n class_names = train_ds.class_names\n print(class_names)\n\n plt.figure(figsize=(10, 10))\n for images, labels in train_ds.take(1):\n for i in range(9):\n ax = plt.subplot(3, 3, i + 1)\n plt.imshow(images[i].numpy().astype(\"uint8\"))\n plt.title(class_names[labels[i]])\n plt.axis(\"off\")\n\n AUTOTUNE = tf.data.AUTOTUNE\n\n train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)\n val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)\n\n num_classes = len(class_names)\n\n data_augmentation = tf.keras.Sequential([\n layers.RandomFlip(\"horizontal_and_vertical\"),\n layers.RandomRotation(0.2),\n layers.RandomTranslation(height_factor=0.2, width_factor=0.2)\n ])\n\n model = Sequential([\n layers.Rescaling(1. / 255, input_shape=(img_height, img_width, 3)),\n data_augmentation,\n layers.Conv2D(16, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Conv2D(32, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Conv2D(64, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Flatten(),\n layers.Dense(128, activation='relu'),\n layers.Dropout(0.5),\n layers.Dense(num_classes)\n ])\n\n model.compile(optimizer='adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\n model.summary()\n\n epochs = 20\n history = model.fit(\n train_ds,\n validation_data=val_ds,\n epochs=epochs\n )\n\n acc = history.history['accuracy']\n val_acc = history.history['val_accuracy']\n\n loss = history.history['loss']\n val_loss = history.history['val_loss']\n\n epochs_range = range(epochs)\n\n plt.figure(figsize=(8, 8))\n plt.subplot(1, 2, 1)\n plt.plot(epochs_range, acc, label='Training Accuracy')\n plt.plot(epochs_range, val_acc, label='Validation Accuracy')\n plt.legend(loc='lower right')\n plt.title('Training and Validation Accuracy')\n\n plt.subplot(1, 2, 2)\n plt.plot(epochs_range, loss, label='Training Loss')\n plt.plot(epochs_range, val_loss, label='Validation Loss')\n plt.legend(loc='upper right')\n plt.title('Training and Validation Loss')\n plt.show()\n\n num_covid, num_correct_covid = 0, 0\n num_normal, num_correct_normal = 0, 0\n\n for directory, subdirectories, files in os.walk(pathFolder):\n for file in files:\n path = os.path.join(directory, file)\n img = tf.keras.utils.load_img(\n path, target_size=(img_height, img_width)\n )\n i += 1\n img_array = tf.keras.utils.img_to_array(img)\n img_array = tf.expand_dims(img_array, 0)\n\n predictions = model.predict(img_array)\n pred = predictions.argmax()\n if pred == 0:\n num_correct_covid += 1\n else:\n num_correct_normal += 1\n score = tf.nn.softmax(predictions[0])\n\n print(\n \"{}. 
image most likely belongs to {} with a {:.2f} percent confidence.\"\n .format(i, class_names[np.argmax(score)], 100 * np.max(score))\n )\n\n print(num_correct_normal)\n print(num_correct_covid)\n\n\ndef loadnormal():\n pathFolder = 'test\\\\normal'\n defineTrainingImages(pathFolder)\n\n\ndef loadcovid():\n pathFolder = 'test\\\\covid'\n defineTrainingImages(pathFolder)\n\n\nloadcovid()\nloadnormal()\n","repo_name":"Nebojsa1999/Soft","sub_path":"SoftProject/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"41394167709","text":"import sys\r\nfrom collections import deque\r\ninput = sys.stdin.readline\r\n\r\n# 6. declare the bfs function\r\ndef bfs(x, y) :\r\n global max_area\r\n # 7. create the queue\r\n queue = deque()\r\n # 8. append the current position to the queue\r\n queue.append((x, y))\r\n visited[x][y] = True\r\n area = 1\r\n # 9.\r\n while queue :\r\n # 10. pop a position\r\n x, y = queue.popleft()\r\n # 11.\r\n for i in range(4) :\r\n nx, ny = x + dirs[i][0], y + dirs[i][1]\r\n # 12. skip when the next position is out of range\r\n if nx < 0 or nx >= n or ny < 0 or ny >= m : continue\r\n if graph[nx][ny] and not visited[nx][ny] :\r\n visited[nx][ny] = True\r\n area += 1\r\n graph[nx][ny] = area\r\n # 13. update the maximum area\r\n max_area = max(max_area, area)\r\n # 14. mark as visited and push the next position onto the queue\r\n queue.append((nx, ny))\r\n\r\nn, m = map(int, input().split())\r\ngraph = [list(map(int, input().split())) for _ in range(n)]\r\ndirs = [(-1, 0), (1, 0), (0, -1), (0, 1)]\r\n# 1. create the visited list\r\nvisited = [[False] * m for _ in range(n)]\r\n# 2. variables for the number of paintings and the maximum area\r\ncount, max_area = 0, 0\r\n# 3.\r\nfor i in range(n) :\r\n for j in range(m) :\r\n # 4. run bfs when the cell value is 1 and it has not been visited yet\r\n if graph[i][j] and not visited[i][j] :\r\n bfs(i, j)\r\n count += 1\r\nif count and not max_area : max_area = 1\r\n# 5. print the result\r\nprint(f'{count}\\n{max_area}')","repo_name":"Oneul-hyeon/Algorithm","sub_path":"백준/Silver/1926. 그림/그림.py","file_name":"그림.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"1286945595","text":"import pika, sys, os\n\ndef main():\n connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))\n channel = connection.channel()\n \n # Create the queue named hello; if it already exists this does nothing\n channel.queue_declare(queue='hello')\n\n # Define the callback function that will handle the message.\n def callback(ch, method, properties, body):\n print(\" [x] Received %r\" % body)\n\n # Hook the callback up to the messages arriving on the hello queue.\n # auto_ack=True disables the acknowledgement that is sent when a task finishes.\n # If that acknowledgement is not received within a defined timeout (30 min by default),\n # the message is delivered again.\n channel.basic_consume(queue='hello', on_message_callback=callback, auto_ack=True)\n\n print(' [*] Waiting for messages. 
To exit press CTRL+C')\n \n # Start consuming.\n channel.start_consuming()\n\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt:\n print('Interrupted')\n try:\n sys.exit(0)\n except SystemExit:\n os._exit(0)","repo_name":"gonzacastro/rabbit-mq-tutorial","sub_path":"m1-hello-world/consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"29292998272","text":"import itertools\n\nH, W, K = map(int, input().split())\nA = [[int(x) for x in input()] for _ in range(H)]\n\n\ndef solve(blocks):\n n_vert_cut = 0\n n_block = len(blocks)\n vals = [0 for _ in range(n_block)]\n\n for c in range(W):\n adds = [block[c] for block in blocks]\n if any(add > K for add in adds):\n return H * W\n\n sums = [val + add for val, add in zip(vals, adds)]\n if any(x > K for x in sums):\n n_vert_cut += 1\n vals = adds\n else:\n vals = sums\n\n return n_vert_cut + n_block - 1\n\n\nans = H * W\nfor mask in itertools.product([0, 1], repeat=H - 1):\n mask = [1] + list(mask) + [1]\n pivots = [r for r in range(H + 1) if mask[r]]\n blocks = [A[p1:p2] for p1, p2 in zip(pivots[:-1], pivots[1:])]\n blocks = [[sum(row[c] for row in block) for c in range(W)] for block in blocks]\n # print(blocks, '->', solve(blocks))\n ans = min(ans, solve(blocks))\nprint(ans)\n","repo_name":"amoshyc/cp-code","sub_path":"atcoder/abc159/e.py","file_name":"e.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"19220868929","text":"import unittest\nimport os\nimport redis\nimport time\n\nfrom caching import Cache\n\nredisHost = os.getenv(\"REDISHOST\", \"127.0.0.1\")\nredisPort = os.getenv(\"REDISPORT\", 6379)\nredisDB = os.getenv(\"REDISDB\", 0)\nmaxKeys = 5\nexpiryTime = 2\n\nclass TestRedisProxy(unittest.TestCase):\n cache = Cache()\n cache.setDebug(False)\n cache.setRedis(redisHost, redisPort, redisDB)\n cache.setExpiry(expiryTime)\n cache.setMaxKeys(maxKeys)\n\n def test_cache(self):\n i=0\n while i <= 2 * maxKeys:\n data = self.cache.get(\"testkey-%s\"%(i)).data\n self.assertEqual(int(data), i)\n i = i + 1 \n \n def test_missing(self):\n self.assertEqual(self.cache.get(\"nosuchkey\"), False)\n \n def test_lru(self):\n data = self.cache.get(\"lrutest\")\n lruDate = data.created\n i = 0\n while i <= 2 * maxKeys:\n self.cache.get(\"testkey-%s\"%(i))\n i = i + 1\n time.sleep(1)\n data = self.cache.get(\"lrutest\")\n self.assertNotEqual(data.created, lruDate)\n \n def test_expiry(self):\n entry = self.cache.get(\"testkey-%s\"%(1))\n time.sleep(expiryTime+1)\n newentry = self.cache.get(\"testkey-%s\"%(1))\n self.assertNotEqual(entry.created, newentry.created)\n \n\ndef preloadRedis():\n print (\"Preloading redis with %i values\"%(2 * maxKeys))\n conn = redis.Redis(host=redisHost, port=redisPort, db=redisDB)\n try:\n conn.ping()\n except Exception as ex:\n print ('Error:', ex)\n exit('Failed to connect to Redis server')\n \n i = 0\n while i <= 2 * maxKeys:\n conn.set(\"testkey-%s\"%(i), i)\n i = i + 1\n conn.set(\"lrutest\", \"lrupayload\")\n\nif __name__ == '__main__':\n preloadRedis()\n unittest.main()","repo_name":"prg3/redis-proxy","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"4080392166","text":"from flask import current_app as app\n\nfrom model 
import Movie, session\nfrom constant.common_constant import ACTIVE\n\n\ndef update_movie(current_user, **kwargs):\n app.logger.debug(\"Inside update_movie functionality.\")\n movie_id = kwargs.pop(\"movie_id\")\n\n def _prepare_update_movie():\n app.logger.info(\"Movie ID :: {}, user :: {}\".format(\n movie_id, current_user.get(\"user_name\")\n ))\n\n update_dict = dict()\n if 'movie_name' in kwargs.keys():\n update_dict[\"name\"] = kwargs.pop(\"movie_name\")\n\n if 'genre' in kwargs.keys():\n update_dict['genre'] = str(kwargs.pop(\"genre\"))\n\n update_dict.update(kwargs)\n\n update_count = session.query(Movie).filter(\n Movie.id == movie_id,\n Movie.is_deleted == ACTIVE\n ).update(update_dict)\n\n app.logger.info(\"Update Count :: {}\".format(update_count))\n\n def _prepare_response():\n app.logger.debug(\"Preparing for response dict.\")\n return dict(\n success=True,\n message=\"Movie has been successfully updated.!!!\",\n data=dict(\n movie_id=movie_id\n )\n )\n\n _prepare_update_movie()\n return _prepare_response()\n","repo_name":"mehta-smit/imdb-movie","sub_path":"imdb/functionality/movie/update_movie.py","file_name":"update_movie.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"38117010654","text":"import logging\nfrom threading import Lock, Thread\nimport cflib\nimport math\nimport traceback\nfrom constants import constants\nfrom cflib.crazyflie import Crazyflie\nfrom cflib.crazyflie.log import LogConfig\nfrom cflib.crazyflie.syncCrazyflie import SyncCrazyflie\nfrom cflib.crazyflie.syncLogger import SyncLogger\nfrom cflib.bootloader import Bootloader\nfrom socketClient import sendDroneData, sendMapPoints, sendDronePosition\nfrom interfaces.drone import Drone\nfrom mapHelper import MapHelper\nfrom customLogger import CustomLogger\nfrom enums.commands import Commands\n\nlogging.basicConfig(level=logging.ERROR)\n\nVOLTAGE_DICT = {\n 4.2: 100,\n 4.15: 95,\n 4.11: 90,\n 4.08: 85,\n 4.02: 80,\n 3.98: 75,\n 3.95: 70,\n 3.91: 65,\n 3.87: 60,\n 3.85: 55,\n 3.84: 50,\n 3.82: 45,\n 3.80: 40,\n 3.79: 35,\n 3.77: 30,\n 3.75: 25,\n 3.73: 20,\n 3.71: 15,\n 3.69: 10,\n 3.61: 5,\n 3.27: 0\n}\n\n\"\"\"\nThis file is a singleton class that communicates with the drone(s).\nIt takes care of all the basic logging and commands to the Crazyflie drone(s).\nall the logging is done in the \"getDroneInfo\" method.\nTo control the drone with the parameters, we use the \"communication\" method.\nThe other methods are to take care of errors and the logging data.\n\"\"\"\n\nclass SingletonMeta(type):\n _instances = {}\n _lock: Lock = Lock()\n\n def __call__(cls, *args, **kwargs):\n \n with cls._lock:\n\n if cls not in cls._instances:\n instance = super().__call__(*args, **kwargs)\n cls._instances[cls] = instance\n return cls._instances[cls]\n\nclass BaseStation(metaclass=SingletonMeta):\n mapHelper = MapHelper()\n logger = None\n dronesInfos = dict()\n crazyFlie = []\n\n def __init__(self):\n self.logger = CustomLogger()\n \n def setUp(self, link_uri):\n self.link = link_uri\n self.bootloader = Bootloader()\n for cf in self.crazyFlie:\n cf.close_link()\n self.crazyFlie.remove(cf)\n for i in link_uri:\n newCrazyflie = Crazyflie()\n newCrazyflie.connected.add_callback(self._connected)\n newCrazyflie.disconnected.add_callback(self._disconnected)\n newCrazyflie.connection_failed.add_callback(self._connection_failed)\n newCrazyflie.connection_lost.add_callback(self._connection_lost)\n newCrazyflie.open_link(i)\n 
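            # Note: cflib's open_link() returns immediately; the radio link is brought\n            # up in the background and the _connected callback registered above fires\n            # once the connection is actually established.\n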
self.crazyFlie.append(newCrazyflie)\n print('Connecting to %s' % i)\n\n def _connected(self, link_uri):\n from webRouting import sendResponse\n\n stringNumber = link_uri[-2:]\n droneId = int(stringNumber, constants.BASE_16)\n self.dronesInfos[droneId] = [droneId, None, None, None, None, None, None, None, None, None, None, None]\n\n sendResponse.set()\n sendResponse.clear()\n \n Thread(target=self.communication, args=(Commands.TAKE_OFF, 0)).start()\n Thread(target=self.getDroneInfo, args=(link_uri,)).start()\n \n\n def _basicDroneInfo_log_data(self, timestamp, data, logconf):\n id = data['droneInfo.droneNumber']\n batteryVoltage = round(data['pm.vbat'], constants.ROUND_THOUSANDTH)\n batteryLvl = self.convertBatteryVoltage(batteryVoltage)\n state = data['droneInfo.state']\n speed = self.calculateSpeed(data['stateEstimate.vx'], data['stateEstimate.vy'], data['stateEstimate.vz'])\n droneInfo = Drone(id, batteryLvl, state, speed)\n sendDroneData(droneInfo)\n self.dronesInfos[id][constants.TABLE_POSITION_STATE] = state \n self.dronesInfos[id][constants.TABLE_POSITION_SPEED] = speed\n self.dronesInfos[id][constants.TABLE_POSITION_BATTERY] = batteryLvl\n self.sendDroneInfo()\n\n def _droneRangingDeck_log_data(self, timestamp, data, logconf):\n id = data['droneInfo.droneNumber']\n front = self.mapHelper.checkRangeValues(data['range.front'])\n back = self.mapHelper.checkRangeValues(data['range.back'])\n left = self.mapHelper.checkRangeValues(data['range.left'])\n right = self.mapHelper.checkRangeValues(data['range.right'])\n up = self.mapHelper.checkRangeValues(data['range.up'])\n down = self.mapHelper.checkRangeValues(data['range.zrange'])\n self.dronesInfos[id][constants.TABLE_POSITION_DISTANCE_FRONT] = front\n self.dronesInfos[id][constants.TABLE_POSITION_DISTANCE_LEFT] = left\n self.dronesInfos[id][constants.TABLE_POSITION_DISTANCE_BACK] = back\n self.dronesInfos[id][constants.TABLE_POSITION_DISTANCE_RIGHT] = right\n self.sendDroneInfo()\n\n def _dronePosition_log_data(self, timestamp, data, logconf):\n id = data['droneInfo.droneNumber']\n positionx = data['stateEstimate.x']\n positiony = data['stateEstimate.y'] - (0.5 * (id - 1))\n positionz = data['stateEstimate.z']\n yaw = data['stabilizer.yaw'] * (math.pi / 180)\n self.dronesInfos[id][constants.TABLE_POSITION_POSX] = positionx\n self.dronesInfos[id][constants.TABLE_POSITION_POSY] = positiony\n self.dronesInfos[id][constants.TABLE_POSITION_POSZ] = positionz\n self.dronesInfos[id][constants.TABLE_POSITION_YAW] = yaw\n self.sendDroneInfo()\n\n def _drone_log_error(self, logconf, msg):\n error = 'Error when logging ' + logconf.name + ': ' + msg\n print(error)\n self.logger.logErrors(error)\n \n def _connection_failed(self, link_uri, msg):\n try:\n self.crazyFlie.remove(self.getCrazyflieFromUri(link_uri))\n except:\n self.logger.logErrors(traceback.format_exc())\n finally:\n error = 'Connection to ' + link_uri + ' failed: ' + msg\n print(error)\n self.logger.logErrors(error)\n \n\n def _connection_lost(self, link_uri, msg):\n try:\n self.crazyFlie.remove(self.getCrazyflieFromUri(link_uri))\n except:\n self.logger.logErrors(traceback.format_exc())\n finally:\n error = 'Connection to ' + link_uri + ' lost: ' + msg\n print(error)\n self.logger.logErrors(error)\n\n def _disconnected(self, link_uri):\n error = 'Disconnected from ' + link_uri \n print(error)\n self.logger.logErrors(error)\n\n def getCrazyflieFromUri(self, link_uri):\n for cf in self.crazyFlie:\n if cf.link_uri == link_uri:\n return cf\n error = 'Crazyflie not found with uri: ' + link_uri\n 
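        # (A dict keyed by link_uri, e.g. {cf.link_uri: cf for cf in self.crazyFlie},\n        # would make this lookup O(1); the linear scan is fine for the handful of\n        # drones a single base station drives.)\n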
print(error)\n self.logger.logErrors(error)\n return None\n\n def communication(self, command: Commands, value):\n for i in range(len(self.crazyFlie)):\n self.crazyFlie[i].param.set_value('commands.' + command.value, value)\n return\n\n def getDroneInfo(self, link_uri):\n # There are 3 logs because each log is limited to 26 bytes of \"data\", uint8 = 1 byte, float = 4, uint16 = 2.\n basicDroneInfo = LogConfig(name='basicDroneInfo', period_in_ms = constants.TIME_TO_REFRESH_MS)\n basicDroneInfo.add_variable('droneInfo.droneNumber', 'uint8_t')\n basicDroneInfo.add_variable('pm.vbat', 'float')\n basicDroneInfo.add_variable('droneInfo.state', 'uint8_t')\n basicDroneInfo.add_variable('stateEstimate.vx', 'float')\n basicDroneInfo.add_variable('stateEstimate.vy', 'float')\n basicDroneInfo.add_variable('stateEstimate.vz', 'float')\n\n droneRangingDeck = LogConfig(name=\"rangingDeck\", period_in_ms = constants.TIME_TO_REFRESH_MS)\n droneRangingDeck.add_variable('droneInfo.droneNumber', 'uint8_t')\n droneRangingDeck.add_variable('range.front', 'uint16_t')\n droneRangingDeck.add_variable('range.back', 'uint16_t')\n droneRangingDeck.add_variable('range.left', 'uint16_t')\n droneRangingDeck.add_variable('range.right', 'uint16_t')\n droneRangingDeck.add_variable('range.up', 'uint16_t')\n droneRangingDeck.add_variable('range.zrange', 'uint16_t')\n\n dronePosition = LogConfig(name='dronePosition', period_in_ms = constants.TIME_TO_REFRESH_MS)\n dronePosition.add_variable('droneInfo.droneNumber', 'uint8_t')\n dronePosition.add_variable('stateEstimate.x', 'float')\n dronePosition.add_variable('stateEstimate.y', 'float')\n dronePosition.add_variable('stateEstimate.z', 'float')\n dronePosition.add_variable('stabilizer.yaw', 'float')\n\n try:\n crazyflie = self.getCrazyflieFromUri(link_uri)\n if (crazyflie is None):\n return\n crazyflie.log.add_config(basicDroneInfo)\n basicDroneInfo.data_received_cb.add_callback(self._basicDroneInfo_log_data)\n basicDroneInfo.error_cb.add_callback(self._drone_log_error)\n basicDroneInfo.start()\n\n crazyflie.log.add_config(droneRangingDeck)\n droneRangingDeck.data_received_cb.add_callback(self._droneRangingDeck_log_data)\n droneRangingDeck.error_cb.add_callback(self._drone_log_error)\n droneRangingDeck.start()\n\n crazyflie.log.add_config(dronePosition)\n dronePosition.data_received_cb.add_callback(self._dronePosition_log_data)\n dronePosition.error_cb.add_callback(self._drone_log_error)\n dronePosition.start()\n\n except KeyError as e:\n error = 'Could not start log config, ' + format(str(e)) + ' not found in TOC'\n print(error)\n self.logger.logErrors(error)\n except AttributeError as ee:\n error = 'Could not add attribute log config, bad configuration: ' + format(str(ee))\n print(error)\n self.logger.logErrors(error)\n\n def flash(self, link_uri, filename):\n value = self.rebootToBootloader(link_uri)\n \n targets = {}\n targets['stm32'] = (\"fw\",)\n try:\n self.bootloader.flash(str(filename), targets)\n except:\n self.logger.logErrors(traceback.format_exc())\n self.bootloader.reset_to_firmware()\n print(\"Error while updating. 
Rebooting.\")\n return 1\n print(\"Update successful, rebooting.\")\n self.bootloader.reset_to_firmware()\n self.bootloader.close()\n return value\n\n def rebootToBootloader(self, link_uri): \n self.bootloader.clink = link_uri\n self.getCrazyflieFromUri(link_uri).close_link()\n\n try:\n success = self.bootloader.start_bootloader(warm_boot=True)\n if not success:\n print(\"Could not connect to bootloader\")\n return 1\n else:\n print(\"Flashing starting...\")\n return 0\n except :\n self.logger.logErrors(traceback.format_exc())\n return 1\n\n def sendDroneInfo(self):\n for droneId in self.dronesInfos:\n if None not in self.dronesInfos[droneId]:\n if (self.dronesInfos[droneId][1] == 1): #if the drone is flying\n self.logger.logPoints(self.dronesInfos[droneId])\n if (not (\n self.dronesInfos[droneId][7] == -1 or\n self.dronesInfos[droneId][8] == -1 or\n self.dronesInfos[droneId][9] == -1 or\n self.dronesInfos[droneId][10] == -1\n )):\n mapPointFront = self.mapHelper.computeMapPoint(\n self.dronesInfos[droneId][11],\n self.dronesInfos[droneId][7],\n self.dronesInfos[droneId][4] * 1000,\n self.dronesInfos[droneId][5] * 1000\n )\n mapPointBack = self.mapHelper.computeMapPoint(\n self.dronesInfos[droneId][11] + math.pi,\n self.dronesInfos[droneId][9],\n self.dronesInfos[droneId][4] * 1000,\n self.dronesInfos[droneId][5] * 1000\n )\n mapPointLeft = self.mapHelper.computeMapPoint(\n self.dronesInfos[droneId][11] + (math.pi/2),\n self.dronesInfos[droneId][8],\n self.dronesInfos[droneId][4] * 1000,\n self.dronesInfos[droneId][5] * 1000\n )\n mapPointRight = self.mapHelper.computeMapPoint(\n self.dronesInfos[droneId][11] + (3 * math.pi / 2),\n self.dronesInfos[droneId][10],\n self.dronesInfos[droneId][4] * 1000,\n self.dronesInfos[droneId][5] * 1000\n )\n sendMapPoints([mapPointFront, mapPointBack, mapPointLeft, mapPointRight])\n dronePosition = self.mapHelper.computeMapPoint(\n 0,\n 0,\n self.dronesInfos[droneId][4] * 1000,\n self.dronesInfos[droneId][5] * 1000\n )\n sendDronePosition(droneId, dronePosition)\n self.dronesInfos[droneId] = [droneId, None, None, None, None, None, None, None, None, None, None, None]\n\n def calculateSpeed(self, x, y, z):\n speed = math.sqrt(math.pow(x, constants.SQUARE_POWER) + math.pow(y, constants.SQUARE_POWER) \\\n + math.pow(z, constants.SQUARE_POWER))\n return round(speed, constants.ROUND_THOUSANDTH)\n\n def convertBatteryVoltage(self, batteryVoltage):\n for key in VOLTAGE_DICT:\n if (key <= batteryVoltage):\n return VOLTAGE_DICT[key]\n return 0\n","repo_name":"Nicolas-Lauzon/INF3995_BACKUP","sub_path":"crazysystem-master/crazysystem-master/Server/src/baseStation.py","file_name":"baseStation.py","file_ext":"py","file_size_in_byte":13686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"70889860309","text":"def solution(numbers):\n answer = ''\n numbers=list(map(str, numbers))\n numbers.sort(key=lambda x : x*3 , reverse=True)\n answer=str(int(answer.join(numbers)))\n \"\"\"\n str(int(answer.join(numbers)))에서 int는 [0,0,0]의 경우 정답이 0으로 출력되도록 만들기 위함.\n \"\"\"\n \n return answer\n\n#print(solution([6,0,2]))\n#print(solution([0,0,0]))\n","repo_name":"helloju817/Programmers","sub_path":"level2/가장 큰 수/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"49"} +{"seq_id":"9037731298","text":"import sqlite3\nfrom flask import Flask, render_template\n\napp = Flask(__name__)\n\n@app.route('/')\ndef 
index():\n return render_template('index.html')\n\n@app.route('/test')\ndef test():\n # open the connection to the database\n conn = sqlite3.connect('california_house_data.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n # fetch data from the deployments table\n cur.execute(\"select * from test\")\n rows_deploy = cur.fetchall()\n conn.close()\n return render_template('test.html', rows_deploy=rows_deploy)\n\n@app.route('/train')\ndef train():\n # open the connection to the database\n conn = sqlite3.connect('california_house_data.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n # fetch data from the status table\n cur.execute(\"select * from train\")\n rows_status = cur.fetchall()\n conn.close()\n return render_template('train.html', rows_status=rows_status)","repo_name":"KyleXus/Advanced-Programming","sub_path":"california_house.py","file_name":"california_house.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"16169806448","text":"from .baseobject import SapoObject\n\n\nclass PriceRule(SapoObject):\n def __init__(self, *args, **kwargs):\n self.id = None\n self.created_on = None\n self.title = None\n self.target_type = None\n self.target_selection = None\n self.allocation_method = None\n self.value_type = None\n self.value = None\n self.exclude_type = None\n self.once_per_customer = None\n self.usage_limit = None\n self.customer_selection = None\n self.prerequisite_saved_search_ids = None\n self.prerequisite_subtotal_range = None\n self.prerequisite_quantity_range = None\n self.prerequisite_shipping_price_range = None\n self.entitled_product_ids = None\n self.entitled_collection_ids = None\n self.entitled_country_ids = None\n self.starts_on = None\n self.ends_on = None\n\n super(PriceRule, self).__init__(*args, **kwargs)\n\n @classmethod\n def get(cls, api, params={}):\n price_rule = cls()\n data = api.get(\"admin/price_rules.json\", params=params)\n price_rule.load(data.get(\"price_rules\", {}))\n return price_rule\n\n @classmethod\n def list(cls, api, params={}):\n total_price_rules = api.get(\"admin/price_rules/count.json\", params=params)\n limit = params.get(\"limit\", 50)\n params[\"limit\"] = limit\n total_pages = total_price_rules.get(\"count\", 0) // limit + 1\n price_rules = []\n for page in range(1, total_pages + 1):\n params[\"page\"] = page\n data = api.get(\"admin/price_rules.json\", params=params)\n for price_rule in data[\"price_rules\"]:\n price_rule_obj = cls()\n price_rule_obj.load(price_rule)\n price_rules.append(price_rule_obj)\n\n return price_rules\n","repo_name":"tinnguyen189atnos/python-sapo","sub_path":"sapo/PriceRule.py","file_name":"PriceRule.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"5016111354","text":"import numpy as np\nfrom tests.common.tensorio import compare_tensor\nfrom akg.utils import kernel_exec as utils\nfrom tests.common.gen_random import random_gaussian\nfrom tests.common.test_op.gpu.lu import lu\n \ndef lu_solver(a, b):\n out = np.full(b.shape, np.nan, b.dtype)\n len = a.shape[0]\n for n in range(len):\n i = len - n - 1\n out[i, 0] = b[i, 0] / a[i, i]\n for j in range(i):\n b[j, 0] = b[j, 0] - a[j, i] * out[i, 0]\n return out\n\ndef gen_data(shape1, shape2, dtype1, dtype2):\n input1 = random_gaussian(shape1, miu=1, sigma=0.1).astype(dtype1)\n input2 = random_gaussian(shape2, miu=1, sigma=0.1).astype(dtype2)\n 
expect = lu_solver(np.array(input1), np.array(input2))\n return input1, input2, expect \n\ndef lu_run(shape1, shape2, dtype1, dtype2, poly_sch=True, attrs=None):\n attrs[\"pragma_enable_schedule_outer_coincidence\"] = True\n mod = utils.op_build_test(lu, [shape1, shape2],\n [dtype1, dtype2], polyhedral=poly_sch,\n attrs=attrs, kernel_name=\"lu\")\n input1, input2, expect = gen_data(shape1, shape2, dtype1, dtype2)\n output = np.full(expect.shape, np.nan, expect.dtype)\n output = utils.mod_launch(mod, (input1, input2, output), expect=expect)\n rtol = atol = 1e-03\n res = compare_tensor(output, expect, rtol=rtol, atol=atol)\n print(\"Test {}\".format(\"Pass\" if res else \"Failed\"))\n return (input1, input2), output, expect, res\n","repo_name":"mindspore-ai/akg","sub_path":"tests/common/test_run/gpu/lu_run.py","file_name":"lu_run.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","stars":196,"dataset":"github-code","pt":"49"} +{"seq_id":"22456056544","text":"import argparse\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--clf\", action=\"store_true\")\n args = parser.parse_args()\n if args.clf:\n from .classifier.model import MLPClassifier\n from .classifier.data import prepare\n from .classifier.interactive import record\n\n data = prepare(\"./data\", \"./data/mfcc_data.pkl\")\n clf = MLPClassifier.load_from_checkpoint(\n r\"lab\\classifier\\epoch=199-step=360600.ckpt\", num_of_classes=len(data)\n )\n\n print('Start recording...')\n record(clf, data)\n","repo_name":"Wybxc/LectureAI-Experiments","sub_path":"voice_changer/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"49"} +{"seq_id":"18994997639","text":"# coding: utf8\n\nfrom rust.core.business import ParamObject\nfrom rust.core.api import ApiResource, Resource\nfrom rust.core.decorator import param_required\nfrom rust.core.exceptions import BusinessError\n\nfrom business.dynamic.like_service import LikeService\n\n\n@Resource('dynamic.approval')\nclass AApproval(ApiResource):\n\t\"\"\"\n\t点赞\n\t\"\"\"\n\t@param_required(['user', 'dynamic_id'])\n\tdef put(self):\n\t\t\"\"\"\n\t\t点赞\n\t\t\"\"\"\n\t\tuser = self.params['user']\n\t\tparam_object = ParamObject({\n\t\t\t'user_id': user.id,\n\t\t\t'dynamic_id': self.params['dynamic_id']\n\t\t})\n\t\tapproval = LikeService(user).like(param_object)\n\n\t\treturn {\n\t\t\t'id': approval.id\n\t\t}\n\n\t@param_required(['user', 'id'])\n\tdef delete(self):\n\t\t\"\"\"\n\t\t点赞取消\n\t\t\"\"\"\n\t\tuser = self.params['user']\n\t\tapproval = LikeService(user).get_approval_by_id(self.params['id'])\n\t\tif approval.user_id != user.id:\n\t\t\traise BusinessError(u'操作无权限')\n\t\tparam_object = ParamObject({\n\t\t\t'id': self.params['id']\n\t\t})\n\t\tLikeService(user).dislike(param_object)\n\n\t\treturn {}\n","repo_name":"Peri-albert/enjoyfun","sub_path":"api/dynamic/a_approval.py","file_name":"a_approval.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"16187073416","text":"# Name: BZ2 Folder Compression\n# Author: Braden1996\n# Description: This goes through and compresses every file in the source directory\n# and saves the result to the output directory. 
It was designed for use with FastDL\n# for the big-booty-bitches.com community\n\nfrom bz2 import compress\nfrom os import makedirs, walk\nfrom os.path import exists, join, isfile, splitext\nfrom random import randint\nfrom PyQt4.QtCore import QThread, SIGNAL\nimport subprocess\n\nclass FastDLFolder():\n    \"\"\" This class represents a single folder that is to be prepared for FastDL.\n    This includes compressing to BZIP2 and generating resource.AddFile(). \"\"\"\n    def __init__(self, statusbar, executeType):\n        \"\"\" Initialises our class. \"\"\"\n        self.statusbar = statusbar\n        self.validInputs = False # Internal attribute used to check if inputs are correct\n        self.replaceOld = False # Replace pre-existing zip files\n        self.generateResource = True # Generate resource.AddFile's\n        \n        # Used to filter the files so we know what to Zip and create resource.AddFile()s\n        # If you know any other file-types, let me know!\n        self.filter = {\"maps\": set([\".bsp\"]),\n                    \"materials\": set([\".vmt\", \".vtf\", \".png\"]),\n                    \"models\": set([\".vtx\", \".mdl\", \".phy\", \".vvd\"]),\n                    \"sound\": set([\".mp3\", \".wav\"]),\n                    \"particles\": set([\".pcf\"]),\n                    \"resource\": set([\".ttf\"])}\n    \n    def sendMessage(self, msg):\n        self.statusbar.showMessage(msg)\n    \n    def setSourceDir(self, newDir):\n        \"\"\" This is used to set the directory we need compressed. \"\"\"\n        newDir = str(newDir).replace(\"/\", \"\\\\\")\n        \n        if not exists(newDir):\n            self.validInputs = False\n            return False\n        \n        self.sourceDir = newDir\n        self.validInputs = True\n\n    def setOutputDir(self, newDir):\n        \"\"\" This is used to set the directory where we put our compressed files. \"\"\"\n        newDir = str(newDir).replace(\"/\", \"\\\\\")\n        if newDir[-1:] != \"\\\\\":\n            newDir += \"\\\\\"\n        \n        if not exists(newDir):\n            makedirs(newDir)\n\n        self.outputDir = newDir\n\n    def cropPath(self, path):\n        \"\"\" Crops out the unneeded parts of the given path. Returns False if all invalid. \"\"\"\n        dirList = path.split(\"\\\\\")\n        for i in range(len(dirList)):\n            if dirList[0] in self.filter:\n                if splitext(path)[1] in self.filter[dirList[0]]:\n                    return \"\\\\\".join(dirList), \"\\\\\".join(dirList[0:len(dirList)-1]) + \"\\\\\"\n                else:\n                    return False, False\n            else:\n                dirList.pop(0)\n        return False, False\n    \n    def runCompression(self):\n        \"\"\" This executes the compression process. \"\"\"\n        if not self.validInputs:\n            if hasattr(self, \"sourceDir\") and isinstance(self.sourceDir, str):\n                self.sendMessage(\"You cannot compress the folder '(\" + self.sourceDir + \")' as it doesn't exist!\")\n            else:\n                self.sendMessage(\"You have not entered a source directory, or it is of an invalid type!\")\n            return False\n\n        count = 0\n        if self.generateResource: self.resourceStr = \"if (SERVER) then\"\n        for root, dirs, files in walk(self.sourceDir):\n            for curFile in files:\n                path = join(root, curFile)\n                cropPath, cropRoot = self.cropPath(path)\n                if not cropPath:\n                    continue\n                with open(path, \"rb\") as fileObj:\n                    if not exists(join(self.outputDir, cropRoot)):\n                        makedirs(join(self.outputDir, cropRoot))\n\n                    bz2Path = join(self.outputDir, cropPath)\n                    if self.replaceOld or not (isfile(bz2Path) or isfile(bz2Path + \".bz2\")):\n                        if self.generateResource: self.resourceStr += \"\\n\\tresource.AddSingleFile(\\\"\" + cropPath.replace(\"\\\\\", \"/\") + \"\\\")\"\n                        fileObjContent = fileObj.read()\n                        bz2ObjContent = compress(fileObjContent)\n                        if len(bz2ObjContent) > fileObj.tell():\n                            with open(bz2Path, \"wb\") as newFileObj:\n                                newFileObj.write(fileObjContent)\n                        else:\n                            with open(bz2Path + \".bz2\", \"wb\") as bz2FileObj:\n                                bz2FileObj.write(bz2ObjContent)\n                        count += 1\n        output = \"Finished! We compressed '\" + str(count) + \"' files!\"\n\n        if self.generateResource and count > 0:\n            self.resourceStr += \"\\nend\"\n            with open(join(self.outputDir, \"fastdl_\" + str(randint(1, 9999)) + \".lua\"), \"w\") as resourceFileObj: resourceFileObj.write(self.resourceStr)\n            output += \" Your resource file was also generated!\"\n\n        self.sendMessage(output)\n\n    def unpackGma(self):\n        \"\"\" This scans for .gma files and unpacks them. \"\"\"\n        if not self.validInputs:\n            if hasattr(self, \"sourceDir\") and isinstance(self.sourceDir, str):\n                self.sendMessage(\"You cannot compress the folder '(\" + self.sourceDir + \")' as it doesn't exist!\")\n            else:\n                self.sendMessage(\"You have not entered a source directory, or it is of an invalid type!\")\n            return False\n\n        if not exists(self.outputDir):\n            makedirs(self.outputDir)\n\n        count = 0\n        for root, dirs, files in walk(self.sourceDir):\n            for curFile in files:\n                path = join(root, curFile)\n                if splitext(path)[1] == \".gma\":\n                    printPath = path.split(\"\\\\\")\n                    self.sendMessage(\"Unpacking: '\" + printPath[len(printPath)-1] + \"'\")\n                    p = subprocess.Popen([\"lib/gmadconv.exe\", path], cwd=self.outputDir)\n                    p.wait()\n                    count += 1\n        self.sendMessage(\"Finished! 
We unpacked '\" + str(count) + \"' .gma files!\")\n","repo_name":"Braden1996/garrysmod-fastdl-generator","sub_path":"src/lib/compressfastdl.py","file_name":"compressfastdl.py","file_ext":"py","file_size_in_byte":6149,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"49"} +{"seq_id":"35897061253","text":"\nfrom PIL.Image import merge\nimport matplotlib.pyplot as plt\nplt.rcParams['figure.figsize'] = [8, 8]\nimport sys\nimport argparse\nsys.path.append('./')\n\nfrom lib.test.analysis.plot_results import plot_results, print_results, print_per_sequence_results\nfrom lib.test.evaluation import get_dataset, trackerlist\n\nparser = argparse.ArgumentParser(description='Run tracker on sequence or dataset.')\nparser.add_argument('tracker_name', type=str, help='Name of tracking method.')\nparser.add_argument('tracker_param', type=str, help='Name of config file.')\nparser.add_argument('--runid', type=int, default=None, help='The run id.', nargs='+')\nparser.add_argument('--dataset_name', type=str, default='got10k_val', help='Name of dataset (otb, nfs, uav, tpl, vot, tn, gott, gotv, lasot).')\nparser.add_argument('--epoch', type=int, default=100, help='epoch')\n\n\nargs = parser.parse_args()\n\ntrackers = []\n\nparams_dict = {'checkpoint': args.epoch}\nparams_dict['windows_factor'] = 0.5\nparams_dict['interval'] = 25\nparams_dict['debug'] = 0\nparams_dict['cpu'] = 0\n\ntrackers.extend(trackerlist(name=args.tracker_name, parameter_name=args.tracker_param, dataset_name=args.dataset_name,\n run_ids=args.runid, display_name='Track', params_dict=params_dict))\n\nif \"got10k\" in args.dataset_name:\n report_name = 'got10k'\nelse:\n report_name = args.dataset_name\nmerge_results=False\ndataset = get_dataset(args.dataset_name)\nplot_results(trackers, dataset, report_name, merge_results=merge_results, plot_types=('success', 'norm_prec'),\n skip_missing_seq=False, force_evaluation=True, plot_bin_gap=0.05)\nprint_results(trackers, dataset, report_name, merge_results=merge_results, plot_types=('success', 'prec', 'norm_prec'))\nprint_per_sequence_results(trackers, dataset,report_name,merge_results=merge_results)\n","repo_name":"RISC-NYUAD/SiamTPNTracker","sub_path":"tools/analysis_results.py","file_name":"analysis_results.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"49"} +{"seq_id":"8944431485","text":"\"\"\"\nDatetime formatting\n~~~~~~~~~~~~~~~~~~~\n\nThis module contains functions about presenting datetimes\nin various formats. Specifically:\n\n - :func:`format_datetime`, returning a\n \"YYYY-MM-DDTHH:MM:SSZ\"-formatted datetime string out of a\n datetime-like value.\n\n - :func:`to_timestamp`, returning a UNIX timestamp out of a\n datetime-like value.\n\nThe datetime-like argument of those functions can be a\n:class:`~datetime.datetime` object, a human-readable datetime string\nor a timestamp. Timezone-naive values will be treated as being in the\nlocal timezone. Result will be converted in UTC, if not already there.\n\"\"\"\n\nfrom datetime import datetime, timezone\n\nimport dateutil.parser\nfrom dateutil.tz import tzlocal\n\n\ndef _as_utc(dt):\n \"\"\"Convert and return given `dt` in UTC. 
If it is timezone-naive,\n treat it as being in the local timezone.\n \"\"\"\n if dt.tzinfo is None or dt.tzinfo.utcoffset(dt) is None:\n dt = dt.replace(tzinfo=tzlocal())\n return dt.astimezone(timezone.utc)\n\n\ndef _to_datetime(value):\n \"\"\"Return a :class:`~datetime.datetime` object out of a\n datetime-like `value`. Raise :exc:`TypeError` on invalid argument.\n \"\"\"\n if isinstance(value, datetime):\n # Already a datetime, turn it to UTC and return it.\n return _as_utc(value)\n\n try:\n # Try to parse it as a human-readable string.\n parsed = dateutil.parser.parse(value)\n except (TypeError, ValueError):\n # Try to treat it as a timestamp.\n try:\n value = int(value)\n except ValueError:\n message = 'expected datetime.datetime object, valid datetime ' \\\n 'string or timestamp'\n raise TypeError(message) from None\n return datetime.fromtimestamp(value, timezone.utc)\n\n return _as_utc(parsed)\n\n\ndef format_datetime(value):\n \"\"\"Return a datetime string in the \"YYYY-MM-DDTHH:MM:SSZ\" format,\n out of a datetime-like `value`.\n \"\"\"\n iso = _to_datetime(value).isoformat()\n return f'{iso[:-6]}Z'\n\n\ndef to_timestamp(value):\n \"\"\"Return a UNIX timestamp out of a datetime-like `value`.\"\"\"\n timestamp = _to_datetime(value).timestamp()\n return int(timestamp)\n","repo_name":"amikrop/aiomixcloud","sub_path":"aiomixcloud/datetime.py","file_name":"datetime.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"49"} +{"seq_id":"26504977359","text":"# noxfile.py\nimport nox\n\nlocations = \"specufex\", \"tests\", \"noxfile.py\"\nnox.options.sessions = \"lint\", \"tests\"\n\n\n@nox.session(python=[\"3.10\", \"3.9\", \"3.8\"], venv_backend=\"mamba\")\ndef tests(session):\n session.run(\"mamba\", \"install\", \"-y\", \"--file\", \"requirements.txt\")\n session.run(\"pip\", \"install\", \"-r\", \"requirements-dev.txt\")\n session.install(\"-e\", \".\", \"--no-deps\")\n session.run(\"pytest\", \"--cov\")\n\n\n@nox.session(python=[\"3.10\", \"3.9\", \"3.8\"], venv_backend=\"mamba\")\ndef lint(session):\n args = session.posargs or locations\n session.install(\n \"flake8\",\n \"flake8-black\",\n \"flake8-isort\",\n \"flake8-pyprojecttoml\",\n )\n session.run(\"flake8\", *args)\n\n\n@nox.session(python=[\"3.10\", \"3.9\", \"3.8\"], venv_backend=\"mamba\")\ndef black(session):\n args = session.posargs or locations\n session.install(\"black\")\n session.run(\"black\", *args)\n","repo_name":"Specufex/specufex","sub_path":"noxfile.py","file_name":"noxfile.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"49"} +{"seq_id":"5526783652","text":"\"\"\"\n===========\nZoom Window\n===========\n\nThis example shows how to connect events in one window, for example, a mouse\npress, to another figure window.\n\nIf you click on a point in the first window, the z and y limits of the\nsecond will be adjusted so that the center of the zoom in the second\nwindow will be the x,y coordinates of the clicked point.\n\nNote the diameter of the circles in the scatter are defined in\npoints**2, so their size is independent of the zoom\n\"\"\"\nimport matplotlib.pyplot as plt #import figure, show\nimport numpy as np\n\n\n# nodebox section\nif __name__ == '__builtin__':\n # were in nodebox\n import os\n import tempfile\n W = 800\n inset = 20\n size(W, 600)\n plt.cla()\n plt.clf()\n plt.close('all')\n def tempimage():\n fob = 
tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)\n fname = fob.name\n fob.close()\n return fname\n imgx = 20\n imgy = 0\n def pltshow(plt, dpi=150):\n global imgx, imgy\n temppath = tempimage()\n plt.savefig(temppath, dpi=dpi)\n dx,dy = imagesize(temppath)\n w = min(W,dx)\n image(temppath,imgx,imgy,width=w)\n imgy = imgy + dy + 20\n os.remove(temppath)\n size(W, HEIGHT+dy+40)\nelse:\n def pltshow(mplpyplot):\n mplpyplot.show()\n# nodebox section end\n\n\nfigsrc = plt.figure()\nfigzoom = plt.figure()\n\naxsrc = figsrc.add_subplot(111, xlim=(0, 1), ylim=(0, 1), autoscale_on=False)\naxzoom = figzoom.add_subplot(111, xlim=(0.45, 0.55), ylim=(0.4, .6),\n autoscale_on=False)\naxsrc.set_title('Click to zoom')\naxzoom.set_title('zoom window')\nx, y, s, c = np.random.rand(4, 200)\ns *= 200\n\n\naxsrc.scatter(x, y, s, c)\naxzoom.scatter(x, y, s, c)\n\n\ndef onpress(event):\n if event.button != 1:\n return\n x, y = event.xdata, event.ydata\n axzoom.set_xlim(x - 0.1, x + 0.1)\n axzoom.set_ylim(y - 0.1, y + 0.1)\n figzoom.canvas.draw()\n\nfigsrc.canvas.mpl_connect('button_press_event', onpress)\npltshow(plt)\n","repo_name":"kantel/nodebox-pyobjc","sub_path":"examples/Extended Application/matplotlib/examples/event_handling/zoom_window.py","file_name":"zoom_window.py","file_ext":"py","file_size_in_byte":2014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"49"} +{"seq_id":"42561595433","text":"#!/usr/bin/env python\n\n\"\"\"\nContains class for collecting and calculating the fourier modes\n\"\"\"\n\nfrom ..superClasses import CollectAndCalcPointsSuperClass\nfrom ..collectAndCalcHelpers import (collectTime,\\\n collectPoloidalProfile,\\\n calcN,\\\n calcUIPar,\\\n calcUEPar,\\\n slicesToIndices)\nimport numpy as np\n\n#{{{CollectAndCalcFourierModes\nclass CollectAndCalcFourierModes(CollectAndCalcPointsSuperClass):\n \"\"\"\n Class for collecting and calcuating the fourier modes\n \"\"\"\n\n #{{{constructor\n def __init__(self, *args, **kwargs):\n #{{{docstring\n \"\"\"\n This constructor will:\n * Call the parent constructor\n\n Parameters\n ----------\n *args : positional arguments\n See parent constructor for details.\n *kwargs : keyword arguments\n See parent constructor for details.\n \"\"\"\n #}}}\n\n # Call the constructor of the parent class\n super().__init__(*args, **kwargs)\n #}}}\n\n #{{{executeCollectAndCalc\n def executeCollectAndCalc(self):\n #{{{docstring\n \"\"\"\n Function which collects and calculates the fourier modess.\n\n Returns\n -------\n fourierModes : dict\n Dictionary where the keys are on the form \"rho,z\".\n The value is a dict containing of\n {varName:fourierModes, \"time\":time}.\n The fourierModes is a 4d array.\n \"\"\"\n #}}}\n\n # Guard\n if len(self._notCalled) > 0:\n message = \"The following functions were not called:\\n{}\".\\\n format(\"\\n\".join(self._notCalled))\n raise RuntimeError(message)\n\n # Initialize output\n fourierModes = {}\n tCounter = 0\n\n for x, y in zip(self._xInd, self._yInd):\n # NOTE: The indices\n rho = self._dh.rho[x]\n par = self._dh.z [y]\n\n # Add key and dict to fourierModes\n key = \"{},{}\".format(rho,par)\n fourierModes[key] = {}\n\n if self._tSlice is not None:\n t = slicesToIndices(self._collectPaths, self._tSlice[tCounter], \"t\")\n tStep = self._tSlice[tCounter].step\n else:\n t = None\n tStep = None\n tCounter += 1\n\n var, time = self._collectWrapper(fourierModes,key,x,y,t,tStep)\n\n if self.uc.convertToPhysical:\n fourierModes[key][self._varName] =\\\n 
self.uc.physicalConversion(var , self._varName)\n fourierModes[key][\"time\"] =\\\n self.uc.physicalConversion(time, \"t\")\n else:\n fourierModes[key][self._varName] = var\n fourierModes[key][\"time\"] = time\n\n return fourierModes\n #}}}\n\n #{{{convertTo2D\n def convertTo2D(self, fourierModes):\n #{{{docstring\n \"\"\"\n Converts the 4d array to a 2d array.\n\n Parameters\n ----------\n fourierModes : dict\n Output from executeCollectAndCalc.\n\n Returns\n -------\n fourierModes : dict\n As the input, but 2d traces on the form (t, nz) rather than 4d.\n \"\"\"\n #}}}\n\n for key in fourierModes.keys():\n fourierModes[key][self._varName] =\\\n fourierModes[key][self._varName][:,0,0,:]\n\n return fourierModes\n #}}}\n\n #{{{_collectWrapper\n def _collectWrapper(self,fourierModes,key,x,y,t,tStep):\n #{{{docstring\n \"\"\"\n Collects the variable and the time.\n\n If the varName is n, uIPar or uEPar, calculation will be done\n through _calcNonSolvedVars\n\n Parameters\n ----------\n fourierModes : dict\n Dict containing the fourier modess\n key : str\n Key with the point position\n x : int\n The x index to collect from\n y : int\n The y index to collect from\n z : int\n The z index to collect from\n t : [None|tuple]\n The collect-like slice in t\n tStep : [None|int]\n The step to chop the time variable in\n\n Returns\n -------\n var : array-4d\n The fourier transformed collected array.\n time : array\n The time array.\n \"\"\"\n #}}}\n\n time = collectTime(self._collectPaths, tInd=t)\n collector = collectPoloidalProfile\n indArgs = (x, y)\n if not(self._varName == \"n\" or\\\n self._varName == \"uIPar\" or\\\n self._varName == \"uEPar\"):\n var = collector(self._collectPaths,\\\n self._varName,\\\n *indArgs, tInd=t)\n else:\n var = self._calcNonSolvedVars(collector,indArgs,t)\n\n # Fourier transform\n var = np.fft.fft(var)\n\n if tStep is not None:\n # Slice the variables with the step\n # Make a new slice as the collect dealt with the start and\n # the stop of the slice\n newSlice = slice(None, None, tStep)\n\n var = var [newSlice]\n time = time[newSlice]\n\n return var, time\n #}}}\n\n #{{{_calcNonSolvedVars\n def _calcNonSolvedVars(self,collector,indArgs,t):\n #{{{docstring\n \"\"\"\n Calculates variables wich are not solved in the simulation\n\n NOTE: We set normalized True here as the collected\n variables are normalized.\n Conversion to physical happens later in\n executeCollectAndCalc.\n\n Parameters\n ----------\n collector : func\n Function to use for collection\n indArgs : tuple\n Tuple of index arguments.\n Either x and y.\n\n Returns\n -------\n var : 4d-array\n The fourier modes of the calculated variable.\n \"\"\"\n #}}}\n\n normalized = True\n\n lnN = collector(self._collectPaths,\\\n \"lnN\" ,\\\n *indArgs, tInd=t)\n n = calcN(lnN, normalized, uc = self.uc)\n if self._varName == \"n\":\n return n\n else:\n momDensPar = collector(self._collectPaths,\\\n \"momDensPar\" ,\\\n *indArgs, tInd=t)\n uIPar = calcUIPar(momDensPar, n)\n if self._varName == \"uIPar\":\n return uIPar\n else:\n jPar = collector(self._collectPaths,\\\n \"jPar\" ,\\\n *indArgs, tInd=t)\n uEPar = calcUEPar(uIPar ,\\\n jPar ,\\\n n ,\\\n normalized)\n return uEPar\n #}}}\n\n @staticmethod\n #{{{obtainVarName\n def obtainVarName(fourierModes2d):\n #{{{docstring\n \"\"\"\n Obtains the varName of the input dict\n\n Parameters\n ----------\n fourierModes2d : dict\n Dictionary where the keys are on the form \"rho,z\".\n The value is a dict containing of\n {varName:fourierModes, \"time\":time}.\n The fourierModes 
is a 2d array on the form (t,mode).\n\n Returns\n -------\n varName : str\n The variable name\n \"\"\"\n #}}}\n # Obtain the varname\n ind = tuple(fourierModes2d.keys())[0]\n keys = fourierModes2d[ind].keys()\n varName = tuple(var for var in keys if var != \"time\")[0]\n # Strip the variable name\n varName = varName.replace(\"Magnitude\",\"\")\n varName = varName.replace(\"AngularFrequency\",\"\")\n\n return varName\n #}}}\n\n @staticmethod\n #{{{calcMagnitude\n def calcMagnitude(fourierModes2d):\n #{{{docstring\n \"\"\"\n Calculates the magnitude of the 2d fourier signal.\n\n Parameters\n ----------\n fourierModes2d : dict\n Dictionary where the keys are on the form \"rho,z\".\n The value is a dict containing of at least\n {varName:fourierModes}.\n The fourierModes is a 2d array on the form (t,mode).\n\n Returns\n -------\n fourierModes2d : dict\n As the input, but contains the key varNameMagnitude with\n values on the form (t, (nz/2) + 1) for each position.\n \"\"\"\n #}}}\n\n varName = CollectAndCalcFourierModes.obtainVarName(fourierModes2d)\n\n for key in fourierModes2d.keys():\n modes = fourierModes2d[key][varName].copy()\n tSize, N = modes.shape\n nyquistMode = int(N/2) + 1\n magnitude = np.zeros((tSize, nyquistMode))\n #{{{ NOTE: We are dealing with a real signal:\n # As the fourier transform breaks the signal up in\n # cisoids there will be one part of the signal in\n # the positive rotating ciscoid and one in the\n # negative (negative frequencies) for a given mode\n # number. We need to take into account both in\n # order to calculate the amplitude.\n # http://dsp.stackexchange.com/questions/431/what-is-the-physical-significance-of-negative-frequencies?noredirect=1&lq=1\n # http://dsp.stackexchange.com/questions/4825/why-is-the-fft-mirrored\n #}}}\n # Magnitude of the signal\n # https://en.wikipedia.org/wiki/Discrete_Fourier_transform#Definition\n # The offset mode and the Nyquist mode\n magnitude[:,0] = np.abs(modes[:,0])/N\n # Minus 1 as indices count from 0\n magnitude[:,nyquistMode-1] = np.abs(modes[:,nyquistMode])/N\n # Loop over all the modes\n # NOTE: Range exludes the last point\n # Minus 1 as indices count from 0\n for modeNr in range(1, nyquistMode-1):\n posFreq = np.abs(modes[:, modeNr])\n negFreq = np.abs(modes[:, -modeNr])\n magnitude[:, modeNr] = (posFreq + negFreq)/N\n\n # Insert into the dict\n fourierModes2d[key][varName+\"Magnitude\"] = magnitude\n return fourierModes2d\n #}}}\n\n @staticmethod\n #{{{calcAngularFrequency\n def calcAngularFrequency(fourierModes2d):\n #{{{docstring\n \"\"\"\n Calculates the phaseShift of the 2d fourier signal.\n\n Parameters\n ----------\n fourierModes2d : dict\n Dictionary where the keys are on the form \"rho,z\".\n The value is a dict containing of\n {varName:fourierModes, \"time\":time}.\n The fourierModes is a 2d array on the form (t,mode).\n\n Returns\n -------\n fourierModes2d : dict\n As the input, but contains the key varNameAngularFrequency\n with values on the form (t, (nz/2) + 1) for each position.\n NOTE: A negative angular frequency means that the\n perturbations are moving in the negative theta\n direction.\n \"\"\"\n #}}}\n\n varName = CollectAndCalcFourierModes.obtainVarName(fourierModes2d)\n\n for key in fourierModes2d.keys():\n modes = fourierModes2d[key][varName].copy()\n time = fourierModes2d[key][\"time\"]\n tSize, N = modes.shape\n nyquistMode = int(N/2) + 1\n angularFreq = np.zeros((tSize-1, nyquistMode))\n #{{{ NOTE: We are dealing with a real signal:\n # As the signal is real only one of the phase sifts\n 
# are needed. Notice that for a real signal the\n # imaginary part occurs as a complex conjugate pair\n # http://dsp.stackexchange.com/questions/431/what-is-the-physical-significance-of-negative-frequencies?noredirect=1&lq=1\n # http://dsp.stackexchange.com/questions/4825/why-is-the-fft-mirrored\n #}}}\n # The phase shift is found from atan2\n # http://dsp.stackexchange.com/questions/23994/meaning-of-real-and-imaginary-part-of-fourier-transform-of-a-signal\n # atan2 in [-pi, pi]\n for modeNr in range(0, nyquistMode):\n for tInd in range(1, tSize):\n deltaT = time[tInd] - time[tInd-1]\n prevPhaseShift = np.arctan2(modes[tInd-1, modeNr].imag,\\\n modes[tInd-1, modeNr].real)\n curPhaseShift = np.arctan2(modes[tInd , modeNr].imag,\\\n modes[tInd , modeNr].real)\n # phaseShiftDiff in [0, 2*pi]\n phaseShiftDiff = prevPhaseShift - curPhaseShift\n\n # Corrections\n if curPhaseShift*prevPhaseShift < 0\\\n and abs(curPhaseShift) + abs(prevPhaseShift) > np.pi:\n if curPhaseShift < 0:\n # We are going from pi to -pi\n # In order to avoid the discontinuity, we turn\n # curPhaseShift and prevPhaseShift to the opposite\n # quadrants\n tempCurPhase = np.pi + curPhaseShift\n tempPrevPhase = np.pi - prevPhaseShift\n phaseShiftDiff = -(tempCurPhase + tempPrevPhase)\n else:\n # We are going from -pi to pi\n # In order to avoid the discontinuity, we\n # turn curPhaseShift and prevPhaseShift to\n # the opposite quadrants\n tempCurPhase = np.pi - curPhaseShift\n tempPrevPhase = np.pi + prevPhaseShift\n phaseShiftDiff = tempCurPhase + tempPrevPhase\n\n # The angular speed (angular frequency) has units rad/s.\n # Remember that if angularFreq*t = 2*pi the perturbation has\n # revolved one time\n angularFreq[tInd-1, modeNr] = phaseShiftDiff/deltaT\n\n # Insert into the dict\n fourierModes2d[key][varName+\"AngularFrequency\"] = angularFreq\n return fourierModes2d\n #}}}\n#}}}\n","repo_name":"CELMA-project/CELMA-py","sub_path":"CELMAPy/fourierModes/collectAndCalcFourierModes.py","file_name":"collectAndCalcFourierModes.py","file_ext":"py","file_size_in_byte":14640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"40055750224","text":"\"\"\"Load labware command request, result, and implementation models.\"\"\"\nfrom __future__ import annotations\n\nfrom pydantic import BaseModel, Field\nfrom typing import Tuple, Optional\n\nfrom opentrons.protocols.models import LabwareDefinition\n\nfrom ..types import LabwareLocation\nfrom .command import CommandImplementation, CommandHandlers\n\n\nclass LoadLabwareRequest(BaseModel):\n \"\"\"A request to load a labware into a slot.\"\"\"\n\n location: LabwareLocation = Field(\n ...,\n description=\"Location the labware should be loaded into.\",\n )\n loadName: str = Field(\n ...,\n description=\"Name used to reference a labware definition.\",\n )\n namespace: str = Field(\n ...,\n description=\"The namespace the labware definition belongs to.\",\n )\n version: int = Field(\n ...,\n description=\"The labware definition version.\",\n )\n labwareId: Optional[str] = Field(\n None,\n description=\"An optional ID to assign to this labware. 
If None, an ID \"\n \"will be generated.\"\n )\n\n def get_implementation(self) -> LoadLabwareImplementation:\n \"\"\"Get the load labware request's command implementation.\"\"\"\n return LoadLabwareImplementation(self)\n\n\nclass LoadLabwareResult(BaseModel):\n \"\"\"Result data from the execution of a LoadLabwareRequest.\"\"\"\n\n labwareId: str = Field(\n ...,\n description=\"An ID to reference this labware in subsequent commands.\",\n )\n definition: LabwareDefinition = Field(\n ...,\n description=\"The full definition data for this labware.\",\n )\n calibration: Tuple[float, float, float] = Field(\n ...,\n description=\"Calibration offset data for this labware at load time.\",\n )\n\n\nclass LoadLabwareImplementation(\n CommandImplementation[LoadLabwareRequest, LoadLabwareResult]\n):\n \"\"\"Load labware command implementation.\"\"\"\n\n async def execute(self, handlers: CommandHandlers) -> LoadLabwareResult:\n \"\"\"Load definition and calibration data necessary for a labware.\"\"\"\n loaded_labware = await handlers.equipment.load_labware(\n load_name=self._request.loadName,\n namespace=self._request.namespace,\n version=self._request.version,\n location=self._request.location,\n labware_id=self._request.labwareId\n )\n\n return LoadLabwareResult(\n labwareId=loaded_labware.labware_id,\n definition=loaded_labware.definition,\n calibration=loaded_labware.calibration,\n )\n","repo_name":"Corey-ONeal/opentrons-app_ws-remote","sub_path":"api/src/opentrons/protocol_engine/commands/load_labware.py","file_name":"load_labware.py","file_ext":"py","file_size_in_byte":2554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"36295606353","text":"import airo_blender as ab\nimport bpy\nimport numpy as np\nfrom synthetic_cloth_data.geometric_templates import TshirtMeshConfig, create_tshirt_vertices\n\nfrom linen.blender.curve import add_discrete_curve, add_line_segment\nfrom linen.blender.path import add_linen_trajectory_visualization\nfrom linen.blender.plane import add_plane\nfrom linen.blender.render_setups import setup_cycles, setup_white_background\nfrom linen.blender.robotics.robotiq import add_animated_robotiq\nfrom linen.elemental.move_backwards import move_gripper_backwards_trajectory\nfrom linen.folding.fold_lines.shirt import shirt_sleeve_and_side_fold_line\nfrom linen.folding.trajectories.circular_fold import circular_fold_trajectory\nfrom linen.geometry.project import project_point_on_line\nfrom linen.grasping.edge_grasps import orthogonal_insetted_edge_grasps\nfrom linen.grasping.shirt.shirt_grasps import shirt_sleeve_and_waist_grasps\nfrom linen.grasping.slide_grasp import slide_grasp_trajectory\nfrom linen.path.concatenate import concatenate_trajectories\nfrom linen.path.reparametrization.speed import scale_speed\n\nbpy.ops.object.delete()\n\ntable = add_plane(0.8, 1.4)\ntable.location.z = -0.001\n\n\nvertices, keypoints = create_tshirt_vertices(TshirtMeshConfig())\nkeypoints_3D = keypoints.values()\n\n\nadd_discrete_curve(vertices, closed=True)\n\ngrasp_depth = 0.05\nheight_offset = 0.025\napproach_angle = np.pi / 4\napproach_margin = 0.03\napproach_distance = grasp_depth + approach_margin\nshirt_left_side = False\nmiddle_fold = True\n\nif middle_fold:\n waist_left = keypoints[\"waist_left\"]\n waist_right = keypoints[\"waist_right\"]\n neck_left = keypoints[\"neck_left\"]\n neck_right = keypoints[\"neck_right\"]\n\n fold_line_left = shirt_sleeve_and_side_fold_line(keypoints, left=True, offset_from_armpit_fraction=1.0 / 
5)\n fold_line_right = shirt_sleeve_and_side_fold_line(keypoints, left=False, offset_from_armpit_fraction=1.0 / 5)\n\n waist_left_to_right = waist_right - waist_left\n waist_left_to_right /= np.linalg.norm(waist_left_to_right)\n\n waist_line = (waist_left, waist_left_to_right)\n\n fold_line_left_point = fold_line_left[0]\n fold_line_right_point = fold_line_right[0]\n\n waist_left_fold_point = project_point_on_line(fold_line_left_point, waist_line)\n waist_right_fold_point = project_point_on_line(fold_line_right_point, waist_line)\n\n neck_left_to_right = neck_right - neck_left\n neck_left_to_right /= np.linalg.norm(neck_left_to_right)\n\n left_to_right = neck_left_to_right + waist_left_to_right\n left_to_right /= np.linalg.norm(left_to_right)\n\n fold_line_direction = -left_to_right\n\n neck_middle = (neck_left + neck_right) / 2\n waist_middle = (waist_left + waist_right) / 2\n shirt_middle = (neck_middle + waist_middle) / 2\n\n fold_line = (shirt_middle, fold_line_direction)\n\n # calculate grasps\n grasp_left, grasp_right = orthogonal_insetted_edge_grasps(\n waist_left_fold_point, waist_right_fold_point, grasp_depth=grasp_depth, inset=0.03\n )\n\n print(grasp_left, grasp_right)\n\nelse:\n fold_line = shirt_sleeve_and_side_fold_line(keypoints, left=shirt_left_side, offset_from_armpit_fraction=1.0 / 5)\n sleeve_grasp, waist_grasp = shirt_sleeve_and_waist_grasps(\n keypoints, grasp_depth=grasp_depth, waist_inset=0.03, left=shirt_left_side\n )\n\n if shirt_left_side:\n grasp_left = sleeve_grasp\n grasp_right = waist_grasp\n else:\n grasp_left = waist_grasp\n grasp_right = sleeve_grasp\n\ngrasp_location_left, grasp_direction_left = grasp_left\ngrasp_location_right, grasp_direction_right = grasp_right\n\ngrasp_location_left[2] += height_offset\ngrasp_location_right[2] += height_offset\n\ngrasp_trajectory_left = slide_grasp_trajectory(\n grasp_location_left, grasp_direction_left, approach_angle=approach_angle, approach_distance=approach_distance\n)\ngrasp_trajectory_right = slide_grasp_trajectory(\n grasp_location_right, grasp_direction_right, approach_angle=approach_angle, approach_distance=approach_distance\n)\n\nfold_arc_trajectory_left = circular_fold_trajectory(\n grasp_location_left, grasp_direction_left, fold_line, start_pitch_angle=approach_angle, speed=0.1\n)\n\nfold_arc_trajectory_right = circular_fold_trajectory(\n grasp_location_right, grasp_direction_right, fold_line, start_pitch_angle=approach_angle, speed=0.1\n)\n\nduration_left = fold_arc_trajectory_left.duration\nduration_right = fold_arc_trajectory_right.duration\n\nif duration_left > duration_right:\n # slow down right\n factor = duration_right / duration_left\n fold_arc_trajectory_right = scale_speed(fold_arc_trajectory_right, factor)\nelif duration_right > duration_left:\n # slow down left\n factor = duration_left / duration_right\n fold_arc_trajectory_left = scale_speed(fold_arc_trajectory_left, factor)\n\n\nretreat_trajectory_left = move_gripper_backwards_trajectory(fold_arc_trajectory_left.end, grasp_depth, 0.1)\nretreat_trajectory_right = move_gripper_backwards_trajectory(fold_arc_trajectory_right.end, grasp_depth, 0.1)\n\ntrajectory_left = concatenate_trajectories([grasp_trajectory_left, fold_arc_trajectory_left, retreat_trajectory_left])\ntrajectory_right = concatenate_trajectories(\n [grasp_trajectory_right, fold_arc_trajectory_right, retreat_trajectory_right]\n)\n\n# # Visualization\nfold_line_point, fold_line_direction = fold_line\nfold_line_start = fold_line_point - fold_line_direction\nfold_line_end = 
fold_line_point + fold_line_direction\nfold_line_object = add_line_segment(fold_line_start, fold_line_end)\nab.add_material(fold_line_object, [1.000000, 0.404182, 0.011072, 1.000000])\n\nadd_linen_trajectory_visualization(trajectory_left, pose_size=0.05)\nadd_linen_trajectory_visualization(trajectory_right, pose_size=0.05)\n\nadd_animated_robotiq(trajectory_left, closed=False)\nadd_animated_robotiq(trajectory_right, closed=False)\n\nsetup_cycles()\nsetup_white_background()\n\ncamera = bpy.context.scene.camera\ncamera.location = (1.91, -1.13, 0.62)\ncamera.rotation_euler = list(np.deg2rad([77.2, 0, 59.1]))\n","repo_name":"Victorlouisdg/linen","sub_path":"scripts/blender/folds/02_shirt.py","file_name":"02_shirt.py","file_ext":"py","file_size_in_byte":5986,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"49"} +{"seq_id":"74178243028","text":"#\n# @lc app=leetcode id=387 lang=python3\n#\n# [387] First Unique Character in a String\n#\n\n# @lc code=start\nclass Solution:\n def firstUniqChar(self, s: str) -> int:\n mem = {}\n\n for i, c in enumerate(s):\n if c not in mem:\n mem[c] = i\n else:\n mem[c] = -1\n\n nr = [i for _, i in mem.items() if i >= 0]\n return min(nr) if nr else -1\n# @lc code=end\n\n","repo_name":"aobo-y/leetcode","sub_path":"387.first-unique-character-in-a-string.py","file_name":"387.first-unique-character-in-a-string.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"33385930049","text":"import os\nimport numpy as np\nfrom PIL import Image\nimport torch\nimport torchvision.transforms.functional as F\nimport time\nfrom threading import Thread, Lock\nimport traceback\nimport logging\nfrom src.image_processor.image_processor import ImageProcessor\nfrom src.utils.download_gdrive import download_gdrive\n\n\nclass ModelWorker(ImageProcessor):\n\n def __init__(\n self,\n traced_model_path=None,\n image_shape=(416, 416),\n id=0\n ):\n super(ModelWorker, self).__init__()\n self.id = id\n self.mutex = Lock()\n self.traced_model_path = traced_model_path if traced_model_path is not None else \"traced_model.pt\"\n if not os.path.isfile(self.traced_model_path):\n logging.info(f\"{self.__class__.__name__}: {self.id} download traced model params\")\n self.traced_file_id = \"10xFg7qXLtJ3Oc6rQyOlumkoaOU1U4PiU\"\n download_gdrive(self.traced_file_id, self.traced_model_path)\n\n logging.info(f\"{self.__class__.__name__}: {self.id} load traced model\")\n self.traced_model = torch.jit.load(self.traced_model_path)\n self.image_shape = image_shape\n logging.info(f\"{self.__class__.__name__}: {self.id} model ready\")\n\n def __call__(self, image_data: np.ndarray):\n \"\"\"Threadsafe call function\"\"\"\n self.mutex.acquire(False)\n logging.info(f\"{self.__class__.__name__}: {self.id} called\")\n try:\n # preprocess image_data\n image = image_data.copy()\n if image.ndim == 2:\n # grayscale image\n image = image[:, :, np.newaxis]\n image = np.repeat(image, repeats=3, axis=2)\n elif image.ndim == 3 and image.shape[2] == 4:\n # image with alpha channel\n image = image[:, :, 0:3]\n\n im_h, im_w = image.shape[:2]\n if im_h > im_w:\n pad_top = pad_down = 0\n pad_left = (im_h - im_w) // 2\n pad_right = im_h - (im_w + pad_left)\n else:\n pad_top = (im_w - im_h) // 2\n pad_down = im_w - (im_h + pad_top)\n pad_left = pad_right = 0\n paddings = (pad_left, pad_top, pad_right, pad_down)\n\n image = Image.fromarray(image)\n image = F.pad(image, padding=paddings, 
fill=128)\n image = F.resize(image, self.image_shape)\n image = F.to_tensor(image)\n image = F.normalize(image, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))\n image = image.unsqueeze(0)\n\n # predict mask_data\n predicted_mask = self.traced_model(image)\n\n # post_process mask_data\n predicted_mask = predicted_mask.squeeze()[1]\n predicted_mask = F.to_pil_image(predicted_mask)\n predicted_mask = F.resize(\n predicted_mask,\n (im_h + pad_top + pad_down,\n im_w + pad_left + pad_right)\n )\n predicted_mask = F.crop(\n predicted_mask,\n i=pad_top,\n j=pad_left,\n h=im_h,\n w=im_w\n )\n predicted_mask = np.array(predicted_mask)\n predicted_mask[predicted_mask > 128] = 255\n predicted_mask[predicted_mask < 255] = 0\n mask_data = predicted_mask\n return image_data, mask_data\n except Exception:\n logging.info(f\"{self.__class__.__name__}: {traceback.format_exc()}\")\n return image_data, np.ones(image_data.shape[:2])\n finally:\n self.mutex.release()\n\n\nclass ModelImageProcessor(ImageProcessor):\n def __init__(self, traced_model_path, image_shape=(416, 416), num_workers=1):\n super(ModelImageProcessor, self).__init__()\n self.traced_model_path = traced_model_path\n self.image_shape = image_shape\n self.num_workers = num_workers\n self.workers = [\n ModelWorker(\n traced_model_path=traced_model_path,\n image_shape=self.image_shape,\n id=i\n )\n for i in range(self.num_workers)\n ]\n\n def __call__(self, image_data: np.ndarray):\n while True:\n for worker in self.workers:\n successfully_acquired = worker.mutex.acquire(False)\n if successfully_acquired:\n out = worker(image_data=image_data)\n return out\n else:\n time.sleep(0.1)\n\n\ndef test_model_worker():\n model_processor = ModelWorker(\n traced_model_path='traced_model.pt',\n image_shape=(416, 416)\n )\n import skimage.io as skio\n image = skio.imread(\"image_test.png\")\n\n image, mask = model_processor(image)\n skio.imsave(\"mask_test_0.png\", mask)\n\n\ndef test_model_image_processor():\n\n model_processor = ModelImageProcessor(\n traced_model_path='traced_model.pt',\n image_shape=(416, 416),\n num_workers=2\n )\n import skimage.io as skio\n image = skio.imread(\"image_test.png\")\n image, mask1 = model_processor(image)\n image, mask2 = model_processor(image)\n\n skio.imsave(\"mask_test_1.png\", mask1)\n skio.imsave(\"mask_test_2.png\", mask2)\n\n\nif __name__ == '__main__':\n test_model_worker()\n # test_model_image_processor()\n","repo_name":"meikuam/cat_faces","sub_path":"src/model/model_image_processor.py","file_name":"model_image_processor.py","file_ext":"py","file_size_in_byte":5368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"71877744148","text":"import numpy as np\nimport numpy.typing as npt\n\n\ndef _create_grid(instructions: list[str], grid_dims: int) -> npt.NDArray[np.ubyte]:\n \"Return a 2D grid (numpy array) representing lights turned on (1) or off (0)\"\n grid = np.zeros((grid_dims, grid_dims), dtype=np.ubyte)\n for row, line in enumerate(instructions):\n for col, char in enumerate(line):\n if char == \"#\":\n grid[row][col] = 1\n return grid\n\n\ndef _count_neighbors(grid: npt.NDArray[np.ubyte], row: int, col: int) -> int:\n \"Returns the number of neighbors turned on\"\n neighbors_on = 0\n\n for neigh_row in range(max(row - 1, 0), min(row + 2, grid.shape[0])):\n for neigh_col in range(max(col - 1, 0), min(col + 2, grid.shape[0])):\n\n if grid[neigh_row][neigh_col] and (neigh_row, neigh_col) != (row, col):\n neighbors_on += 1\n\n return 
neighbors_on\n\n\ndef _simulate_step_part1(grid: npt.NDArray[np.ubyte]) -> npt.NDArray[np.ubyte]:\n    \"Create a new grid and fill each position according to the neighbors of its predecessor\"\n    new_grid = np.zeros(grid.shape, dtype=np.ubyte)\n\n    for row_idx, row in enumerate(grid):\n        for col_idx, val in enumerate(row):\n            neighbors_on = _count_neighbors(grid, row_idx, col_idx)\n            if val == 1 and neighbors_on in (2, 3):\n                new_grid[row_idx][col_idx] = 1\n\n            elif val == 0 and neighbors_on == 3:\n                new_grid[row_idx][col_idx] = 1\n\n    return new_grid\n\n\ndef _simulate_step_part2(grid: npt.NDArray[np.ubyte]) -> npt.NDArray[np.ubyte]:\n    \"Same as _simulate_step_part1 except that the corners are always on\"\n    new_grid = np.zeros(grid.shape, dtype=np.ubyte)\n    corners = (\n        (0, 0),\n        (0, grid.shape[0] - 1),\n        (grid.shape[0] - 1, 0),\n        (grid.shape[0] - 1, grid.shape[0] - 1),\n    )\n\n    for row_idx, row in enumerate(grid):\n        for col_idx, val in enumerate(row):\n            if (row_idx, col_idx) not in corners:\n                neighbors_on = _count_neighbors(grid, row_idx, col_idx)\n                if val == 1 and neighbors_on in (2, 3):\n                    new_grid[row_idx][col_idx] = 1\n\n                elif val == 0 and neighbors_on == 3:\n                    new_grid[row_idx][col_idx] = 1\n            else:\n                new_grid[row_idx][col_idx] = 1\n\n    return new_grid\n\n\ndef part1(instructions: list[str], grid_dims: int, sim_length: int) -> int:\n    \"Return the number of lights turned on after simulating the grid for the given simulation length\"\n    grid = _create_grid(instructions, grid_dims)\n\n    for _ in range(sim_length):\n        grid = _simulate_step_part1(grid)\n\n    return grid.sum()\n\n\ndef part2(instructions: list[str], grid_dims: int, sim_length: int) -> int:\n    \"Same as part 1 except that the four corners must always stay on\"\n    grid = _create_grid(instructions, grid_dims)\n    grid[0][0] = 1\n    grid[0][grid_dims - 1] = 1\n    grid[grid_dims - 1][0] = 1\n    grid[grid_dims - 1][grid_dims - 1] = 1\n\n    for _ in range(sim_length):\n        grid = _simulate_step_part2(grid)\n\n    return grid.sum()\n\n\nif __name__ == \"__main__\":\n    input_file = \"advent_of_code/year_2015/day_18/input.txt\"\n    with open(input_file, \"r\") as file:\n        instructions = [line.strip() for line in file.readlines()]\n\n    print(f\"Part 1: {part1(instructions, 100, 100)}\")\n    print(f\"Part 2: {part2(instructions, 100, 100)}\")\n","repo_name":"jcabre04/advent-of-code","sub_path":"advent_of_code/year_2015/day_18/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"}
+{"seq_id":"44001062934","text":"sim_list = [ate_hours_2[0],ate_hours_2[1],ate_cc_2,ate_inc[1]]\nobs_list = [ate_hours_obs_2[0,0],ate_hours_obs_2[1,0],ate_cc_obs[0,0],ate_inc_obs_2[0,0]]\nobs_list_se = [se_ate_hours_obs_2[0,0],se_ate_hours_obs_2[1,0],se_ate_cc_obs[0,0],se_ate_inc_obs_2[0,0]]\nvar_list = [r'Hours worked ($t=0$)', r'Hours worked ($t=1$)', r'Child care ($t=1$)',r'Log consumption ($t=1$)']\n\n#writing the table\nwith open('/mnt/Research/nealresearch/new-hope-secure/newhopemount/results/Model/fit/table_validation.tex','w') as f:\n\tf.write(r'\\begin{tabular}{llccc}'+'\\n')\n\tf.write(r'\\hline' + '\\n')\n\tf.write(r'\\textbf{Treatment effect} && \\textbf{Simulated} && \\textbf{Observed} \\bigstrut\\\\' + '\\n')\n\tf.write(r'\\cline{1-1}\\cline{3-3}\\cline{5-5}&&&& \\bigstrut[t]\\\\' + '\\n')\n\tfor j in range(len(sim_list)):\n\t\tf.write(var_list[j]+' && ' + \n\t\t\t'{:04.3f}'.format(sim_list[j]) + \n\t\t\tr' & &'+ '{:04.3f}'.format(obs_list[j])+r' \\\\'+'\\n')\n\t\tf.write(r' & & 
& & ( '+ '{:04.3f}'.format(obs_list_se[j])+ r' )\\\\'+'\\n')\n\t\tf.write(r' & & & & \\\\'+'\\n')\n\n\t\n\n\tf.write(r'\\hline'+'\\n')\n\tf.write(r'\\end{tabular}' + '\\n')\n\tf.close()\n\n\n\n","repo_name":"jorginho84/DDCM-NH","sub_path":"model/model_v2/fit/draft.py","file_name":"draft.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"49"} +{"seq_id":"37062807720","text":"from django.db import models\nfrom django.core.validators import EmailValidator, MinLengthValidator, MaxLengthValidator\n\nclass UserModel(models.Model):\n id = models.AutoField(primary_key=True)\n full_name = models.CharField(max_length=100, verbose_name=\"Nombre completo\", validators=\n [MinLengthValidator(6, \"El nombre debe tener al menos 6 caracteres.\")])\n email = models.EmailField(\n max_length=100,\n unique=True,\n verbose_name=\"Correo electrónico\",\n validators=[EmailValidator(\"El correo proporcionado no es válido.\"),\n MinLengthValidator(6, \"El nombre debe tener al menos 6 caracteres.\")]\n )\n password = models.CharField(\n max_length=20,\n verbose_name=\"Contraseña\",\n validators=[MinLengthValidator(8, \"La contraseña debe tener al menos 8 caracteres.\"), \n MaxLengthValidator(12, \"La contraseña no puede tener más de 12 caracteres\")]\n )\n \n def __str__(self):\n return str(self.email)","repo_name":"MauricioMG94/MyMoneyApp","sub_path":"apps/users/models/userModel.py","file_name":"userModel.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"27896374483","text":"from unittest.mock import call, patch\n\nimport pytest\nfrom mocks.mock_context import MockContext\nfrom mocks.mock_selenium import mock_paint_times, mock_performance_times\nfrom selenium.common.exceptions import TimeoutException\n\nfrom mite.exceptions import MiteError\nfrom mite_selenium import _SeleniumWrapper, mite_selenium\n\nEXAMPLE_WEBDRIVER_CONFIG = {\n \"webdriver_command_executor\": \"http://127.0.0.1:4444/wd/test\",\n \"webdriver_keep_alive\": True,\n \"webdriver_file_detector\": \"mocks.mock_selenium:file_detector\",\n \"webdriver_proxy\": \"mocks.mock_selenium:proxy\",\n \"webdriver_browser_profile\": \"mocks.mock_selenium:browser_profile\",\n \"webdriver_options\": \"mocks.mock_selenium:options\",\n \"webdriver_capabilities\": \"mocks.mock_selenium:capabilities\",\n}\n\nLIGHTWEIGHT_WEBDRIVER_CONFIG = {\n \"webdriver_capabilities\": \"mocks.mock_selenium:capabilities\"\n}\n\n\n# Mock webdriver capabilities with spec import\nwebdriver_capabilities = {\"browser\": \"Chrome\"}\nDICT_CAPABILITIES_CONFIG = {\n \"webdriver_capabilities\": \"test_webdriver:webdriver_capabilities\"\n}\n\n\ndef test_config_loaded():\n context = MockContext()\n context.config = EXAMPLE_WEBDRIVER_CONFIG\n wrapper = _SeleniumWrapper(context)\n assert wrapper._keep_alive is True\n assert wrapper._file_detector is True\n assert wrapper._proxy is True\n assert wrapper._browser_profile is True\n assert wrapper._options is True\n assert wrapper._capabilities is True\n\n\ndef test_config_defaults():\n context = MockContext()\n context.config = LIGHTWEIGHT_WEBDRIVER_CONFIG\n wrapper = _SeleniumWrapper(context)\n assert wrapper._keep_alive is False\n assert wrapper._file_detector is None\n assert wrapper._proxy is None\n assert wrapper._browser_profile is None\n assert wrapper._options is None\n assert wrapper._capabilities is True\n\n\ndef 
test_webdriver_capabilities_as_dict():\n wrapper = _setup_wrapper(DICT_CAPABILITIES_CONFIG)\n assert wrapper._capabilities == {\"browser\": \"Chrome\"}\n\n\n@patch(\"mite_selenium.SeleniumRemote\", autospec=True)\ndef test_webdriver_start_stop(MockRemote):\n wrapper = _setup_wrapper(DICT_CAPABILITIES_CONFIG)\n wrapper._start()\n MockRemote.assert_called_with(\n browser_profile=None,\n command_executor=wrapper._command_executor,\n desired_capabilities={\"browser\": \"Chrome\"},\n file_detector=None,\n keep_alive=False,\n options=None,\n proxy=None,\n )\n wrapper._quit()\n # For some reason, calling the Mock provides a reference to the instance\n # that was created when the mock was previously instantiated\n MockRemote().quit.assert_called()\n\n\ndef test_get_js_metrics_context():\n wrapper = _setup_wrapper(DICT_CAPABILITIES_CONFIG)\n context = wrapper.get_js_metrics_context()\n assert context._browser == wrapper\n assert context.results is None\n\n\n@pytest.mark.asyncio\nasync def test_js_metrics_context_manager():\n wrapper = _setup_wrapper(DICT_CAPABILITIES_CONFIG)\n with patch(\"mite_selenium.SeleniumRemote\") as mock_remote:\n wrapper._start()\n js_context = wrapper.get_js_metrics_context()\n\n async with js_context:\n pass\n\n calls = [\n call(\"performance.clearResourceTimings()\"),\n call(\"return performance.getEntriesByType('resource')\"),\n ]\n mock_remote.return_value.execute_script.assert_has_calls(calls)\n\n\ndef test_wait_for_element():\n wrapper = _setup_wrapper(DICT_CAPABILITIES_CONFIG)\n with patch(\"mite_selenium.SeleniumRemote\") as mock_remote, patch(\n \"mite_selenium.WebDriverWait\"\n ) as mock_web_driver_wait, patch(\"mite_selenium.EC\") as mock_ec:\n wrapper._start()\n locator = (\"foo\", \"bar\")\n wrapper.wait_for_element(locator, timeout=7)\n\n mock_web_driver_wait.assert_called_once_with(mock_remote.return_value, 7)\n mock_web_driver_wait.return_value.until.assert_called_once_with(\n mock_ec.presence_of_element_located.return_value\n )\n mock_ec.presence_of_element_located.assert_called_once_with(locator)\n\n\ndef test_wait_for_element_raises_timeout_exception():\n wrapper = _setup_wrapper(DICT_CAPABILITIES_CONFIG)\n with patch(\"mite_selenium.SeleniumRemote\"), patch(\n \"mite_selenium.WebDriverWait\"\n ) as mock_web_driver_wait, patch(\"mite_selenium.EC\"):\n wrapper._start()\n locator = (\"foo\", \"bar\")\n mock_web_driver_wait.return_value.until.side_effect = TimeoutException\n with pytest.raises(MiteError, match=\"Timed out trying to find element\"):\n wrapper.wait_for_element(locator, timeout=7)\n\n\ndef test_wait_for_elements():\n wrapper = _setup_wrapper(DICT_CAPABILITIES_CONFIG)\n with patch(\"mite_selenium.SeleniumRemote\") as mock_remote, patch(\n \"mite_selenium.WebDriverWait\"\n ) as mock_web_driver_wait, patch(\"mite_selenium.EC\") as mock_ec:\n wrapper._start()\n locator = (\"foo\", \"bar\")\n wrapper.wait_for_elements(locator, timeout=7)\n\n mock_web_driver_wait.assert_called_once_with(mock_remote.return_value, 7)\n mock_web_driver_wait.return_value.until.assert_called_once_with(\n mock_ec.presence_of_all_elements_located.return_value\n )\n mock_ec.presence_of_all_elements_located.assert_called_once_with(locator)\n\n\ndef test_wait_for_elements_raises_timeout_exception():\n wrapper = _setup_wrapper(DICT_CAPABILITIES_CONFIG)\n with patch(\"mite_selenium.SeleniumRemote\"), patch(\n \"mite_selenium.WebDriverWait\"\n ) as mock_web_driver_wait, patch(\"mite_selenium.EC\"):\n wrapper._start()\n locator = (\"foo\", \"bar\")\n 
mock_web_driver_wait.return_value.until.side_effect = TimeoutException\n with pytest.raises(MiteError, match=\"Timed out trying to find elements\"):\n wrapper.wait_for_elements(locator, timeout=7)\n\n\ndef test_wait_for_url():\n wrapper = _setup_wrapper(DICT_CAPABILITIES_CONFIG)\n with patch(\"mite_selenium.SeleniumRemote\") as mock_remote, patch(\n \"mite_selenium.WebDriverWait\"\n ) as mock_web_driver_wait, patch(\"mite_selenium.EC\") as mock_ec:\n wrapper._start()\n locator = \"https://google.com\"\n wrapper.wait_for_url(locator, timeout=7)\n\n mock_web_driver_wait.assert_called_once_with(mock_remote.return_value, 7)\n mock_web_driver_wait.return_value.until.assert_called_once_with(\n mock_ec.url_to_be.return_value\n )\n mock_ec.url_to_be.assert_called_once_with(locator)\n\n\ndef test_wait_for_url_raises_timeout_exception():\n wrapper = _setup_wrapper(DICT_CAPABILITIES_CONFIG)\n with patch(\"mite_selenium.SeleniumRemote\"), patch(\n \"mite_selenium.WebDriverWait\"\n ) as mock_web_driver_wait, patch(\"mite_selenium.EC\"):\n wrapper._start()\n locator = \"https://google.com\"\n mock_web_driver_wait.return_value.until.side_effect = TimeoutException\n with pytest.raises(MiteError, match=\"Timed out waiting for url to be\"):\n wrapper.wait_for_url(locator, timeout=7)\n\n\ndef test_webdriver_get():\n wrapper = _setup_wrapper(DICT_CAPABILITIES_CONFIG)\n with patch(\"mite_selenium.SeleniumRemote\") as mock_remote:\n mock_remote.return_value.capabilities = {\"browserName\": \"chrome\"}\n mock_remote.return_value.execute_script.side_effect = [\n mock_performance_times,\n mock_paint_times,\n ]\n wrapper._start()\n wrapper.get(\"https://google.com\")\n\n mock_remote.assert_called()\n mock_remote.return_value.get.assert_called_with(\"https://google.com\")\n mock_remote.return_value.execute_script.assert_called()\n assert wrapper._remote == mock_remote.return_value\n\n\n@pytest.mark.asyncio\nasync def test_selenium_context_manager():\n context = MockContext()\n context.config = DICT_CAPABILITIES_CONFIG\n\n @mite_selenium\n async def test(context):\n pass\n\n # patch with async decorator misbehaving\n with patch(\"mite_selenium.SeleniumRemote\", autospec=True) as mock_remote:\n await test(context)\n\n mock_remote.assert_called()\n mock_remote().quit.assert_called()\n\n\n@pytest.mark.asyncio\nasync def test_selenium_context_manager_with_parens():\n context = MockContext()\n context.config = DICT_CAPABILITIES_CONFIG\n\n @mite_selenium()\n async def test(context):\n pass\n\n # patch with async decorator misbehaving\n with patch(\"mite_selenium.SeleniumRemote\", autospec=True) as mock_remote:\n await test(context)\n\n mock_remote.assert_called()\n mock_remote().quit.assert_called()\n\n\ndef _setup_wrapper(capabilites):\n context = MockContext()\n context.config = capabilites\n return _SeleniumWrapper(context)\n","repo_name":"sky-uk/mite","sub_path":"test/test_webdriver.py","file_name":"test_webdriver.py","file_ext":"py","file_size_in_byte":8670,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"49"} +{"seq_id":"16853082513","text":"import codecs\narquivo = codecs.open('arq15.txt', mode='w+', encoding='utf-8')\nprint('-=-' * 30)\nwhile True:\n string = input('BODY: ').replace('\"', '').replace('\\n', ' ')\n body = string.split(' ')\n for i in body:\n arquivo.write(f'{i} ')\n arquivo.write('\\n')\n print()\n op = str(input('Continuar? 
'))\n if op in 'Nn':\n break\narquivo.close()","repo_name":"MaksonViini/Data-Science-Projects","sub_path":"Maratona Behind the Code 2020/03_FIAP/body.py","file_name":"body.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"38213439017","text":"# -*- coding: UTF-8 -*-\nfrom .. import Provider as DateTimeProvider\n\n\nclass Provider(DateTimeProvider):\n\n def day_of_week(self):\n day = self.date('%w')\n DAY_NAMES = {\n \"0\": \"Dimanche\",\n \"1\": \"Lundi\",\n \"2\": \"Mardi\",\n \"3\": \"Mercredi\",\n \"4\": \"Jeudi\",\n \"5\": \"Vendredi\",\n \"6\": \"Samedi\",\n }\n return DAY_NAMES[day]\n\n def month_name(self):\n month = self.month()\n MONTH_NAMES = {\n \"01\": \"Janvier\",\n \"02\": \"Février\",\n \"03\": \"Mars\",\n \"04\": \"Avril\",\n \"05\": \"Mai\",\n \"06\": \"Juin\",\n \"07\": \"Juillet\",\n \"08\": \"Août\",\n \"09\": \"Septembre\",\n \"10\": \"Octobre\",\n \"11\": \"Novembre\",\n \"12\": \"Décembre\",\n }\n return MONTH_NAMES[month]\n","repo_name":"Symbo1/wsltools","sub_path":"wsltools/utils/faker/providers/date_time/fr_FR/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":308,"dataset":"github-code","pt":"49"} +{"seq_id":"7039752773","text":"import cv2\nfrom clarifai.rest import ClarifaiApp\nfrom clarifai.rest import Image as ClImage\nimport json\nimport fs\n\ncv2.namedWindow(\"preview\")\nvc = cv2.VideoCapture(0)\n\nif vc.isOpened(): # try to get the first frame\n rval, frame = vc.read()\nelse:\n rval = False\n\nwhile rval:\n cv2.imshow(\"preview\", frame)\n cv2.imwrite('./imgCaptured.png',frame)\n rval, frame = vc.read()\n key = cv2.waitKey(20)\n if key == 27: # exit on ESC\n break\ncv2.destroyWindow(\"preview\")\n\napp = ClarifaiApp(api_key='444fb551757f45e0b124892399e5b760')\n\nmodel = app.models.get('general-v1.3')\nimage = ClImage(file_obj=open('./imgCaptured.png', 'rb'))\n\ncontent = json.dumps(model.predict([image]))\nstrings = json.loads(content)\n\nimageText = \"There is a \" + strings[\"outputs\"][0][\"data\"][\"concepts\"][0][\"name\"]+ \" ahead!\"\n\nprint(imageText)\n \nfs = open(\"textToSpeech.txt\", \"w\")\nfs.write(imageText)\nfs.close();\n\n","repo_name":"oumoubalde/LehmanHackathon2018","sub_path":"camAccess.py","file_name":"camAccess.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"15854395878","text":"min_message_count = 100\nmax_message_count = 10000\n\nmin_message_length = 5\n\nmax_message_cache_size = 100\n\nprivate_replies = '嗯 啊 哦 诶 噫 咦 哈 嚯 呼 喂 哇 哼 呸 哟 哎 唉 啊咧咧 哎哟 嗨 噗'.split()\n\ncreator = 345060487\n","repo_name":"KumaTea/MarkovZHBot","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"49"} +{"seq_id":"31721126776","text":"from 爬虫study import url_manager\nimport requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport pprint\n\ndef download_all_htmls():\n #构建分页数字列表\n page_indexs = range(0,250,25)\n htmls = []\n for index in page_indexs:\n new_url = f\"https://movie.douban.com/top250?start={index}&filter=\"\n print(new_url)\n headers = {\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.56'}\n r = requests.get(url = new_url, 
headers= headers)\n        if r.status_code != 200:\n            raise Exception(\"status_error\",r.status_code)\n        htmls.append(r.text)\n    print(len(htmls))\n    return htmls\n\ndef parser_single_html(html):\n    \"\"\"\n    解析单个HTML,得到数据\n    @return list({\"link\",\"title\",[label]})\n    \"\"\"\n    soup = BeautifulSoup(html,\"html.parser\")\n    article_items = (soup.find(\"div\",class_ = \"article\")\n                     .find(\"ol\",class_ = \"grid_view\")\n                     .find_all(\"div\",class_ = \"item\"))\n    data = []\n    for article_item in article_items:\n        rank = article_item.find(\"div\",class_ = \"pic\").find(\"em\").get_text()\n        print(rank)\n        img = article_item.find(\"img\").get('src')\n\n        info = article_item.find(\"div\",class_ = \"info\")\n        title = info.find(\"div\",class_ = \"hd\").find(\"span\",class_ =\"title\").get_text()\n        stars = (info.find(\"div\",class_ = \"bd\")\n                 .find(\"p\",class_ = \"star\")\n                 .find_all(\"span\"))\n        rating_star = stars[0][\"class\"][0]\n        rating_num = stars[1].get_text()\n        commments_num = stars[3].get_text()\n\n        data.append(rank)\n        # append({\n        #     \"rank\":rank,\n        #     \"title\":title,\n        #     \"image\":img,\n        #     \"rating_star\": rating_star.replace(\"rating\",\"\").replace(\"-t\",\"\"),\n        #     \"rating_num\": rating_num,\n        #     \"commments_num\": commments_num.replace(\"评价人数\",\"\")})\n    print(len(data))\n    return data\n\n    \nif __name__ == '__main__':\n    htmls = download_all_htmls()\n\n    datas = []\n    for html in htmls:\n        datas.append(parser_single_html(html))\n    df = pd.DataFrame(datas)\n    df.to_excel(\"豆瓣电影TOP250.xlsx\")\n    \n\n    \n    \n    \n","repo_name":"Bzy924604345/Python_crawler_study","sub_path":"douban_top250.py","file_name":"douban_top250.py","file_ext":"py","file_size_in_byte":2353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"33992038158","text":"#!/usr/bin/env python3\nimport json\nimport logging\nimport os\n\nimport maproulette\n\ntry:\n    MAPROULETTE_API_KEY = os.environ['MAPROULETTE_API_KEY']\nexcept KeyError:\n    raise KeyError(\n        \"Please set the MapRoulette API key in environment variable MAPROULETTE_API_KEY. 
\"\n \"Get it from https://maproulette.org/user/profile\")\n\nPROJECT_ID = 41947\n\nlogging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)\n\nlogging.info(\"Get projects from MapRoulette\")\nconfig = maproulette.Configuration(api_key=MAPROULETTE_API_KEY)\nproject_api = maproulette.Project(config)\nchallenge_api = maproulette.Challenge(config)\n\nproject_challenges = project_api.get_project_challenges(PROJECT_ID, limit=100)['data']\nall_challenges = []\nfor challenge in project_challenges:\n if 23334 <= challenge['id'] <= 23346:\n logging.info(\"Exporting challenge %d: %s\", challenge['id'], challenge['name'])\n # Need both CSV and GeoJSON: The CSV has the mapper name, and the GeoJSON has the geometry\n challenge_status_csv = challenge_api.extract_task_summaries(challenge['id'], limit=10_000, page=0,\n status=\"0,1,2,3,4,5,6,9\")\n with open(f\"data/challenge_{challenge['id']}_tasks.csv\", 'w') as outfile:\n outfile.write(challenge_status_csv['data'])\n challenge_status_geojson = challenge_api.get_challenge_geojson(challenge['id'])\n with open(f\"data/challenge_{challenge['id']}_tasks.geojson\", 'w') as outfile:\n json.dump(challenge_status_geojson['data'], outfile)\nlogging.info(\"Done\")\n","repo_name":"hfs/landuse_without_buildings","sub_path":"05_fetch_old_challenges.py","file_name":"05_fetch_old_challenges.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"49"} +{"seq_id":"39383847258","text":"\"\"\"\nImplementation of tissue-specific graph walk with RWR\n\n\"\"\"\nimport sys\nimport pandas as pd\nimport numpy as np\nimport networkx as nx\nimport argparse\nimport sklearn.preprocessing\nfrom scipy.stats import spearmanr\n# convergence criterion - when vector L1 norm drops below 10^(-6)\n# (this is the same as the original RWR paper)\nCONV_THRESHOLD = 0.000001\n\ndef str2bool(v):\n return v.lower() in (\"yes\", \"true\", \"t\", \"1\")\n\ndef isNum(x):\n try:\n float(x)\n return True\n except:\n return False\n\nclass Walker:\n \"\"\" Class for multi-graph walk to convergence, using matrix computation.\n\n Random walk with restart (RWR) algorithm adapted from:\n\n Kohler S, Bauer S, Horn D, Robinson PN. Walking the interactome for\n prioritization of candidate disease genes. The American Journal of Human\n Genetics. 2008 Apr 11;82(4):949-58.\n\n Attributes:\n -----------\n og_matrix (np.array) : The column-normalized adjacency matrix\n representing the original graph LCC, with no\n nodes removed\n teleport_matrix (np.array): The column-normalized adjacency matrix\n representing the graph LCC, which adds additional edges for teleport \n restart_prob (float) : The probability of restarting from the source\n node for each step in run_path (i.e. r in the\n original Kohler paper RWR formulation)\n normalize (bool) : Whether normalizing p0 to [0,1]\n \"\"\"\n\n def __init__(self, original_ppi, teleport_ppi, remove_nodes=[], constantWeight=False, absWeight=False, addBidirectionEdge=False):\n self._build_matrices(original_ppi, teleport_ppi, remove_nodes, constantWeight, absWeight, addBidirectionEdge)\n self.dic_node2idx = dict([(node, i) for i, node in enumerate(self.OG.nodes())])\n\n def run_exp(self, seed2weight, restart_prob, teleport_prob=0.5, normalize=False, node_list=[]):\n #NP for one sample\n \"\"\" Run a multi-graph random walk experiment, and print results.\n\n Parameters:\n -----------\n seed2weight (dictionary): The source node indices (i.e. 
a list of Entrez\n gene IDs)\n restart_prob (float): As above\n teleport_prob (float): As above\n normalize (bool): As above\n \"\"\"\n self.restart_prob = restart_prob\n self.teleport_prob = teleport_prob\n # set up the starting probability vector\n criteria_p=self._set_up_p0(seed2weight)\n #mask TG with 0\n p_0 = self._set_up_p0(seed2weight)\n if normalize == True:\n p_0 /= np.sum(p_0) # normalize\n diff_norm = 1\n # this needs to be a deep copy, since we're reusing p_0 later\n p_t = np.copy(p_0)\n \n # arr_p includes all p_t for tracing\n arr_p = np.empty((len(p_t),1))\n arr_p[:,0] = p_t\n \n while (diff_norm > CONV_THRESHOLD):\n # first, calculate p^(t + 1) from p^(t)\n p_t_1 = self._calculate_next_p(p_t, p_0)\n if normalize == True:\n p_t_1 /= np.sum(p_t_1) # normalize\n # calculate L1 norm of difference between p^(t + 1) and p^(t),\n # for checking the convergence condition\n diff_norm = np.linalg.norm(np.subtract(p_t_1, p_t), 1)\n # then, set p^(t) = p^(t + 1), and loop again if necessary\n # no deep copy necessary here, we're just renaming p\n p_t = p_t_1\n # append p_t to arr_p\n arr_p = np.c_[arr_p, p_t]\n if arr_p.shape[1] >= 50000:\n break\n print('%d iterated'%(arr_p.shape[1]))\n # now, generate and print a rank list from the final prob vector\n if node_list:#if I want to get propagation result only from selected node list\n gene_idx = dict(zip(self.OG.nodes(), range(len(self.OG.nodes()))))\n output = []\n for node in node_list:\n i = gene_idx[node]\n output.append([node,arr_p[i,-1],arr_p[i,:].tolist()])\n return output\n #return list(self._generate_prob_list(arr_p, node_list))\n else:\n gene_idx = dict(zip(self.OG.nodes(), range(len(self.OG.nodes()))))\n output = []\n for node in sorted(self.OG.nodes()):\n i = gene_idx[node]\n output.append([node,arr_p[i,-1],arr_p[i,:].tolist()])\n return output\n #return list(self._generate_rank_list(arr_p))\n\n def _generate_prob_list(self, p_t, node_list):\n gene_probs = dict(zip(self.OG.nodes(), p_t.tolist()))\n for node in node_list:\n yield node, gene_probs[node]\n\n def _generate_rank_list(self, p_t):\n \"\"\" Return a rank list, generated from the final probability vector.\n\n Gene rank list is ordered from highest to lowest probability.\n \"\"\"\n gene_probs = zip(self.OG.nodes(), p_t.tolist())\n # sort by probability (from largest to smallest), and generate a\n # sorted list of Gene IDs\n for s in sorted(gene_probs, key=lambda x: x[0]):\n yield s[0], s[1]\n\n\n def _calculate_next_p(self, p_t, p_0):\n \"\"\" Calculate the next probability vector. \"\"\"\n if self.teleport_prob is not None:\n epsilon = np.squeeze(np.asarray(np.dot(self.og_matrix, p_t)))\n no_restart = epsilon * (1 - self.restart_prob)\n epsilon_teleport = np.squeeze(np.asarray(np.dot(self.tg_matrix, p_t)))\n else:\n epsilon = np.squeeze(np.asarray(np.dot(self.og_matrix, p_t)))\n no_restart = epsilon * (1 - self.restart_prob)\n restart = p_0 * self.restart_prob \n \n return np.add(no_restart, restart)*(1-self.teleport_prob) + epsilon_teleport*self.teleport_prob\n\n def _set_up_p0(self, seed2weight,set_TF=None):\n \"\"\" Set up and return the 0th probability vector. \"\"\"\n \n p_0 = [0] * self.OG.number_of_nodes()\n weightSum = 0.0\n for seed, weight in seed2weight.items():\n if seed not in self.dic_node2idx:\n #print \"Source node %s is not in original graph. 
It is ignored.\"%(seed)\n continue\n weightSum += seed2weight[seed]\n for seed, weight in seed2weight.items():\n if seed not in self.dic_node2idx:\n continue\n idx = self.dic_node2idx[seed]\n p_0[idx] = seed2weight[seed]\n #p_0[idx] = seed2weight[seed]/weightSum\n return np.array(p_0)\n\n\n def _build_matrices(self, original_ppi, teleport_ppi, remove_nodes, constantWeight, absWeight, addBidirectionEdge):\n \"\"\" Build column-normalized adjacency matrix for each graph.\n\n NOTE: these are column-normalized adjacency matrices (not nx\n graphs), used to compute each p-vector\n \"\"\"\n original_graph = self._build_og(original_ppi, constantWeight, absWeight, addBidirectionEdge)\n\n if remove_nodes:\n # remove nodes, then get the largest connected component once\n # the nodes are removed\n original_graph.remove_nodes_from(remove_nodes)\n original_graph = max(\n nx.connected_component_subgraphs(original_graph),\n key=len)\n\n self.OG = original_graph\n og_not_normalized = nx.to_numpy_matrix(original_graph)\n self.og_matrix = self._normalize_rows(np.transpose(og_not_normalized))\n\n\n teleport_graph = self._build_og(teleport_ppi, constantWeight, absWeight, addBidirectionEdge)\n \n if remove_nodes:\n # remove nodes, then get the largest connected component once\n # the nodes are removed\n teleport_graph.remove_nodes_from(remove_nodes)\n teleport_graph = max(\n nx.connected_component_subgraphs(original_graph),\n key=len)\n\n self.TG = teleport_graph ##nx object \n\n tg_tmp = nx.to_pandas_adjacency(teleport_graph)\n tg_df = pd.DataFrame(np.zeros(og_not_normalized.shape), index=original_graph.nodes, columns=original_graph.nodes)\n tg_df.update(tg_tmp) \n\n tg_not_normalized = tg_df.to_numpy() \n self.tg_matrix = self._normalize_rows(np.transpose(tg_not_normalized))\n \n\n def _build_og(self, original_ppi, constantWeight=False, absWeight=False, addBidirectionEdge=False):\n \"\"\" Build the original graph, without any nodes removed. 
\"\"\"\n\n try:\n graph_fp = open(original_ppi, 'r')\n except IOError:\n sys.exit(\"Could not open file: {}\".format(original_ppi))\n\n G = nx.DiGraph()\n edge_list = []\n\n # parse network input\n for line in graph_fp.readlines():\n split_line = line.rstrip().split('\\t')\n #if len(split_line) > 3:\n # # assume input graph is in the form of HIPPIE network\n # edge_list.append((split_line[1], split_line[3],\n # float(split_line[4])))\n if len(split_line) < 3:\n # assume input graph is a simple edgelist without weights\n #edge_list.append((split_line[0], split_line[1], float(1)))\n weight = 1.0\n else:\n # assume input graph is a simple edgelist with weights\n #edge_list.append((split_line[0], split_line[1],\n # float(split_line[2])))\n weight = float(split_line[2])\n if constantWeight:\n weight = 1.0\n if absWeight:\n weight = abs(weight)\n #edge_list.append((split_line[0], split_line[1], float(weight)))\n edge_list.append((split_line[0], split_line[1], 1))\n if addBidirectionEdge:\n edge_list.append((split_line[1], split_line[0], float(weight)))\n\n G.add_weighted_edges_from(edge_list)\n graph_fp.close()\n return G\n\n\n def _normalize_cols(self, matrix):\n \"\"\" Normalize the columns of the adjacency matrix \"\"\"\n return sklearn.preprocessing.normalize(matrix, norm='l1', axis=0)\n \n def _normalize_rows(self, matrix):\n \"\"\" Normalize the rows of the adjacency matrix \"\"\"\n return sklearn.preprocessing.normalize(matrix, norm='l1', axis=1)\n\ndef main_propagation(argv):\n # set up argument parsing\n parser = argparse.ArgumentParser(usage='''\\\n python %(prog)s input_graph teleport_graph seed -o myout -e 0.01\n ''')\n parser.add_argument('input_graph', help='Original graph input file, in edge list format')\n parser.add_argument('teleport_graph', help='Teleport graph input file, in edge list format')\n parser.add_argument('seed', help='Seed file, to pull start nodes from')\n parser.add_argument('-o',required=True, help='outfile')\n parser.add_argument('-e', '--restart_prob', type=float, default=0.1, help='Restart probability for random walk')\n parser.add_argument('--teleport_prob',type=float, default=0.5)\n parser.add_argument('-constantWeight', default='False', choices=['True', 'False'], help='Whether constant weight or not')\n parser.add_argument('-absoluteWeight', default='False', choices=['True', 'False'], help='Whether absolute weight or not')\n parser.add_argument('-addBidirectionEdge', default='False', choices=['True', 'False'], help='Whether adding bidirection edges')\n parser.add_argument('-normalize', default='False', choices=['True', 'False'], help='Whether normalizing p0 or not')\n args = parser.parse_args()\n\n try:\n fp = open(args.seed, \"r\")\n except IOError:\n sys.exit(\"Error opening file {}\".format(args.seed))\n \n lst_columnName=['0']\n lst_seed=[]\n lst_weights=[]\n for line in fp.readlines():\n s = line.rstrip().split()\n if len(s) >= 2:\n if not isNum(s[1]):#header\n lst_columnName=s[1:]\n else:\n lst_columnName=[str(i) for i in np.arange(len(s[1:]))+1] \n seed = s[0]\n if len(s) == 1: #if only the gene lists are given, set weights to 1\n weights = [1.0]\n if len(s) >= 2:\n weights = list(map(float,s[1:]))\n lst_seed.append(seed)\n lst_weights.append(weights)\n arr_weights=np.array(lst_weights)\n fp.close()\n \n # run the experiments, and write a rank list to stdout\n dic_node2weights={}\n set_nodes=set()\n\n lst_wk = []\n wk = Walker(args.input_graph, args.teleport_graph, constantWeight=str2bool(args.constantWeight), 
absWeight=str2bool(args.absoluteWeight), addBidirectionEdge=str2bool(args.addBidirectionEdge))#1 wk for 1 input graph\n set_nodes |= set(wk.OG.nodes())\n lst_wk.append(wk)\n \n column_name=[]\n for idx, wk in enumerate(lst_wk):#if there's multiple input graphs\n for j in range(arr_weights.shape[1]): #iterate (# samples) times\n column_name.append(lst_columnName[j])\n if sum(np.abs(arr_weights[:,j])) == 0.0:\n for node in set_nodes:\n if node not in dic_node2weights:\n dic_node2weights[node]=[]\n dic_node2weights[node].append(0.0)\n continue\n seed2weight=dict()\n for ii in range(len(lst_seed)):\n seed2weight[lst_seed[ii]]=arr_weights[ii,j]\n lst_node_weight = wk.run_exp(seed2weight, args.restart_prob, normalize=str2bool(args.normalize))\n set_tmpNodes=set()\n for node, weight, all_weight in lst_node_weight:\n if node not in dic_node2weights:\n dic_node2weights[node]=[]\n dic_node2weights[node].append(weight)\n set_tmpNodes.add(node)\n for node in set_nodes-set_tmpNodes:\n if node not in dic_node2weights:\n dic_node2weights[node]=[]\n dic_node2weights[node].append(0.0)\n \n OF=open(args.o,'w')\n #OF.write('Gene\\t'+'\\t'.join(column_name)+'\\n')\n for node, weights in dic_node2weights.items():\n #OF.write('\\t'.join(map(str,[node]+all_weight))+'\\n')\n OF.write('\\t'.join(map(str,[node]+weights))+'\\n')\n OF.flush()\n OF.close()\n\nif __name__ == '__main__':\n main_propagation(sys.argv)\n","repo_name":"DabinJeong/Multi-omics_biomarker","sub_path":"modules/network_propagation.py","file_name":"network_propagation.py","file_ext":"py","file_size_in_byte":17239,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"49"} +{"seq_id":"32798993568","text":"\"\"\"add device model\n\nRevision ID: 8b52f00d97ab\nRevises: 7390a885f72a\nCreate Date: 2021-01-10 18:23:22.802879-08:00\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '8b52f00d97ab'\ndown_revision = '7390a885f72a'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('devices',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('long_name', sa.String(), nullable=True),\n sa.Column('short_name', sa.String(), nullable=True),\n sa.Column('manufacturer_sku', sa.String(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_devices_id'), 'devices', ['id'], unique=False)\n op.create_table('assays',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('long_name', sa.String(), nullable=True),\n sa.Column('short_name', sa.String(), nullable=True),\n sa.Column('loinc_code', sa.String(), nullable=True),\n sa.Column('cpt_code', sa.String(), nullable=True),\n sa.Column('device_id', sa.Integer(), nullable=True),\n sa.Column('cash_price', sa.Float(), nullable=True),\n sa.Column('is_available', sa.Boolean(), nullable=True),\n sa.ForeignKeyConstraint(['device_id'], ['devices.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_assays_id'), 'assays', ['id'], unique=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_index(op.f('ix_assays_id'), table_name='assays')\n op.drop_table('assays')\n op.drop_index(op.f('ix_devices_id'), table_name='devices')\n op.drop_table('devices')\n # ### end Alembic commands ###\n","repo_name":"kalamos-care/full-stack-portal","sub_path":"backend/app/alembic/versions_moved/8b52f00d97ab_add_device_model.py","file_name":"8b52f00d97ab_add_device_model.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"72110021908","text":"from discord.ext import commands\nfrom print_queue import send_message\nfrom threading import Thread\nfrom utils import (search_between, get_log_filename, get_code, stats)\nimport datetime\nimport aiofiles\nimport judge0api as api\nfrom cogs.problems import current_problem, get_problem, get_current_problem\nfrom utils import LimitedSizeDict\n\ninputs = LimitedSizeDict(size_limit=1000)\n\nclient = api.Client(\"http://127.0.0.1\")\n\ndef set_input(key, val):\n inputs[key] = val\n\ndef test_code(arg, attachment, lang_id, problem_name, channel_id):\n code = get_code(arg, attachment)\n print('Running test code: ', code.decode())\n cases = get_problem(problem_name).cases\n message = ''\n for stdin in cases:\n expected_output = cases[stdin]\n submission = api.submission.submit(client, code, lang_id,\n stdin=stdin.encode(), expected_output=expected_output.encode())\n status = submission.status\n message += 'Status: ' + status['description'] + '\\n'\n message += stats(submission.time, submission.memory)\n send_message(channel_id, message)\n\n\n\ndef run_code(arg, input_data, attachment, lang_id, channel_id):\n code = get_code(arg, attachment)\n print('Running code: ', code.decode())\n submission = api.submission.submit(client, code, lang_id,\n stdin=input_data.encode())\n status = submission.status\n output = submission.stdout\n errors = submission.stderr\n compile_output = submission.compile_output\n if output:\n output = output.decode()\n if errors:\n errors = errors.decode()\n if compile_output:\n compile_output = compile_output.decode()\n\n message = 'Status: ' + status['description'] + '\\n'\n\n if output:\n message += 'Output: ```\\n' + output + '\\n```'\n else:\n message += 'No output sent.\\n'\n if errors:\n message += 'Errors: ```\\n' + errors + '\\n```'\n if compile_output:\n message += ('Compiler output: ```\\n' + compile_output\n + '\\n```\\n')\n\n message += stats(submission.time, submission.memory)\n send_message(channel_id, message)\n\nclass Languages(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n async def code_command(self, ctx, arg, lang_id):\n author_id = ctx.author.id\n problem_name = get_current_problem(author_id)\n channel_id = ctx.message.channel.id\n\n if ctx.message.attachments:\n attachment = await ctx.message.attachments[0].read()\n else:\n attachment = None\n\n # Remove the space after the command.\n arg = arg[1:]\n\n if problem_name:\n thread = Thread(target=test_code, args=(arg, attachment, lang_id,\n problem_name, channel_id))\n else:\n if author_id in inputs:\n input_data = inputs[author_id]\n else:\n input_data = ''\n\n thread = Thread(target=run_code, args=(arg,\n input_data, attachment, lang_id, channel_id))\n thread.start()\n\n @commands.command(rest_is_raw=True)\n async def py(self, ctx, *, arg):\n \"\"\"Runs Python (3.8.1) code.\n Usage: $py (code)\n \"\"\"\n await self.code_command(ctx, arg, 71)\n\n @commands.command(rest_is_raw=True)\n async def py2(self, ctx, *, arg):\n \"\"\"Runs Python 
(2.7.17) code.\n Usage: $py2 (code)\n \"\"\"\n await self.code_command(ctx, arg, 70)\n\n @commands.command(rest_is_raw=True)\n async def cpp(self, ctx, *, arg):\n \"\"\"Runs C++ (GCC 9.2.0) code.\n Usage: $cpp (code)\n \"\"\"\n await self.code_command(ctx, arg, 54)\n\n @commands.command(rest_is_raw=True)\n async def c(self, ctx, *, arg):\n \"\"\"Runs C (GCC 9.2.0) code.\n Usage: $c (code)\n \"\"\"\n await self.code_command(ctx, arg, 50)\n\n @commands.command(rest_is_raw=True)\n async def cs(self, ctx, *, arg):\n \"\"\"Runs C# (Mono 6.6.0.161) code.\n Usage: $cs (code)\n \"\"\"\n await self.code_command(ctx, arg, 51)\n\n @commands.command(rest_is_raw=True)\n async def oc(self, ctx, *, arg):\n \"\"\"Runs Objective C (Clang 7.0.1) code.\n Usage: $oc (code)\n \"\"\"\n await self.code_command(ctx, arg, 79)\n\n @commands.command(rest_is_raw=True)\n async def java(self, ctx, *, arg):\n \"\"\"Runs Java (OpenJDK 13.0.1) code.\n Usage: $java (code)\n \"\"\"\n await self.code_command(ctx, arg, 62)\n\n @commands.command(rest_is_raw=True)\n async def js(self, ctx, *, arg):\n \"\"\"Runs JavaScript (Node.js 12.14.0) code.\n Usage: $js (code)\n \"\"\"\n await self.code_command(ctx, arg, 63)\n\n @commands.command(rest_is_raw=True)\n async def sql(self, ctx, *, arg):\n \"\"\"Runs SQL (SQLite 3.27.2) code.\n Usage: $sql (code)\n \"\"\"\n await self.code_command(ctx, arg, 82)\n\n @commands.command(rest_is_raw=True)\n async def vb(self, ctx, *, arg):\n \"\"\"Runs Visual Basic .NET (vbnc 0.0.0.5943) code.\n Usage: $vb (code)\n \"\"\"\n await self.code_command(ctx, arg, 84)\n\n @commands.command(rest_is_raw=True)\n async def octave(self, ctx, *, arg):\n \"\"\"Runs Octave (5.1.0) code.\n Usage: $octave (code)\n \"\"\"\n await self.code_command(ctx, arg, 66)\n\n @commands.command(rest_is_raw=True)\n async def clisp(self, ctx, *, arg):\n \"\"\"Runs Common LISP (SBCL 2.0.0) code.\n Usage: $clisp (code)\n \"\"\"\n await self.code_command(ctx, arg, 55)\n\n @commands.command(rest_is_raw=True)\n async def ass(self, ctx, *, arg):\n \"\"\"Runs Assembly (NASM 2.14.02) code.\n Usage: $ass (code)\n \"\"\"\n await self.code_command(ctx, arg, 45)\n\n @commands.command(rest_is_raw=True)\n async def bash(self, ctx, *, arg):\n \"\"\"Runs Bash (5.0.0) code.\n Usage: $bash (code)\n \"\"\"\n await self.code_command(ctx, arg, 46)\n\n @commands.command(rest_is_raw=True)\n async def php(self, ctx, *, arg):\n \"\"\"Runs PHP (7.4.1) code.\n Usage: $php (code)\n \"\"\"\n await self.code_command(ctx, arg, 68)\n\n @commands.command(rest_is_raw=True)\n async def lua(self, ctx, *, arg):\n \"\"\"Runs Lua (5.3.5) code.\n Usage: $lua (code)\n \"\"\"\n await self.code_command(ctx, arg, 64)\n\n @commands.command(rest_is_raw=True)\n async def pascal(self, ctx, *, arg):\n \"\"\"Runs Pascal (FPC 3.0.4) code.\n Usage: $pascal (code)\n \"\"\"\n await self.code_command(ctx, arg, 67)\n\n @commands.command(rest_is_raw=True)\n async def scala(self, ctx, *, arg):\n \"\"\"Runs Scala (2.13.2) code.\n Usage: $scala (code)\n \"\"\"\n await self.code_command(ctx, arg, 81)\n\n\n @commands.command(rest_is_raw=True)\n async def swift(self, ctx, *, arg):\n \"\"\"Runs Swift (5.2.3) code.\n Usage: $swift (code)\n \"\"\"\n await self.code_command(ctx, arg, 83)\n\n @commands.command(rest_is_raw=True)\n async def rust(self, ctx, *, arg):\n \"\"\"Runs Rust (1.40.0) code.\n Usage: $rust (code)\n \"\"\"\n await self.code_command(ctx, arg, 73)\n\n @commands.command(rest_is_raw=True)\n async def go(self, ctx, *, arg):\n \"\"\"Runs Go (1.13.5) code.\n Usage: $go (code)\n 
\"\"\"\n await self.code_command(ctx, arg, 60)\n\n @commands.command(rest_is_raw=True)\n async def ts(self, ctx, *, arg):\n \"\"\"Runs TypeScript (3.7.4) code.\n Usage: $ts (code)\n \"\"\"\n await self.code_command(ctx, arg, 74)\n\n @commands.command(rest_is_raw=True)\n async def kotlin(self, ctx, *, arg):\n \"\"\"Runs Kotlin (1.3.70) code.\n Usage: $kotlin (code)\n \"\"\"\n await self.code_command(ctx, arg, 78)\n\n @commands.command(rest_is_raw=True)\n async def rb(self, ctx, *, arg):\n \"\"\"Runs Ruby (2.7.0) code.\n Usage: $rb (code)\n \"\"\"\n await self.code_command(ctx, arg, 72)\n\n @commands.command(rest_is_raw=True)\n async def haskell(self, ctx, *, arg):\n \"\"\"Runs Haskell (GHC 8.8.1) code.\n Usage: $haskell (code)\n \"\"\"\n await self.code_command(ctx, arg, 61)\n\n @commands.command(rest_is_raw=True)\n async def basic(self, ctx, *, arg):\n \"\"\"Runs Basic (FBC 1.07.1) code.\n Usage: $basic (code)\n \"\"\"\n await self.code_command(ctx, arg, 47)\n\n @commands.command(rest_is_raw=True)\n async def fortran(self, ctx, *, arg):\n \"\"\"Runs Fortran (GFortran 9.2.0) code.\n Usage: $fortran (code)\n \"\"\"\n await self.code_command(ctx, arg, 59)\n\n @commands.command(rest_is_raw=True)\n async def r(self, ctx, *, arg):\n \"\"\"Runs R (4.0.0) code.\n Usage: $r (code)\n \"\"\"\n await self.code_command(ctx, arg, 80)\n\n @commands.command(rest_is_raw=True)\n async def erlang(self, ctx, *, arg):\n \"\"\"Runs Erlang (OTP 22.2) code.\n Usage: $erlang (code)\n \"\"\"\n await self.code_command(ctx, arg, 58)\n\n @commands.command(rest_is_raw=True)\n async def cobol(self, ctx, *, arg):\n \"\"\"Runs COBOL (GnuCOBOL 2.2) code.\n Usage: $cobol (code)\n \"\"\"\n await self.code_command(ctx, arg, 77)\n\n @commands.command(rest_is_raw=True)\n async def d(self, ctx, *, arg):\n \"\"\"Runs D (DMD 2.089.1) code.\n Usage: $d (code)\n \"\"\"\n await self.code_command(ctx, arg, 56)\n\n @commands.command(rest_is_raw=True)\n async def elixir(self, ctx, *, arg):\n \"\"\"Runs Elixir (1.9.4) code.\n Usage: $elixir (code)\n \"\"\"\n await self.code_command(ctx, arg, 57)\n\n @commands.command(rest_is_raw=True)\n async def ocaml(self, ctx, *, arg):\n \"\"\"Runs OCaml (4.09.0) code.\n Usage: $ocaml (code)\n \"\"\"\n await self.code_command(ctx, arg, 65)\n\n @commands.command(rest_is_raw=True)\n async def text(self, ctx, *, arg):\n \"\"\"Displays plain text.\n Usage: $text (code)\n \"\"\"\n await self.code_command(ctx, arg, 43)\n","repo_name":"ia03/vummer","sub_path":"cogs/languages.py","file_name":"languages.py","file_ext":"py","file_size_in_byte":9982,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"49"} +{"seq_id":"33521070412","text":"import numpy as np\nimport re\nfrom math import sqrt\nfrom update_tag_scheme import update_tag_scheme\nfrom gensim.test.utils import datapath, get_tmpfile\nfrom gensim.models import KeyedVectors\nfrom gensim.scripts.glove2word2vec import glove2word2vec\n\nLIMIT_LENGTH_OF_SENTENCES = 1000\nGLOVE_FILENAME = \"/home/huuthuc/Downloads/glove.6B/glove.6B.100d.txt\"\n\nclass Dictionary():\n def __init__(self):\n self.vocabs = list()\n self.word2idx = dict()\n \n \n \n def add_word(self, word):\n if word not in self.vocabs:\n self.vocabs.append(word)\n self.word2idx[word] = len(self.vocabs) - 1\n \n\n def to_id(self, word):\n if word in self.vocabs:\n return self.word2idx[word]\n else:\n return 0\n\n\n def to_word(self, idx):\n if idx < len(self.vocabs):\n # print(idx, '\\t', len(self.vocabs))\n return self.vocabs[idx]\n else:\n return 
self.vocabs[0]\n\n\n def get_word_embedding(self, embedd_size=300):\n n_words = len(self.vocabs)\n return np.random.uniform(-sqrt(3.0/embedd_size), sqrt(3.0/embedd_size), size=(n_words, embedd_size))\n\n\n def get_word_embedding_from_glove(self, glove_fn=GLOVE_FILENAME):\n glove_file = datapath(glove_fn)\n glove_in_w2v_format_fn = \"glove_in_word2vec_format.txt\"\n tmp_file = get_tmpfile(glove_in_w2v_format_fn)\n _ = glove2word2vec(glove_file, tmp_file)\n pretrained_w2v = KeyedVectors.load_word2vec_format(tmp_file)\n \n n_words = len(self.vocabs)\n embedd_size = pretrained_w2v['hello'].shape[0]\n embedd_matrix = np.random.uniform(-sqrt(3.0/embedd_size), sqrt(3.0/embedd_size), size=(n_words, embedd_size))\n\n for word, idx in self.word2idx.items():\n if word in pretrained_w2v:\n embedd_matrix[idx, :] = pretrained_w2v[word]\n \n return embedd_matrix\n\n\n def __len__(self):\n return len(self.vocabs)\n\n\nclass Corpus():\n def __init__(self):\n train_fn = './data/eng.train'\n dev_fn = './data/eng.testa'\n test_fn = './data/eng.testb'\n\n self.train_sentences, self.train_labels, self.train_sequence_lengths = self.read_tsv(train_fn)\n self.dev_sentences, self.dev_labels, self.dev_sequence_lengths = self.read_tsv(dev_fn)\n self.test_sentences, self.test_labels, self.test_sequence_lengths = self.read_tsv(test_fn)\n \n self.word_dictionary = self.create_dictionary(self.train_sentences, word_dictionary=True)\n self.label_dictionary = self.create_dictionary(self.train_labels)\n self.char_dict = self.generate_char_dict()\n self.max_word_len = 30\n\n\n def read_tsv(self, filename):\n sentences = []\n labels = []\n sequence_lengths = []\n sent = []\n label = []\n leng = 0\n\n with open(filename) as fin:\n for line in fin:\n\n row = re.split(\" \", line.strip())\n if len(row) == 1:\n if leng > 0 and leng <= LIMIT_LENGTH_OF_SENTENCES:\n sentences.append(sent)\n labels.append(label)\n sequence_lengths.append(leng)\n leng = 0\n label = []\n sent = []\n elif row[0] != '-DOCSTART-':\n word = row[0].lower().strip()\n sent.append(word)\n label.append(row[-1].strip())\n leng += 1\n update_tag_scheme(labels, sequence_lengths)\n return sentences, labels, sequence_lengths\n \n\n def create_dictionary(self, data, word_dictionary=False):\n dictionary = Dictionary()\n if word_dictionary:\n dictionary.add_word('UNK') # for unknowkn words\n for row in data:\n for value in row:\n dictionary.add_word(value)\n return dictionary\n\n \n def generate_char_dict(self):\n char_dict = {}\n alphabet = ' abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-,;.!?:’\"/|_#$%ˆ&*˜‘+=<>()[]{}'\n for i,c in enumerate(alphabet):\n char_dict[c] = i\n return char_dict\n\n \nif __name__ == '__main__':\n corpus = Corpus()\n a = corpus.word_dictionary.get_word_embedding_from_glove()","repo_name":"caihuuthuc/BiLSTM-CNNs-CRF","sub_path":"dictionary.py","file_name":"dictionary.py","file_ext":"py","file_size_in_byte":4337,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"49"} +{"seq_id":"9456905925","text":"from elasticsearch import Elasticsearch\n\nfrom elasticsearch_simple_client.config import Config\nfrom elasticsearch_simple_client.search_query_builder import SearchQueryBuilder\n\n\nclass Searcher:\n def __init__(self, es_url: str = None):\n self._config = Config.default()\n self._builder = SearchQueryBuilder()\n\n es_url = self._config.es_url if es_url is None else es_url\n\n self._es_connecter = Elasticsearch([es_url], timeout=60)\n\n def execute_search(self, field: str,\n musts: list = 
None,\n shoulds: list = None,\n query_return_length=None,\n index=None):\n if index is None:\n index = self._config.es_index\n if query_return_length is None:\n query_return_length = self._config.query_return_length\n\n query = self._builder.build_single_index_search_query(musts=musts,\n shoulds=shoulds,\n field=field,\n query_return_length=query_return_length)\n\n es_result = self._es_connecter.search(body=query, index=index)\n return es_result\n","repo_name":"chilledgeek/elasticsearch-simple-client","sub_path":"elasticsearch_simple_client/searcher.py","file_name":"searcher.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"49"} +{"seq_id":"40364344183","text":"import torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nfrom xnmtorch.modules.initializers import XavierUniform, ConstantInitializer\nfrom xnmtorch.persistence import Serializable\nfrom xnmtorch.persistence.serializable import bare\n\n\ndef group_linear(linears, input, bias=False):\n weights = [linear.weight for linear in linears]\n\n weight = torch.cat(weights, dim=0)\n\n if bias:\n biases = [linear.bias for linear in linears]\n bias_ = torch.cat(biases)\n else:\n bias_ = None\n\n return F.linear(input, weight, bias_)\n\n\nclass Linear(nn.Linear, Serializable):\n def __init__(self, in_features, out_features, bias=True, weight_norm=False, initializer=bare(XavierUniform),\n bias_initializer=bare(ConstantInitializer, val=0)):\n self.initializer = initializer\n self.bias_initializer = bias_initializer\n super().__init__(in_features, out_features, bias)\n self.weight_norm = weight_norm\n if weight_norm:\n nn.utils.weight_norm(self, name='weight')\n\n def reset_parameters(self):\n self.initializer.initialize(self.weight)\n if self.bias is not None:\n self.bias_initializer.initialize(self.bias)\n\n\nclass MaxOut(nn.Module, Serializable):\n \"\"\"\n Project the input up `pool_size` times, then take the maximum of the outputs.\n \"\"\"\n\n def __init__(self, in_features, out_features, pool_size):\n super().__init__()\n self.in_features = in_features\n self.out_fetures = out_features\n self.pool_size = pool_size\n self.linear = nn.Linear(in_features, out_features * pool_size)\n\n def forward(self, inputs):\n original_size = inputs.size()\n\n projected = self.linear(inputs).view(*original_size[:-1], self.out_fetures, self.pool_size)\n out, _ = projected.max(-1)\n return out\n\n def extra_repr(self):\n return 'in_features={}, out_features={}, pool_size={}' \\\n .format(self.in_features, self.out_fetures, self.pool_size)\n\n","repo_name":"felix-schneider/xnmtorch","sub_path":"xnmtorch/modules/linear.py","file_name":"linear.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"24758368430","text":"import pickle\nfrom . 
import utils\nimport numpy as np\nfrom .order_base import OrderBase\n\nimport pdb\n\n'''Ordering for features extracted from the RN network'''\nclass RNOrder(OrderBase):\n def __init__(self, filename, name='RN', normalize=False, how_many=15000, st='test'):\n super().__init__() \n\n self.rn_feats = self.load_features(filename, how_many)\n self.normalize = normalize\n self.st = st\n if normalize:\n self.rn_feats = utils.normalized(self.rn_feats, 1)\n self.name = name\n\n def load_features(self, filename, how_many):\n f = open(filename, 'rb')\n features = pickle.load(f)\n features = [f[1] for f in features]\n features = np.vstack(features)\n features = features[:how_many]\n print('processed #{} features each of size {}'.format(features.shape[0], features.shape[1]))\n return features\n \n def compute_distances(self, query_img_index):\n query_feat = self.rn_feats[query_img_index]\n distances = [utils.l2_dist(query_feat, f) for f in self.rn_feats]\n return distances\n\n def get_name(self):\n return self.name\n\n def get_identifier(self):\n return '{}-norm{}-set{}'.format(self.get_name().replace('\\n','_').replace(' ','-'), self.normalize, self.st)\n\n def length(self):\n return len(self.rn_feats)\n\n#simple test\nimport os\nif __name__ == \"__main__\":\n clevr_dir = '../features'\n idx = 6\n \n s = RNOrder(os.path.join(clevr_dir,'avg_features.pickle'))\n print(s.get(idx))\n","repo_name":"mesnico/learning-relationship-aware-visual-features","sub_path":"order/rn_order.py","file_name":"rn_order.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"49"} +{"seq_id":"13521552483","text":"#!/usr/bin/python3\n'''Contains a get_matrix_size and a matrix_mul'''\n\n\ndef get_matrix_sizes(matrix_1, matrix_2, name_1, name_2):\n '''Computes the size of a matrix and performs some matrix validation.'''\n funcs = (\n lambda txt: '{} must be a list'.format(txt),\n lambda txt: '{} can\\'t be empty'.format(txt),\n lambda txt: '{} must be a list of lists'.format(txt),\n lambda txt: '{} should contain only integers or floats'.format(txt),\n lambda txt: 'each row of {} must be of the same size'.format(txt),\n lambda l: all(map(lambda n: isinstance(n, (int, float)), l)),\n )\n size0 = [0, 0]\n size1 = [0, 0]\n if not isinstance(matrix_1, list):\n raise TypeError(funcs[0](name_1))\n if not isinstance(matrix_2, list):\n raise TypeError(funcs[0](name_2))\n size0[0] = len(matrix_1)\n size1[0] = len(matrix_2)\n if size0[0] == 0:\n raise ValueError(funcs[1](name_1))\n if size1[0] == 0:\n raise ValueError(funcs[1](name_2))\n if not all(map(lambda x: isinstance(x, list), matrix_1)):\n raise TypeError(funcs[2](name_1))\n if not all(map(lambda x: isinstance(x, list), matrix_2)):\n raise TypeError(funcs[2](name_2))\n if all(map(lambda x: len(x) == 0, matrix_1)):\n raise ValueError(funcs[1](name_1))\n if all(map(lambda x: len(x) == 0, matrix_2)):\n raise ValueError(funcs[1](name_2))\n if not all(map(lambda x: funcs[5](x), matrix_1)):\n raise TypeError(funcs[3](name_1))\n if not all(map(lambda x: funcs[5](x), matrix_2)):\n raise TypeError(funcs[3](name_2))\n size0[1] = len(matrix_1[0])\n size1[1] = len(matrix_2[0])\n if not all(map(lambda x: len(x) == size0[1], matrix_1)):\n raise TypeError(funcs[4](name_1))\n if not all(map(lambda x: len(x) == size1[1], matrix_2)):\n raise TypeError(funcs[4](name_2))\n return size0, size1\n\n\ndef matrix_mul(m_a, m_b):\n '''Multiplies 2 matrices.'''\n a_sz, b_sz = get_matrix_sizes(m_a, m_b, 'm_a', 'm_b')\n # AB 
only works iff column_count in A == row_count in B\n if a_sz[1] != b_sz[0]:\n raise ValueError('m_a and m_b can\\'t be multiplied')\n else:\n res = []\n for row_a in m_a:\n row_res = []\n for i in range(b_sz[1]):\n cell_args = zip(range(a_sz[1]), row_a)\n val = map(lambda x: x[1] * m_b[x[0]][i], cell_args)\n row_res.append(sum(list(val)))\n res.append(row_res)\n return res\n","repo_name":"St-Pardon/alx-higher_level_programming","sub_path":"0x07-python-test_driven_development/100-matrix_mul.py","file_name":"100-matrix_mul.py","file_ext":"py","file_size_in_byte":2488,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"49"} +{"seq_id":"40321099809","text":"import io\n\nimport pytest\n\nfrom classquiz.config import settings\nfrom classquiz.storage import Storage\n\nsettings = settings()\n\nfile_contents = b\"hello world\"\n\n\ndef test_storage_init():\n with pytest.raises(NotImplementedError):\n Storage(\n backend=\"asdsad\",\n storage_path=settings.storage_path,\n )\n with pytest.raises(ValueError):\n Storage(backend=\"s3\", base_url=None, secret_key=None, access_key=None, storage_path=None)\n with pytest.raises(ValueError):\n Storage(backend=\"local\", storage_path=None)\n\n\nasync def storage_tester(storage: Storage):\n res = await storage.upload(file_name=\"test.txt\", file_data=io.BytesIO(initial_bytes=file_contents))\n assert res is None\n res = storage.download(file_name=\"test.txt\")\n async for chunk in res:\n assert bytes(chunk) == file_contents\n res = await storage.get_file_size(file_name=\"test.txt\")\n assert res == len(file_contents)\n res = await storage.get_file_size(file_name=\"asdsadasdasdadfdsf.txt\")\n assert res is None\n res = await storage.delete(file_names=[\"test.txt\"])\n assert res is None\n res = storage.download(file_name=\"test.txt\")\n async for chunk in res:\n assert chunk is None\n res = await storage.delete(file_names=[\"test.txt\"])\n assert res is None\n\n\n@pytest.mark.asyncio\nasync def test_local():\n storage: Storage = Storage(backend=\"local\", storage_path=settings.storage_path)\n await storage_tester(storage)\n\n\n@pytest.mark.asyncio\nasync def test_minio():\n storage: Storage = Storage(\n backend=\"s3\",\n access_key=\"Q3AM3UQ867SPQQA43P2F\",\n secret_key=\"zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG\",\n bucket_name=\"classquiz\",\n base_url=\"https://play.min.io\",\n storage_path=None,\n )\n await storage_tester(storage)\n await storage.upload(file_name=\"test.txt\", file_data=io.BytesIO(initial_bytes=file_contents))\n url = await storage.get_url(file_name=\"test.txt\", expiry=20)\n assert url is not None\n","repo_name":"mawoka-myblock/ClassQuiz","sub_path":"classquiz/tests/test_storage.py","file_name":"test_storage.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","stars":285,"dataset":"github-code","pt":"49"} +{"seq_id":"58140752027","text":"\"\"\"\nSimulation of Web App Architecture | Case study based on assignment from\n https://www.udemy.com/share/103nlaAEMZdllWTHg=/\n\nService: The Russian Peasant's Algorithm\n\nArchitecture Include:\n\n - App Computer (Modules)\n - Database (--) --> Russian Peasant Algorithm\n - Load Balancer (Algorithm)\n\n+-----+ +-----+ +-----+\n| APP | | APP | | APP |\n| 1 | | 2 | | 3 |\n+-----+ +-----+ +-----+\n\"\"\"\n## Bot names\nimport bot1\nimport bot2\nimport bot3\n\nROBOTS = [bot1, bot2, bot3]\n\nn = -1\n\n\ndef get_robot():\n global n\n n += 1\n return ROBOTS[n % len(ROBOTS)]\n\n\n## Testing Load which needs balancing\nif 
__name__ == '__main__':\n from random import randint\n\n # simulating num of requests in the next loop\n for i in range(10):\n # some 'requested' numbers\n z = randint(1, 21)\n a = [11, 35, 213, 51, 12, 94, 68][z % 7]\n # a = randint(5,99)\n b = [93, 25, 12, 111, 1337, 89, 1][z % 7]\n # b = randint(5,99)\n\n # run the flow controller to get an available 'bot'\n active_bot = get_robot()\n\n # print the results\n print(active_bot.printName())\n print(f\"{a}x{b}\")\n print(active_bot.multiplyHandler(a, b))\n print(active_bot.lastMultipliedHandler())\n print(\"\")\n\n# End of code - https://github.com/eabdiel\n","repo_name":"eabdiel/python_playground","sub_path":"Russian Peasant Algorithm - Multi Module Flow/flow_controller.Py","file_name":"flow_controller.Py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"21603950769","text":"import sys\nimport numpy as np\nimport cv2\n\n'''\nopencv_version = 4.1.0.25에서 실행\n'''\n\n# 동영상 열기\ncap = cv2.VideoCapture('ch10\\\\videos\\\\tracking1.mp4')\n\nif not cap.isOpened():\n print('Video open failed!')\n sys.exit()\n\n# 트래커 객체 생성\n# ==> 배경이 비슷하면? 검은화면이 띄면 추적 못함\n# ==> 추적하는 객체가 가려지면 놓치게 됨\n'''\n# Kernelized Correlation Filters\n# 그나마 빠르게 동작함\n#tracker = cv2.TrackerKCF_create()\n\n# Minimum Output Sum of Squared Error\n#tracker = cv2.TrackerMOSSE_create()\n'''\n# Discriminative Correlation Filter with Channel and Spatial Reliability\n# 추적은 잘하는 편이지만 속도가 느려\ntracker = cv2.TrackerMIL_create()\n'''\ncv2.TrackerXXX_create() -> \nXXX = Boosting, CSRT, GOTURN, \n KCF, MedianFlow, MIL, \n MOSSE, TLD\n'''\n\n# 첫 번째 프레임에서 추적 ROI 설정\nret, frame = cap.read()\n\nif not ret:\n print('Frame read failed!')\n sys.exit()\n\nrc = cv2.selectROI('frame', frame)\ntracker.init(frame, rc)\n'''\ncv2.Tracker.init(image, boundingBox) -> retval\n\nboundingBox: ROI\n( (x, y, w, h) )\n'''\n\nwhile True:\n ret, frame = cap.read()\n\n if not ret:\n print('Frame read failed!')\n sys.exit()\n\n # 추적 & ROI 사각형 업데이트\n ret, rc = tracker.update(frame)\n '''\n cv2.Tracker.update(image) -> retval, boundingBox\n\n retval: True, False.(추적 성공여부)\n '''\n\n # rectangle을 위한 int 변환\n rc = tuple([int(_) for _ in rc])\n cv2.rectangle(frame, rc, (0,0,255), 2)\n\n cv2.imshow('frame', frame)\n if cv2.waitKey(20) == ord('q'):\n break\n","repo_name":"kiimy-git/OpenCV-Tutorial","sub_path":"Fastcampus/ch10/tracker.py","file_name":"tracker.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"6067729198","text":"#coding: utf-8\nn, m = map(int, raw_input().split())\nl =[]\nc = -1\nwhile int(n)!=0 or int(m)!=0:\n amb = []\n for r in range(int(m)):\n amb.append([])\n f = raw_input()\n for i in range(int(n)):\n amb[r] = f.split()\n c += 1\n l.append(amb)\n print(l[c])\n n, m = input().split()\nprint(l)\n\ndef espalha(a, i, j): \n linhas = len(a[0]) - 1\n colunas = len(a) - 1\n if a[i][j] == \"I\":\n if j > 0:\n a[i][j-1] = \"I\"\n if i > 0:\n a[i-1][j] = \"I\"\n if j < linhas:\n a[i][j+1] = \"I\"\n if i < colunas:\n a[i+1][j] = \"I\"\n\n if j < linhas:\n return espalha(a, i, j+1)\n else:\n if i < colunas:\n return espalha(a, i+1, j)\n else:\n return None\n\nfor i in range(len(l)):\n espalha(l[i], 0, 
0)\n","repo_name":"gabebarbosa/AD1-FP-UFF-CEDERJ-2017-1","sub_path":"q6.py","file_name":"q6.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"5044767925","text":"import enum\nfrom typing import Dict\n\n\nclass Config:\n @enum.unique\n class Type(enum.IntEnum):\n INT = enum.auto()\n FLOAT = enum.auto()\n STR = enum.auto()\n PATH = enum.auto()\n\n configtype2pytype = {\n Type.INT: int,\n Type.FLOAT: float,\n Type.STR: str,\n Type.PATH: str,\n }\n\n def __init__(self) -> None:\n self.config_types: Dict[str, Config.Type] = {}\n self.values = {}\n\n def init_zero_value(self, type):\n if type == self.Type.INT:\n return 0\n elif type == self.Type.FLOAT:\n return 0.0\n elif type == self.Type.STR:\n return \"\"\n elif type == self.Type.PATH:\n return \"\"\n else:\n raise ValueError(f\"Invalid configuration type: {type}\")\n\n def add_config(self, name, type: Type, default_value=None):\n self.config_types[name] = type\n if default_value is not None:\n try:\n pytype = self.configtype2pytype[type]\n converted_val = pytype(default_value)\n self.values[name] = converted_val\n except ValueError:\n self.values[name] = self.init_zero_value(type)\n else:\n self.values[name] = self.init_zero_value(type)\n\n def set_config(self, name, value):\n self.values[name] = value\n\n def is_correct_type(self, name, value):\n type = self.get_type(name)\n pytype = self.configtype2pytype[type]\n try:\n val = pytype(value)\n return True, val\n except ValueError:\n return False, None\n\n def set_value(self, name, value: str):\n stat, val = self.is_correct_type(name, value)\n if stat:\n self.values[name] = val\n return True\n return False\n\n def get_value(self, name):\n return self.values[name]\n\n def get_type(self, name):\n return self.config_types[name]\n\n def to_dict(self) -> dict:\n result = {}\n for name in self.config_types.keys():\n result[name] = self.get_value(name)\n return result\n\n def __iter__(self):\n return iter(self.config_types.items())\n","repo_name":"XiaoXuan42/easyAlgVis","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"25853158194","text":"class Queue:\n def __init__ (self):\n self.queue =[]\n \n def is_empty(self):\n return len(self.queue) == 0\n \n def __len__(self):\n return len(self.queue)\n\n def dequeue(self):\n return self.queue.pop(0) if not self.is_empty() else None\n \n def enqueue(self,data):\n self.queue.append(data)\n\n\nclass MazeRunner:\n \n def __init__(self):\n self.queue = Queue()\n self.directions = [(0, -1), (1, 0), (0, 1), (-1, 0)]\n\n def find_start(self,room: list):\n for i in range(len(room)):\n for j in range(len(room[i])):\n if room[i][j] == \"F\":\n return(j,i)\n return\n \n def find_the_next_move(self, room):\n x,y = self.queue.dequeue()\n for dx, dy in self.directions:\n new_x, new_y = x + dx, y + dy\n if 0 <= new_y < len(room) and 0 <= new_x < len(room[0]) and room[new_y][new_x] == 'O':\n return True\n elif 0 <= new_y < len(room) and 0 <= new_x < len(room[0]) and room[new_y][new_x] == '_':\n self.queue.enqueue((new_x, new_y))\n room[new_y][new_x] = 'X'\n \n \n def search(self, width, height, room):\n x = self.find_start(room)\n if not x :\n return print('Invalid map input.')\n self.queue.enqueue(x)\n if len(room) != int(height):\n return print(\"Invalid map input.\")\n \n for row in room:\n if len(row) != int(width):\n 
return print(\"Invalid map input.\")\n # Main Loop\n while not self.queue.is_empty():\n print(f'Queue: {self.queue.queue}')\n if self.find_the_next_move(room):\n print('Found the exit portal.')\n return\n return print('Cannot reach the exit portal.')\n\n\nmaze = []\nwidth,height,room = input(\"Enter width, height, and room: \").split(\" \")\nroom = [list(string) for string in room.split(',')]\nmaze_runner =MazeRunner()\nmaze_runner.search(width,height,room)\n\n \n \n ","repo_name":"PcrPz/OOD","sub_path":"Week_4_Queue/4_5_Queue.py","file_name":"4_5_Queue.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3173940446","text":"import typing as T\n\nimport numpy as np\nimport PySimpleGUI as sg\nfrom PIL import ImageTk, Image\n\nfrom rembrain_robot_framework import RobotProcess\n\n\nclass GUIProcess(RobotProcess):\n def __init__(\n self, title: str = \"Rembrain Robot Framework Example\", *args, **kwargs\n ):\n super(GUIProcess, self).__init__(*args, **kwargs)\n\n # Required for persistence of images,\n # because if they go out of scope, they disappear from the canvas\n self._tk_images: T.Dict[str, T.Optional[ImageTk.PhotoImage]] = {\n \"image_orig\": None,\n \"image_processed\": None,\n }\n self._title: str = title\n\n def run(self) -> None:\n canvas_orig = sg.Canvas(size=(533, 400))\n canvas_processed = sg.Canvas(size=(533, 400))\n\n layout = [\n [sg.Text(\"Original\", size=(76, 1)), sg.Text(\"Processed\")],\n [canvas_orig, canvas_processed],\n ]\n window = sg.Window(self._title, layout, location=(10, 10))\n\n while True:\n event, values = window.read(timeout=10)\n if event in (sg.WIN_CLOSED, \"Exit\"):\n break\n\n self.try_redraw_image(\"image_orig\", canvas_orig)\n self.try_redraw_image(\"image_processed\", canvas_processed)\n\n window.close()\n self.shared.exit_flag.value = True\n\n def try_redraw_image(self, queue_name: str, canvas_elem: sg.Canvas) -> None:\n if self.is_empty(queue_name):\n return\n\n raw_image: T.Union[tuple, np.ndarray] = self.consume(queue_name)\n\n # If we got depth data included - discard it\n if type(raw_image) is tuple:\n raw_image = raw_image[0]\n\n img = Image.fromarray(raw_image)\n img = img.resize(canvas_elem.get_size())\n self._tk_images[queue_name] = ImageTk.PhotoImage(img)\n\n canvas_elem.TKCanvas.delete(\"all\")\n canvas_elem.TKCanvas.create_image(\n 0, 0, image=self._tk_images[queue_name], anchor=\"nw\"\n )\n","repo_name":"VasilyMorzhakov/rembrain_robotframework","sub_path":"examples/common/processes/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"52"} +{"seq_id":"10986639083","text":"import torch\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\n\nimport time\nimport numpy as np\nfrom sklearn.preprocessing import MultiLabelBinarizer\nfrom sklearn.metrics import accuracy_score\n\nfrom Utils.utils import get_eval_predictions\n\n\nclass Trainer:\n def __init__(self, model, kinase_model, optimizer, scheduler, args):\n self.model = model\n self.kinase_model = kinase_model\n self.optimizer = optimizer\n self.device = args.DEVICE\n self.lr_scheduler = scheduler\n self.args = args\n\n # Set model device\n self.model.to(self.device)\n if kinase_model is not None:\n self.kinase_model.to(self.device)\n if args.TRAIN_KINASE == False:\n self.kinase_model.eval()\n\n\n def train_step(self, train_dataloader):\n \n # Set model to 
training mode\n self.model.train()\n\n epoch_loss, epoch_accuracy = 0, 0\n\n for _, (phosphosite,kinase,y) in enumerate(train_dataloader):\n\n # Set device of data\n if self.args.HF_ONLY_ID:\n phosphosite = phosphosite.to(self.device)\n else:\n phosphosite['input_ids'] = phosphosite['input_ids'].to(self.device)\n phosphosite['attention_mask'] = phosphosite['attention_mask'].to(self.device)\n kinase = kinase.to(self.device)\n y = y.to(self.device)\n ### Train candidate kinase device datasete ekle\n\n # Reset Gradients\n self.optimizer.zero_grad()\n\n # Prediction\n output = self.model(phosphosite, kinase, self.train_dataset.kinase_set_with_1)\n \n # Loss\n loss, outclassidx = self.criterion(output['kinase_logit'], output['unique_logits'])\n \n # Calculating Gradients\n loss.backward()\n #torch.nn.utils.clip_grad_norm_(self.model.parameters(), config.CLIP_GRADIENTS)\n \n # Update Weights\n self.optimizer.step()\n \n # Calculating Performance Metrics\n epoch_accuracy += accuracy_score(y.cpu(), outclassidx.cpu(), normalize=True)\n epoch_loss += loss.item()\n\n epoch_loss /= len(train_dataloader)\n epoch_accuracy /= len(train_dataloader)\n\n return epoch_loss, epoch_accuracy\n\n\n def eval_step(self, val_dataloader, ValCandidatekinaseEmbeddings, ValCandidateKE_to_Kinase, ValKinaseUniProtIDs, mlb_Val):\n \n self.model.eval()\n \n #X, CE, y = next(iter(val_dataloader))\n phosphosite, kinase, labels = val_dataloader.phosphosite, val_dataloader.kinase_with_1, val_dataloader.labels\n if self.args.USE_ESM_KINASE:\n candidate_kinase_with_1 = torch.from_numpy(np.c_[ ValCandidatekinaseEmbeddings, np.ones(len(ValCandidatekinaseEmbeddings))]).to(torch.int64)\n else:\n candidate_kinase_with_1 = torch.from_numpy(np.c_[ ValCandidatekinaseEmbeddings, np.ones(len(ValCandidatekinaseEmbeddings))]).float()\n # Set Data Device\n if self.args.HF_ONLY_ID:\n phosphosite = phosphosite.to(self.device)\n else:\n phosphosite['input_ids'] = phosphosite['input_ids'].to(self.device)\n phosphosite['position_ids'] = phosphosite['position_ids'].to(self.device)\n candidate_kinase_with_1 = candidate_kinase_with_1.to(self.device)\n\n with torch.no_grad():\n output = self.model(phosphosite, kinase, candidate_kinase_with_1)\n # Equation 5 from paper\n outclassidx = torch.argmax(output['unique_logits'], dim=1) \n classes = ValCandidatekinaseEmbeddings[outclassidx.cpu()]\n probabilities = torch.nn.functional.softmax(output['unique_logits'], dim=1)\n\n probabilities = probabilities.cpu().numpy()\n\n # get UniProtIDs for predicted classes and return them\n UniProtIDs =[]\n for c in classes:\n UniProtIDs.append(ValCandidateKE_to_Kinase[tuple(c)])\n UniProtIDs = np.array(UniProtIDs)\n\n Val_Evaluation, binlabels_pred = get_eval_predictions(UniProtIDs, probabilities, ValKinaseUniProtIDs, labels, mlb_Val)\n\n return Val_Evaluation, UniProtIDs, probabilities, binlabels_pred\n \n\n def train(self,\n train_dataset,\n val_dataset,\n ValCandidatekinaseEmbeddings=None,\n ValCandidateKE_to_Kinase=None, \n ValKinaseUniProtIDs=None):\n \n # Datasets and Dataloaders\n self.train_dataset = train_dataset\n self.val_dataset = val_dataset\n\n train_dataloader = DataLoader(train_dataset, batch_size = self.args.BATCH_SIZE, shuffle = True)\n #if val_dataset is not None:\n #val_dataloader = DataLoader(val_dataset, batch_size = len(val_dataset), shuffle = False)\n\n # Set it for loss calculation\n self.train_dataset.kinase_set_with_1 = self.train_dataset.kinase_set_with_1.to(self.device)\n\n # For Classification Report\n if ValKinaseUniProtIDs is not None:\n 
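# For illustration only (toy data, invented UniProt IDs): what the
# MultiLabelBinarizer used just below produces when fitted.
from sklearn.preprocessing import MultiLabelBinarizer

demo_labels = [["P17612"], ["P17612", "Q05655"]]
demo_mlb = MultiLabelBinarizer()
demo_binary = demo_mlb.fit_transform(demo_labels)
# demo_mlb.classes_ -> ['P17612', 'Q05655']
# demo_binary      -> [[1, 0],
#                      [1, 1]]   (one row per sample, one column per class)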
mlb_Val = MultiLabelBinarizer()\n binlabels_true_Val = mlb_Val.fit_transform(ValKinaseUniProtIDs)\n\n \n ### TRAINING STARTS ###\n epochs_start_time = time.time()\n\n for epoch in range(self.args.NUM_EPOCHS):\n print(\"===================================\\nepoch: {}\\t\".format(epoch))\n train_step_start_time = time.time()\n\n train_loss, train_acc = self.train_step(train_dataloader)\n\n print(f'Train Step takes {time.time() - train_step_start_time} seconds')\n\n if val_dataset is not None:\n Val_Evaluation, UniProtIDs, probabilities, binlabels_pred = self.eval_step(val_dataset, ValCandidatekinaseEmbeddings, ValCandidateKE_to_Kinase, ValKinaseUniProtIDs, mlb_Val)\n \n # Change learning rate with scheduler\n self.lr_scheduler.step()\n\n # Epoch results (Train)\n print(\"train_loss: {:.3f}, train_acc: {:.3f}\".format(train_loss, train_acc))\n \n # Epoch results (Validation)\n if val_dataset is not None:\n #print(classification_report(binlabels_true_Val, binlabels_pred, target_names=mlb_Val.classes_) + '\\n\\n')\n print('Acccuracy_Val: {} Loss_Val: {} Top5Accuracy: {} Top10Accuracy: {}'\\\n .format(Val_Evaluation[\"Accuracy\"], Val_Evaluation[\"Loss\"], Val_Evaluation[\"Top5Acc\"], Val_Evaluation[\"Top10Acc\"]))\n\n print(f'Epochs time for 1 model: {time.time() - epochs_start_time} seconds')\n\n if val_dataset is not None:\n return train_acc, train_loss, Val_Evaluation, UniProtIDs, probabilities, mlb_Val, binlabels_true_Val\n else:\n return train_acc, train_loss, None, None, None, None, None \n\n\n def criterion(self, kinase_logit, unique_logits):\n \n # Calculating the maximum of each row to normalize logits so that softmax doesn't overflow\n maxlogits = torch.max(unique_logits, dim=1, keepdim=True)[0] # output shape: (batch,1)\n \n ## p(y|x): Equation 1 from paper\n numerator = kinase_logit - maxlogits.squeeze()\n denominator = torch.sum(torch.exp(unique_logits - maxlogits), dim=1)\n softmax_out = torch.exp(numerator) / (denominator + 1e-15) # output shape: (batch)\n \n ## Cross Entropy\n # Equation 4 from paper\n P = torch.clamp(softmax_out, min=1e-15, max=1.1)\n loss = torch.mean(-torch.log(P))\n\n # Find the class index for each data point (the class with maximum F score)\n # I detached it due to preventing unnecessary gradient calculation\n outclassidx = torch.argmax(unique_logits.detach(), dim=1) # output shape: (batch)\n\n return loss, outclassidx\n","repo_name":"mertpekey/DeepKinZero","sub_path":"trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":7814,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"1175415618","text":"from django.conf.urls import patterns, url\n\nfrom rest_framework.urlpatterns import format_suffix_patterns\nfrom . 
import views\n\nurlpatterns = format_suffix_patterns(patterns('',\n #url(r'^auth/', include('rest_framework.urls', namespace='rest_framework')),\n #url(r'^sites/', include('bluusites.api_urls', namespace='sites')),\n #url(r'^companies/', include('companies.api_urls', namespace='companies')),\n #url(r'^devices/', include('devices.api_urls', namespace='devices')),\n url(r'^sites/(?P[0-9\\w.@+-]+)/devices/(?P[0-9\\w.@+-]+)/statuses/',\n views.DeviceStatusCreateView.as_view(),\n name='create_status'),\n url(r'^sites/(?P[0-9\\w.@+-]+)/',\n views.SiteHeartBeatView.as_view(),\n name='site_heartbeat'),\n ), allowed=['json', 'html']\n\n)\n\n","repo_name":"bluusystemsinc/bluu","sub_path":"bluu-web/project/apps/apiv1/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"38640613801","text":"from services import infoService\nfrom config import config\n\n\nclass InfoContoller:\n def __init__(self, db):\n self.infoService = infoService.InfoService()\n self.collection = config.config()[\"COLLECTION_NAME\"][\"INFO\"]\n self.collection = db[self.collection]\n\n def defaultMethod(self, params):\n try:\n result = []\n if not params:\n result = self.infoService.getAllLogs(collection=self.collection)\n print(result)\n return result\n except Exception as ex:\n print(ex)\n return False\n\n\nclass InfoFilterController:\n def __init__(self, db):\n self.infoService = infoService.InfoService()\n self.collection = config.config()[\"COLLECTION_NAME\"][\"INFO\"]\n self.collection = db[self.collection]\n\n def masterMethod(self,filterType, conditions):\n try:\n result = []\n if filterType.lower() == \"search\":\n result = self.infoService.getSearchedLogs(self.collection, conditions)\n print(result)\n return result\n except Exception as ex:\n print(ex)\n return []\n\n","repo_name":"ahmednasir/LogKyaKahenge","sub_path":"controllers/InfoController.py","file_name":"InfoController.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27348865359","text":"import cv2\nimport os\nimport firebase_admin\nimport pyrebase\nfrom firebase_admin import credentials\nfrom src import watcher, encode_faces\nfrom src.watcher import Watcher\n\nconfig={\n \"apiKey\": \"API_KEY\",\n \"authDomain\": \"AUTH_DOMAIN\",\n \"databaseURL\": \"DB_URL\",\n \"projectId\": \"P_ID\",\n \"storageBucket\": \"BUCKET\",\n \"serviceAccount\": \"SERVICE_ACCOUNT\",\n \"messagingSenderId\": \"ID\",\n \"appId\": \"APP_ID\",\n \"measurementId\": \"M_ID\"\n}\n\nfirebase=pyrebase.initialize_app(config)\nstorage=firebase.storage()\ncred=credentials.Certificate(\n \"certificate\")\nfirebase_admin.initialize_app(cred, {\n \"databaseURL\": \"database_url\",\n \"databaseAuthVariableOverride\": None\n})\ndb=firebase.database()\n\n\ndef stream_handler(message):\n ab=str(1)\n ba=str(1)\n\n files=storage.child('/').list_files()\n vidfiles=storage.child('/').list_files()\n\n # path_for_images=\"/home/chirag/PycharmProjects/WatchDog_Server/images_to_recognise\"\n path_for_videos=\"/home/chirag/Documents/Code_new/WatchDog_Server/media\"\n\n node=str(message[\"path\"]).split('/')[-2] # you can slice the path according to your requirement\n property=str(message[\"path\"]).split('/')[-1]\n value=message[\"data\"]\n if (message[\"event\"] == \"put\"):\n for vid in vidfiles:\n try:\n if \"videos/\" in vid.name:\n if vid.name == \"videos/\":\n continue\n 
else:\n storage.child(vid.name).download(path_for_videos+\"/\"+ba+\".mp4\")\n y=int(ba)\n ba=str(y+1)\n except:\n print(\"video fail to download\")\n else:\n print(\"error\")\n\nmy_stream=db.child('/').stream(stream_handler)\n\nwatch_file='/home/chirag/Documents/Code_new/WatchDog_Server/media'\n\ndef capture_encode_faces():\n path=\"/home/chirag/Documents/Code_new/WatchDog_Server/media/\"\n capt=cv2.VideoCapture(path+'1.mp4')\n dir=\"/home/chirag/Documents/Code_new/WatchDog_Server/dataset\"\n face_id='Valay'\n count=0\n while (capt.isOpened()):\n face_detector=cv2.CascadeClassifier(\n '/home/chirag/Documents/Code_new/WatchDog_Server/cascades/haarcascade_frontalface_alt2.xml')\n ret, frame=capt.read()\n if ret == True:\n gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n faces=face_detector.detectMultiScale(gray, 1.3, 5)\n for (x, y, w, h) in faces:\n cv2.rectangle(gray, (x, y), (x+w, y+h), (255, 0, 0), 2)\n count+=1\n dir1=os.path.join(dir, face_id)\n if not os.path.exists(dir1):\n os.makedirs(dir1)\n # Save the captured image into the datasets folder\n cv2.imwrite(dir1+\"/\"+str(count)+\".jpg\",\n gray[y:y+h, x:x+w])\n cv2.imshow('frame', gray)\n if count >= 10:\n print(\"10 face sample and stop dataset\")\n break\n else:\n break\n capt.release()\n cv2.destroyAllWindows()\n encode_faces.encoding()\n print(\"encoded images\")\n\ndef custom_action():\n capture_encode_faces()\n\nwatch1=Watcher(watch_file, custom_action) # also call custom action function\nwatch1.watch()# start the watch going\nwatch1.look()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"amruta5694/MWS","sub_path":"WatchDog_Server/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70644082724","text":"##############################################################\n# #\n# Program Code for Fred Inmoov #\n# Of the Cyber_One YouTube Channel #\n# https://www.youtube.com/cyber_one #\n# #\n# This is version 5 #\n# Divided up into sub programs #\n# Coded for the Nixie Version of MyRobotLab. #\n# #\n# Running on MyRobotLab (MRL) http://myrobotlab.org/ #\n# Fred in a modified Inmmov robot, you can find all the #\n# origonal files on the Inmoov web site. 
http://inmoov.fr/ #\n# #\n# 6_Life_Functions/3_Neck_Control.py #\n# This file is to Simulate life movements associated with #\n# the Neck #\n# #\n##############################################################\nimport math\nimport time\n\n# Pan and Tilt are the common methods of controlling a camera.\n# normally, you would have a rotating base and the place a tilt mechinism on the pan base.\n# we have a Pitch and Roll with the Yaw on top of that.\n# When we look up at 20 and then turn the head 90 to the left,\n# the head will end up with a tilt of 0 but the head will roll 20\n# to overcome this we need a Pan and Tilt translation.\n# This function assumes that 0, 0, 0 is facing straight ahead with tilt and roll level.\ndef HeadPanTilt(Pan, Tilt, Roll):\n #print \"PanTilt( \", Pan, \", \", Tilt, \", \", Roll, \")\"\n PanTo = Pan + 50.0\n #print \"Panning To \", PanTo\n if EnableHeadYaw == True:\n HeadYaw.moveTo(PanTo)\n PanRadians = math.radians(Pan)\n #print \"Thats \", PanRadians, \"Radians\"\n if EnableHeadPitch == True:\n PitchMath = (Tilt*math.cos(PanRadians) + Roll*math.sin(PanRadians))\n #print \"PitchMath = \", PitchMath\n HeadPitch.moveTo(50+PitchMath)\n if EnableHeadRoll == True:\n RollMath = (Tilt*math.sin(PanRadians) + Roll*math.cos(PanRadians))\n #print \"RollMath = \", RollMath\n HeadRoll.moveTo(50+RollMath)\n #print \"PanTilt finished\"\n\n# This group of methods allow you to control one virtual axis\n# at a time, you pass the new value and it grabs the last\n# known positions of the other axis.\ndef HeadPanTo(NewPan):\n global HeadPanPos\n global HeadTiltPos\n global HeadRollPos\n HeadPanPos = NewPan\n if HeadPanPos < -50: \n HeadPanPos = -50\n if HeadPanPos > 50: \n HeadPanPos = 50\n HeadPanTilt(HeadPanPos, HeadTiltPos, HeadRollPos)\n\ndef HeadTiltTo(NewTilt):\n global HeadPanPos\n global HeadTiltPos\n global HeadRollPos\n HeadTiltPos = NewTilt\n if HeadTiltPos < -50: \n HeadTiltPos = -50\n if HeadTiltPos > 50: \n HeadTiltPos = 50\n HeadPanTilt(HeadPanPos, HeadTiltPos, HeadRollPos)\n\ndef HeadRollTo(NewRoll):\n global HeadPanPos\n global HeadTiltPos\n global HeadRollPos\n HeadRollPos = NewRoll\n if HeadRollPos < -50: \n HeadRollPos = -50\n if HeadRollPos > 50: \n HeadRollPos = 50\n HeadPanTilt(HeadPanPos, HeadTiltPos, HeadRollPos)\n\n# This group of Methods will add a value to the current\n# virtual axis one at a time. 
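# A worked check of the pan/tilt coupling described above: the HeadPanTilt
# comment says that looking up 20 and then panning 90 turns the tilt into
# roll. Plugging Pan=90, Tilt=20, Roll=0 into the same trigonometry as
# HeadPanTilt confirms it (standalone sketch, not the robot code itself):
def pan_tilt_components(pan_deg, tilt, roll):
    p = math.radians(pan_deg)
    pitch = tilt * math.cos(p) + roll * math.sin(p)
    new_roll = tilt * math.sin(p) + roll * math.cos(p)
    return pitch, new_roll

# pan_tilt_components(90, 20, 0) -> (~0.0, 20.0): the 20-degree tilt
# reappears entirely as roll, exactly as the comment above predicts.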
The other values are grabbed\n# from memory\ndef NeckPan(NewPan):\n global HeadPanPos\n global HeadTiltPos\n global HeadRollPos\n HeadPanPos = HeadPanPos + NewPan\n if HeadPanPos < -50: \n HeadPanPos = -50\n if HeadPanPos > 50: \n HeadPanPos = 50\n HeadPanTilt(HeadPanPos, HeadTiltPos, HeadRollPos)\n\ndef NeckTilt(NewTilt):\n global HeadPanPos\n global HeadTiltPos\n global HeadRollPos\n HeadTiltPos = HeadTiltPos + NewTilt\n if HeadTiltPos < -50: \n HeadTiltPos = -50\n if HeadTiltPos > 50: \n HeadTiltPos = 50\n HeadPanTilt(HeadPanPos, HeadTiltPos, HeadRollPos)\n\ndef NeckRoll(NewRoll):\n global HeadPanPos\n global HeadTiltPos\n global HeadRollPos\n HeadRollPos = HeadRollPos + NewRoll\n if HeadRollPos < -50: \n HeadRollPos = -50\n if HeadRollPos > 50: \n HeadRollPos = 50\n HeadPanTilt(HeadPanPos, HeadTiltPos, HeadRollPos)\n","repo_name":"Cyber-One/Fred_Inmoov","sub_path":"Fred/6_Life_Functions/3_Neck_Control.py","file_name":"3_Neck_Control.py","file_ext":"py","file_size_in_byte":4362,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"52"} +{"seq_id":"3328558865","text":"#!/usr/bin/env python3 \n# -*- coding: utf-8 -*- \n#----------------------------------------------------------------------------\n# Created Date: 20/01/2023 13:54\n# version = '1.0'\n# ---------------------------------------------------------------------------\n\"\"\" MY470 Computer Programming Final Assignment\"\"\"\n# ---------------------------------------------------------------------------\nimport random\nimport copy\nimport numpy as np\nfrom collections import Counter\nimport datetime\nimport time\n# ---------------------------------------------------------------------------\nfrom readData import read_kills_data, read_team_data_id, read_cheaters_data\n# ---------------------------------------------------------------------------\n\ndef count_cheaters(l, cheaters):\n \"\"\"The function takes a list and a dictionary as input, \n it counts the number of items in the list that are in the dictionary and returns the total count.\"\"\"\n cheater_count = 0\n for item in l:\n if item in cheaters:\n cheater_count += 1\n return cheater_count\n\ndef estimate_expected_counts(team_id, cheaters, randomise = False, n = None):\n \"\"\"The function takes in a dictionary of match IDs, \n a dictionary of cheater accounts, and two optional arguments; a boolean indicating whether to \n shuffle team IDs and an integer indicating number of iterations. 
It counts the number of cheaters in \n each team and returns the expected counts by averaging over the number of iterations.\"\"\"\n match_ids = set(list(team_id.keys()))\n teams_with_cheaters = {0:0, 1:0, 2:0, 3:0}\n\n if not randomise:\n for match in match_ids:\n accounts = team_id[match][0]\n team_ids = team_id[match][1].copy()\n\n temp_team_dict = {}\n for t in range(len(team_ids)):\n if team_ids[t] in temp_team_dict.keys(): \n temp_team_dict[team_ids[t]].append(accounts[t])\n else:\n temp_team_dict[team_ids[t]] = [accounts[t]]\n\n for team, players in temp_team_dict.items():\n cheaters_per_team = count_cheaters(players, cheaters)\n if cheaters_per_team in [0, 1, 2, 3]:\n teams_with_cheaters[cheaters_per_team]+=1\n\n counts = teams_with_cheaters\n \n return counts\n\n else:\n teams_with_cheaters_randomised={0:[], 1:[], 2:[], 3:[]}\n\n for i in range(n):\n teams_with_cheaters = {0:0, 1:0, 2:0, 3:0}\n for match in match_ids:\n accounts = team_id[match][0]\n team_ids = team_id[match][1].copy()\n np.random.shuffle(team_ids)\n\n temp_team_dict = {}\n for t in range(len(team_ids)):\n if team_ids[t] in temp_team_dict.keys(): \n temp_team_dict[team_ids[t]].append(accounts[t])\n else:\n temp_team_dict[team_ids[t]] = [accounts[t]]\n\n cheaters_per_team = {} \n for team, player in temp_team_dict.items():\n cheaters_per_team = count_cheaters(player, cheaters)\n if cheaters_per_team in [0, 1, 2, 3]:\n teams_with_cheaters[cheaters_per_team]+=1\n \n for k, v in teams_with_cheaters.items():\n teams_with_cheaters_randomised[k].append(v)\n\n \n counts_randomised = {k: [np.mean(v), [np.mean(v) - 1.96 * np.std(v) / np.sqrt(n), np.mean(v) + 1.96 * np.std(v) / np.sqrt(n)]] for k, v in teams_with_cheaters_randomised.items()}\n\n return counts_randomised\n\ndef is_cheater_yet(cheaters, p, date):\n \"\"\"The function takes a player and a date as input, and checks if the player was a cheater at the given date \n by checking the player's existence in a dictionary called 'cheaters' and the date of the input is after the date \n the player became a cheater. 
\n It returns True if the player was a cheater at the given date and False otherwise.\"\"\"\n if p in cheaters.keys():\n if date > cheaters[p][0]:\n return True\n else:\n return False\n else:\n return False\n\ndef shuffle_players(kills, match_id):\n \"\"\"The function shuffles player IDs for a selected match in the kills dictionary \n and relabels the match kills with the shuffled player IDs\"\"\" \n match_kills = kills[match_id]\n players_in_match = get_players_in_match(kills, match_id, offset=0)\n players_shuffled = players_in_match.copy()\n np.random.shuffle(players_shuffled)\n relabelled_ids = dict(zip(players_in_match, players_shuffled))\n \n return [[relabelled_ids[kill[0]], relabelled_ids[kill[1]], kill[2]] for kill in match_kills]\n\n\ndef get_players_in_match(kills, match_id, offset=0):\n \"\"\"This function takes in match kills data, a match id and an optional offset, and returns a list of unique players in the match, \n after skipping over a certain number of elements in the kills data, as specified by the offset.\"\"\"\n match_kills = kills[match_id]\n players_in_match = []\n\n for i in range(offset, len(match_kills)):\n item = match_kills[i]\n players_in_match.append(item[0])\n players_in_match.append(item[1])\n\n players_in_match = list(set(players_in_match))\n return players_in_match\n\ndef get_victims_turn_cheaters(cheaters, kills, randomised = False, n = None, global_detected_victims_turn_cheaters=[]):\n \"\"\"This function calculates the number of victims that turn into cheaters in a given set \n of matches by iterating through the \"kills\" dictionary and using the \"victims_turn_cheaters_per_match\" \n function. If \"randomised\" is set to True, it will also calculate the mean number of victims that turned \n into cheaters with a 95% confidence interval by shuffling players in each match \"n\" times.\"\"\"\n victims_turn_cheaters = 0\n detected_victims_turn_cheaters = global_detected_victims_turn_cheaters\n\n if not randomised:\n for match_id in kills.keys(): \n match_kills = kills[match_id]\n match_date = datetime.datetime.strptime(match_kills[0][2][:10], \"%Y-%m-%d\") \n for kill in match_kills:\n killer = kill[0]\n killed = kill[1]\n killed_by_cheater = is_cheater_yet(cheaters, killer, match_date)\n if killed_by_cheater:\n killed_is_cheater = is_cheater_yet(cheaters, killed, match_date)\n if killed not in detected_victims_turn_cheaters:\n if not killed_is_cheater:\n if killed in cheaters.keys():\n victims_turn_cheaters += 1\n detected_victims_turn_cheaters.append(killed)\n \n return victims_turn_cheaters, detected_victims_turn_cheaters\n\n else:\n victims_turn_cheaters_counts = []\n for i in range(n):\n detected_victims_turn_cheaters = []\n victims_turn_cheaters=0\n for match_id in kills.keys(): \n match_kills = kills[match_id]\n match_kills = shuffle_players(kills, match_id)\n new_victims_turn_cheaters, detected_victims_turn_cheaters = get_victims_turn_cheaters(cheaters, {match_id: match_kills}, randomised = False, global_detected_victims_turn_cheaters = detected_victims_turn_cheaters)\n victims_turn_cheaters += new_victims_turn_cheaters\n\n victims_turn_cheaters_counts.append(victims_turn_cheaters)\n \n victims_turn_cheaters_mean = np.mean(victims_turn_cheaters_counts)\n victims_turn_cheaters_std = np.std(victims_turn_cheaters_counts) \n confidence_interval_95_positive = victims_turn_cheaters_mean + 1.96 * (victims_turn_cheaters_std) / np.sqrt(n)\n confidence_interval_95_negative = victims_turn_cheaters_mean - 1.96 * (victims_turn_cheaters_std) / np.sqrt(n) \n \n 
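# The confidence-interval arithmetic used just above, pulled out as a
# standalone helper for clarity. Sample values are invented; both randomised
# branches report mean +/- 1.96 * std / sqrt(n):
def mean_ci95(samples):
    m = np.mean(samples)
    half = 1.96 * np.std(samples) / np.sqrt(len(samples))
    return m, (m - half, m + half)

# mean_ci95([10, 12, 11, 13]) -> (11.5, (~10.40, ~12.60))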
return victims_turn_cheaters_mean, [confidence_interval_95_negative, confidence_interval_95_positive]\n\n\ndef count_observers_start_cheating(cheaters, kills, randomised = False, n = None, global_observers_start_cheating = []):\n \"\"\"This function calculates the number of observers that start cheating by iterating through the \n \"kills\" dictionary and using the function \"is_cheater_yet\" and \"get_players_in_match\". \n If \"randomised\" is set to True, it will also calculate the mean number of \n observers that start cheating with a 95% confidence interval by shuffling players in each match \"n\" times.\"\"\"\n observers_start_cheating = 0\n detected_observers_start_cheating = global_observers_start_cheating\n \n if not randomised:\n for match_id in kills.keys():\n match_kills = kills[match_id]\n date = datetime.datetime.strptime(match_kills[0][2][:10], \"%Y-%m-%d\") \n cheaters_victim_count = {}\n i=0\n while i < len(match_kills) and max(cheaters_victim_count.values(), default=0)<3:\n killer = match_kills[i][0]\n killed_by_cheater=is_cheater_yet(cheaters, killer,date) \n if killed_by_cheater: \n if killer in cheaters_victim_count.keys():\n cheaters_victim_count[killer]+=1\n else:\n cheaters_victim_count[killer]=1 \n i+=1 \n\n if i==len(match_kills):\n pass\n else:\n observers = get_players_in_match(kills, match_id, offset=i)\n\n for observer in observers:\n if not observer in detected_observers_start_cheating:\n observer_is_cheater = is_cheater_yet(cheaters, observer, date) \n if not observer_is_cheater:\n if observer in cheaters.keys():\n observers_start_cheating+=1\n detected_observers_start_cheating.append(observer)\n \n return observers_start_cheating, global_observers_start_cheating\n \n else:\n count_observers_start_cheating_random = []\n for j in range(n):\n detected_observers_start_cheating = []\n observers_start_cheating = 0\n\n for match_id in kills.keys():\n match_kills = shuffle_players(kills, match_id)\n new_observers_start_cheating, detected_observers_start_cheating = count_observers_start_cheating(cheaters, {match_id: match_kills}, randomised=False, global_observers_start_cheating = detected_observers_start_cheating) \n \n observers_start_cheating += new_observers_start_cheating\n \n count_observers_start_cheating_random.append(observers_start_cheating)\n \n count_observers_start_cheating_mean = np.mean(count_observers_start_cheating_random)\n count_observers_start_cheating_std = np.std(count_observers_start_cheating_random) \n confidence_interval_95_positive = count_observers_start_cheating_mean + 1.96 * (count_observers_start_cheating_std) / np.sqrt(n)\n confidence_interval_95_negative = count_observers_start_cheating_mean - 1.96 * (count_observers_start_cheating_std) / np.sqrt(n) \n \n return count_observers_start_cheating_mean, [confidence_interval_95_negative, confidence_interval_95_positive]\n \n\n","repo_name":"amywhiffen/Portfolio","sub_path":"Computer Programming/basicFunctions.py","file_name":"basicFunctions.py","file_ext":"py","file_size_in_byte":11619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21663639133","text":"from flask import (\n Flask,\n render_template,\n request,\n redirect,\n url_for,\n jsonify,\n session,\n)\n\nfrom database.database import database_bp\nfrom units.units import units_bp\nfrom models import db\nfrom model_functions import (\n get_period,\n add_deviation,\n get_deviation,\n edit_deviation,\n get_parameters,\n get_units,\n get_equipment_type,\n add_equipment_type,\n 
set_quality_control,\n get_quality_control,\n get_protocols,\n)\n\nfrom acc_funct import get_unit, gather_info, menu\n\nprotocols = {}\n\napp = Flask(__name__)\n\napp.register_blueprint(database_bp, url_prefix=\"/database\")\napp.register_blueprint(units_bp, url_prefix=\"/units\")\napp.secret_key = \"secret\"\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///ITRO.db\"\ndb.init_app(app)\n\n\n@app.route(\"/\")\ndef index():\n units = menu()\n session[\"units\"] = units\n return render_template(\"index.html\", menu_units=units)\n\n\n# @app.route(\"/database/general_data\")\n# def general_data():\n# return render_template(\n# \"database/general_data/general_data.html\", units=session[\"units\"]\n# )\n\n\n@app.route(\"/equipment_type\", methods={\"GET\", \"POST\"})\ndef equipment_types():\n if request.method == \"POST\":\n if request.form[\"equipment_type\"] != \"\":\n equipment_type = request.form[\"equipment_type\"]\n add_equipment_type(equipment_type)\n return redirect(url_for(\"equipment_types\"))\n elif request.method == \"GET\":\n equipment_types = get_equipment_type()\n return render_template(\n \"general_info/equipment_type.html\",\n equipment_types=enumerate(equipment_types),\n )\n\n\n@app.route(\"/selection\")\ndef selection():\n return render_template(\"unit_selection.html\")\n\n\n@app.route(\"/add_unit\")\ndef add_unit():\n return render_template(\"add_unit.html\")\n\n\n@app.route(\"/control\")\ndef control():\n return render_template(\"control.html\")\n\n\n@app.route(\"/unit_parameter\", methods=[\"GET\", \"POST\"])\n@app.route(\"/unit_parameter/\", methods=[\"GET\", \"POST\"])\ndef unit_parameter(id=None):\n if request.method == \"POST\":\n set_quality_control(\n request.form[\"parameter_desc\"], request.form[\"period_desc\"], id\n )\n return redirect(url_for(\"unit_parameter\", id=id))\n elif request.method == \"GET\":\n units = get_units()\n parameters = get_parameters()\n periods = get_period()\n unit_parameters = get_quality_control(id)\n active_unit = get_unit(id)\n return render_template(\n \"quality_control/unit_parameter.html\",\n units=units,\n parameters=parameters,\n periods=periods,\n unit_parameters=unit_parameters,\n unit=active_unit,\n )\n\n\n@app.route(\"/control_protocol\")\ndef control_protocol():\n periods = [x.period_desc for x in get_period()]\n protocols = get_protocols()\n units = [x.id for x in get_units()]\n return render_template(\n \"quality_control/select_protocol.html\",\n periods=periods,\n protocols=protocols,\n units=units,\n )\n\n\n@app.route(\"/daily\", methods=[\"GET\", \"POST\"])\ndef daily():\n if request.method == \"POST\":\n data = request.get_json()\n global protocols\n protocols = gather_info(data)\n return jsonify(dict(redirect=\"/daily\"))\n else:\n data = protocols\n print(data)\n return render_template(\n \"quality_control/protocol/protocol.html\", data=data, width=11\n )\n\n\n@app.route(\"/units/document\", methods={\"GET\", \"POST\"})\n# @app.route(\"/database/general_data/deviation/\", methods={\"POST\"})\ndef document():\n if request.method == \"POST\":\n if request.form[\"submit_button\"] == \"Исправить\":\n edit_deviation(request.form[\"deviation_value\"], id)\n else:\n add_deviation(request.form[\"deviation_value\"])\n return redirect(url_for(\"deviation\"))\n elif request.method == \"GET\":\n deviations = get_deviation()\n return render_template(\"units/document.html\")\n\n\nif __name__ == \"__main__\":\n with app.app_context():\n db.create_all()\n app.run(debug=True, 
host=\"0.0.0.0\")\n","repo_name":"Blshop/ITRO","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"75125969444","text":"from .functions import *\r\nfrom .point import *\r\n\r\ndef intersection_point(function1, function2):\r\n \"\"\"\r\n this function returns a point from the intersection of two functions,\r\n they need to have a different \"a\" value to touch\r\n \"\"\"\r\n a = function1.get_a()\r\n b = function1.get_b()\r\n c = function2.get_a()\r\n d = function2.get_b()\r\n if a != c:#se \"a\" for igual a \"c\", as funções serão paralelas e nunca se tocarão\r\n x = (d - b)/(a - c)\r\n if x == int(x):\r\n x = int(x)\r\n else:\r\n raise Exception('this functions will never intersect')\r\n return function1.get_point(x)\r\n\r\ndef generate_function(point1, point2):\r\n \"\"\"\r\n This function return a Function objetc from the functions.py\r\n this function is generated from the line connecting two points, these points cannot\r\n have the same value of x\r\n \"\"\"\r\n y = point1.get_y()\r\n z = point2.get_y()\r\n x = point1.get_x()\r\n w = point2.get_x()\r\n if x != w:#se \"x\" for igual a \"w\", esses pontos terão o mesmo valor de dominio. E não será possível determinar uma função entre eles\r\n a = (y - z)/(x - w)\r\n if z != y:#ja que os dominios são diferentes, se \"z\" for diferente de \"y\" descarta-se a possibilidade de função linear, e \"b\" precisa ser calculado\r\n b = (a*(y*w - z*x))/(z - y)\r\n else:#função linear\r\n raise Exception('The function can\\'t be constant, only \"afim\" functions') #poderia ser \"y\" ou \"w\", já que ambos são iguais. ou seja \"b\" será constante\r\n return Function(a, b)\r\n else:\r\n raise Exception('domain of points can\\'t be the same')\r\n","repo_name":"Gramosa/Importacao_teste","sub_path":"plano_cartesiano/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17018022585","text":"import os\nfrom datetime import datetime\nfrom stat import ST_CTIME\nimport re\nimport logging\nimport csv\n\nfrom dipper.sources.PostgreSQLSource import PostgreSQLSource\nfrom dipper.models.Model import Model\nfrom dipper import config\nfrom dipper.models.Reference import Reference\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass EOM(PostgreSQLSource):\n \"\"\"\n Elements of Morphology is a resource from NHGRI that has definitions of\n morphological abnormalities, together with image depictions.\n We pull those relationships, as well as our local mapping of equivalences\n between EOM and HP terminologies.\n\n The website is crawled monthly by NIF's DISCO crawler system,\n which we utilize here.\n Be sure to have pg user/password connection details in your conf.json file,\n like:\n dbauth : {'disco' : {'user' : '', 'password' : ''}}\n\n Monarch-curated data for the HP to EOM mapping is stored at\n https://raw.githubusercontent.com/obophenotype/human-phenotype-ontology/master/src/mappings/hp-to-eom-mapping.tsv\n\n Since this resource is so small, the entirety of it is the \"test\" set.\n\n \"\"\"\n\n # we are using the production view here; should we be using services?\n tables = [\n 'dvp.pr_nlx_157874_1'\n ]\n GHRAW = 'https://raw.githubusercontent.com/obophenotype/human-phenotype-ontology'\n files = {\n 'map': {\n 'file': 'hp-to-eom-mapping.tsv',\n 'url': GHRAW + 
'/master/src/mappings/hp-to-eom-mapping.tsv'\n }\n }\n\n def __init__(self, graph_type, are_bnodes_skolemized):\n super().__init__(\n graph_type,\n are_bnodes_skolemized,\n 'eom',\n ingest_title='Elements of Morphology',\n ingest_url='http://elementsofmorphology.nih.gov',\n data_rights='http://www.genome.gov/copyright.cfm',\n license_url='https://creativecommons.org/publicdomain/mark/1.0/'\n # file_handle=None\n )\n\n # check if config exists; if it doesn't, error out and let user know\n if 'dbauth' not in config.get_config() or \\\n 'disco' not in config.get_config()['dbauth']:\n logger.error(\"not configured with PG user/password.\")\n\n # source-specific warnings. will be cleared when resolved.\n\n return\n\n def fetch(self, is_dl_forced=False):\n '''create the connection details for DISCO'''\n\n cxn = config.get_config()['dbauth']['disco']\n cxn.update(\n {'host': 'nif-db.crbs.ucsd.edu', 'database': 'disco_crawler',\n 'port': 5432})\n\n self.dataset.setFileAccessUrl(\n ''.join(('jdbc:postgresql://', cxn['host'], ':', str(cxn['port']),\n '/', cxn['database'])), is_object_literal=True)\n\n # process the tables\n # self.fetch_from_pgdb(self.tables,cxn,100) #for testing\n self.fetch_from_pgdb(self.tables, cxn)\n\n self.get_files(is_dl_forced)\n\n # FIXME: Everything needed for data provenance?\n st = os.stat('/'.join((self.rawdir, 'dvp.pr_nlx_157874_1')))\n filedate = datetime.utcfromtimestamp(st[ST_CTIME]).strftime(\"%Y-%m-%d\")\n self.dataset.setVersion(filedate)\n\n return\n\n def parse(self, limit=None):\n '''\n Over ride Source.parse inherited via PostgreSQLSource\n '''\n\n if limit is not None:\n logger.info(\"Only parsing first %s rows of each file\", limit)\n\n if self.testOnly:\n self.testMode = True\n\n logger.info(\"Parsing files...\")\n\n self._process_nlx_157874_1_view(\n '/'.join((self.rawdir, 'dvp.pr_nlx_157874_1')), limit)\n self._map_eom_terms(\n '/'.join((self.rawdir, self.files['map']['file'])), limit)\n\n logger.info(\"Finished parsing.\")\n\n # since it's so small,\n # we default to copying the entire graph to the test set\n self.testgraph = self.graph\n\n return\n\n def _process_nlx_157874_1_view(self, raw, limit=None):\n \"\"\"\n This table contains the Elements of Morphology data that has been\n screen-scraped into DISCO.\n Note that foaf:depiction is inverse of foaf:depicts relationship.\n\n Since it is bad form to have two definitions,\n we concatenate the two into one string.\n\n Turtle:\n a owl:Class\n rdf:label Literal(eom label)\n OIO:hasRelatedSynonym Literal(synonym list)\n IAO:definition Literal(objective_def. 
subjective def)\n foaf:depiction Literal(small_image_url),\n Literal(large_image_url)\n foaf:page Literal(page_url)\n rdfs:comment Literal(long commented text)\n\n\n :param raw:\n :param limit:\n :return:\n \"\"\"\n\n model = Model(self.graph)\n line_counter = 0\n with open(raw, 'r') as f1:\n f1.readline() # read the header row; skip\n filereader = csv.reader(f1, delimiter='\\t', quotechar='\\\"')\n for line in filereader:\n line_counter += 1\n (morphology_term_id, morphology_term_num,\n morphology_term_label, morphology_term_url,\n terminology_category_label, terminology_category_url,\n subcategory, objective_definition, subjective_definition,\n comments, synonyms, replaces, small_figure_url,\n large_figure_url, e_uid, v_uid, v_uuid,\n v_last_modified, v_status, v_lastmodified_epoch) = line\n\n # note:\n # e_uid v_uuid v_last_modified terminology_category_url\n # subcategory v_uid morphology_term_num\n # terminology_category_label hp_label notes\n # are currently unused.\n\n # Add morphology term to graph as a class\n # with label, type, and description.\n model.addClassToGraph(morphology_term_id,\n morphology_term_label)\n\n # Assemble the description text\n\n if subjective_definition != '' and not (\n re.match(r'.+\\.$', subjective_definition)):\n # add a trailing period.\n subjective_definition = subjective_definition.strip() + '.'\n if objective_definition != '' and not (\n re.match(r'.+\\.$', objective_definition)):\n # add a trailing period.\n objective_definition = objective_definition.strip() + '.'\n\n definition = \\\n ' '.join(\n (objective_definition, subjective_definition)).strip()\n\n model.addDefinition(morphology_term_id, definition)\n\n # FOAF:depicted_by literal url\n # type foaf:depiction\n\n # do we want both images?\n # morphology_term_id has depiction small_figure_url\n if small_figure_url != '':\n model.addDepiction(morphology_term_id,\n small_figure_url)\n\n # morphology_term_id has depiction large_figure_url\n if large_figure_url != '':\n model.addDepiction(morphology_term_id,\n large_figure_url)\n\n # morphology_term_id has comment comments\n if comments != '':\n model.addComment(morphology_term_id, comments.strip())\n\n if synonyms != '':\n for s in synonyms.split(';'):\n model.addSynonym(\n morphology_term_id, s.strip(),\n self.globaltt['hasExactSynonym'])\n\n # morphology_term_id hasRelatedSynonym replaces (; delimited)\n if replaces != '' and replaces != synonyms:\n for s in replaces.split(';'):\n model.addSynonym(\n morphology_term_id, s.strip(),\n self.globaltt['hasRelatedSynonym'])\n\n # morphology_term_id has page morphology_term_url\n reference = Reference(self.graph)\n reference.addPage(morphology_term_id, morphology_term_url)\n\n if limit is not None and line_counter > limit:\n break\n return\n\n def _map_eom_terms(self, raw, limit=None):\n \"\"\"\n This table contains the HP ID mappings from the local tsv file.\n Triples:\n owl:equivalentClass \n :param raw:\n :param limit:\n :return:\n \"\"\"\n\n model = Model(self.graph)\n line_counter = 0\n with open(raw, 'r') as f1:\n f1.readline() # read the header row; skip\n for line in f1:\n line_counter += 1\n row = line.split('\\t')\n (morphology_term_id, morphology_term_label,\n hp_id, hp_label, notes) = row\n\n # Sub out the underscores for colons.\n hp_id = re.sub('_', ':', hp_id)\n if re.match(\".*HP:.*\", hp_id):\n # add the HP term as a class\n model.addClassToGraph(hp_id, None)\n # Add the HP ID as an equivalent class\n model.addEquivalentClass(morphology_term_id, hp_id)\n else:\n logger.warning('No 
matching HP term for %s', morphology_term_label)\n\n if limit is not None and line_counter > limit:\n break\n\n return\n\n def getTestSuite(self):\n import unittest\n # TODO PYLINT: Unable to import 'tests.test_eom'\n from tests.test_eom import EOMTestCase\n\n test_suite = unittest.TestLoader().loadTestsFromTestCase(EOMTestCase)\n\n return test_suite\n","repo_name":"alexgarciac/dipper","sub_path":"dipper/sources/EOM.py","file_name":"EOM.py","file_ext":"py","file_size_in_byte":9975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"70057811046","text":"import sys\nimport mariadb\nfrom django.db import connection\n\ndef CallReview(c_id, u_email):\n try:\n cur = connection.cursor()\n use_db_query = \"use oosooDB\"\n select_query = f\"SELECT _like, rating, review, _datetime FROM contents_review WHERE c_id = '{c_id}' and u_email = '{u_email}'\"\n\n cur.execute(use_db_query)\n cur.execute(select_query)\n result = cur.fetchall()\n result_list = list()\n for _like, rating, review, _datetime in result:\n result_list.append(_like)\n result_list.append(rating)\n result_list.append(review)\n result_list.append(_datetime)\n\n except mariadb.Error as e:\n print(f\"failed: Error connecting to Mariadb: {e}\")\n sys.exit(1)\n result_list = []\n\n connection.close()\n\n return result_list","repo_name":"kpuce2022CD/OOSOO","sub_path":"Backend/django/api/User/CallReview.py","file_name":"CallReview.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"} +{"seq_id":"12503134824","text":"from flask import Flask, request, abort, redirect\nfrom flask_login import LoginManager, login_url\nfrom model.config.session import get_session\nfrom model.user.user import User\n\nfrom views.website import website\nfrom views.user import user\nfrom views.lib import lib\n\nBLUEPRINTS = [\n (website, ''),\n (user, '/user'),\n (lib, \"/lib\"),\n]\n\n\ndef configure_login_manager(app):\n login_manager = LoginManager()\n login_manager.login_view = 'user.login'\n login_manager.init_app(app)\n app.config['SECRET_KEY'] = 'landsljklandsljk'\n\n @login_manager.user_loader\n def user_loader(user_id):\n with get_session() as db_session:\n u = db_session.query(User).get(user_id)\n return u\n\n @login_manager.unauthorized_handler\n def unauthorized():\n if not login_manager.login_view:\n abort(401)\n return redirect(login_url(login_manager.login_view, request.url))\n\n\ndef create_app():\n app = Flask(__name__)\n blueprints = BLUEPRINTS\n for view, url_prefix in blueprints:\n app.register_blueprint(view, url_prefix=url_prefix)\n configure_login_manager(app)\n return app\n\n\nif __name__ == '__main__':\n app = create_app()\n app.run(host='0.0.0.0', port=5000, debug=True)\n\n","repo_name":"NoharaHiroshi/forum_system","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9185888609","text":"#-*- coding:utf-8 -*-\n\nimport os\nimport sys\n\nimport NamePipe\n\npipe = NamePipe.NamePipe(1177, True)\n\nwhile True:\n\ttxt = raw_input(\">>> \")\n\tpipe.write(txt)\n\ttxt = pipe.read()\n\tif txt:\n\t\tsys.stdout.write(\"%s\\n\" % 
txt)\n\t\tsys.stdout.flush()","repo_name":"Whosemario/ThinkIdeaEx","sub_path":"remote_debug/Test_NamePipe_Client.py","file_name":"Test_NamePipe_Client.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"52"} +{"seq_id":"4418267197","text":"from fastapi.encoders import jsonable_encoder\nfrom sqlalchemy.orm import Session\n\nfrom app.schemas.user import UserCreate, UserUpdate\nfrom app.models import model_user\nfrom app.models import model_dog\n\n\ndef get_user(db: Session, user_id: int):\n return db.query(model_user.User).filter(model_user.User.id == user_id).first()\n\ndef get_user_by_email(db: Session, email: str):\n return db.query(model_user.User).filter(model_user.User.email == email).first()\n\ndef get_users(db: Session, skip: int = 0, limit: int = 100):\n return db.query(model_user.User).offset(skip).limit(limit).all()\n\ndef create_user(db: Session, user: UserCreate):\n db_user = model_user.User(**user.dict())\n db.add(db_user)\n db.commit()\n db.refresh(db_user)\n return db_user\n\ndef update_user(db: Session, user_update: UserUpdate, user_id: int): \n db_obj = db.query(model_user.User).filter(model_user.User.id == user_id).first()\n obj_data = jsonable_encoder(db_obj)\n update_data = user_update.dict(exclude_unset=True)\n for field in obj_data:\n if field in update_data:\n setattr(db_obj, field, update_data[field])\n db.add(db_obj)\n db.commit()\n db.refresh(db_obj)\n return db_obj\n \ndef delete_user(db: Session, user_id: int):\n user_dl = db.query(model_user.User).filter(model_user.User.id == user_id).first()\n if user_dl is None:\n return False\n dogs_dl = db.query(model_dog.Dog).filter(model_dog.Dog.owner_id == user_id).all()\n for i in dogs_dl:\n print(i)\n db.delete(i)\n db.commit()\n db.delete(user_dl)\n db.commit()\n return True\n","repo_name":"FelipeTriana/guane-intern-fastapi","sub_path":"app/crud/crud_user.py","file_name":"crud_user.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14925530759","text":"from flask_sqlalchemy import SQLAlchemy\n\nclass DatabaseManager:\n def __init__(self):\n self.db = SQLAlchemy()\n \n def init_app(self, app):\n import apis.models as models\n models.load()\n self.db.init_app(app)\n self.db.create_all(app=app)\n \n def check_connextion(self):\n try:\n with self.db.engine.connect() as connection:\n connection.execute(\"select 1;\")\n return True\n except Exception:\n return False\n","repo_name":"mridhulak99/celery-queue","sub_path":"apis/utils/database_manager.py","file_name":"database_manager.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9613168209","text":"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\nn,c,k=map(int,input().split())\nx=[0]*2\nans=0\nl=[]\nfor i in range(n):\n n=int(input())\n l.append(n)\nl.sort()\n\n\nfor n in l:\n if x[0]==0 and c>1:\n x=[n,1]\n elif x[0]==0:\n ans+=1\n elif n>x[0]+k:\n ans+=1\n x=[n,1]\n elif x[1]+10])\n","repo_name":"clarinet758/atcoder","sub_path":"agc/g001_025/g011/a1.py","file_name":"a1.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33513065003","text":"import numpy as np\nfrom ray.rllib.utils import try_import_torch\nfrom collections import deque\nfrom skimage.util import 
view_as_windows\n\ntorch, nn = try_import_torch()\nimport torch.distributions as td\nfrom functools import partial\nimport itertools\n\ndef _make_categorical(x, ncat, shape):\n x = x.reshape((x.shape[0], shape, ncat))\n return td.Categorical(logits=x)\n\ndef dist_build(ac_space):\n return partial(_make_categorical, shape=1, ncat=ac_space.n)\n\ndef neglogp_actions(pi_logits, actions):\n return nn.functional.cross_entropy(pi_logits, actions, reduction='none')\n\ndef sample_actions(logits, device):\n u = torch.rand(logits.shape, dtype=logits.dtype).to(device)\n return torch.argmax(logits - torch.log(-torch.log(u)), dim=1)\n\ndef pi_entropy(logits):\n a0 = logits - torch.max(logits, dim=1, keepdim=True)[0]\n ea0 = torch.exp(a0)\n z0 = torch.sum(ea0, dim=1, keepdim=True)\n p0 = ea0 / z0\n return torch.sum(p0 * (torch.log(z0) - a0), axis=1)\n\ndef roll(arr):\n s = arr.shape\n return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])\n\ndef unroll(arr, targetshape):\n s = arr.shape\n return arr.reshape(*targetshape, *s[1:]).swapaxes(0, 1)\n\ndef safe_mean(xs):\n return -np.inf if len(xs) == 0 else np.mean(xs)\n\n\ndef pad_and_random_crop(imgs, out, pad):\n \"\"\"\n Vectorized pad and random crop\n Assumes square images?\n args:\n imgs: shape (B,H,W,C)\n out: output size (e.g. 64)\n \"\"\"\n # n: batch size.\n imgs = np.pad(imgs, [[0, 0], [pad, pad], [pad, pad], [0, 0]])\n n = imgs.shape[0]\n img_size = imgs.shape[1] # e.g. 64\n crop_max = img_size - out\n w1 = np.random.randint(0, crop_max, n)\n h1 = np.random.randint(0, crop_max, n)\n # creates all sliding window\n # combinations of size (out)\n windows = view_as_windows(imgs, (1, out, out, 1))[..., 0,:,:, 0]\n # selects a random window\n # for each batch element\n cropped = windows[np.arange(n), w1, h1]\n cropped = cropped.transpose(0,2,3,1)\n return cropped\n\ndef random_cutout_color(imgs, min_cut, max_cut):\n n, h, w, c = imgs.shape\n w1 = np.random.randint(min_cut, max_cut, n)\n h1 = np.random.randint(min_cut, max_cut, n)\n \n cutouts = np.empty((n, h, w, c), dtype=imgs.dtype)\n rand_box = np.random.randint(0, 255, size=(n, c), dtype=imgs.dtype)\n for i, (img, w11, h11) in enumerate(zip(imgs, w1, h1)):\n cut_img = img.copy()\n # add random box\n cut_img[h11:h11 + h11, w11:w11 + w11, :] = rand_box[i]\n \n cutouts[i] = cut_img\n return cutouts\n\ndef linear_schedule(initial_val, final_val, current_steps, total_steps):\n frac = 1.0 - current_steps / total_steps\n return (initial_val-final_val) * frac + final_val\n\ndef horizon_to_gamma(horizon):\n return 1.0 - 1.0/horizon\n \nclass AdaptiveDiscountTuner:\n def __init__(self, gamma, momentum=0.98, eplenmult=1):\n self.gamma = gamma\n self.momentum = momentum\n self.eplenmult = eplenmult\n \n def update(self, horizon):\n if horizon > 0:\n htarg = horizon * self.eplenmult\n gtarg = horizon_to_gamma(htarg)\n self.gamma = self.gamma * self.momentum + gtarg * (1-self.momentum)\n return self.gamma\n \ndef flatten01(arr):\n return arr.reshape(-1, *arr.shape[2:])\n\ndef flatten012(arr):\n return arr.reshape(-1, *arr.shape[3:])\n\n \nclass Returnselector:\n def __init__(self, nenvs, ob_space, ac_space, replay_shape, skips = 0, n_pi = 32, num_return = 5, flat_buffer=False):\n self.skips = skips\n self.n_pi = n_pi\n self.nenvs = nenvs\n \n self.exp_replay = np.empty((*replay_shape, *ob_space.shape), dtype=np.uint8)\n self.vtarg_replay = np.empty(replay_shape, dtype=np.float32)\n \n self.num_return = num_return\n self.ac_space = ac_space\n self.ob_space = ob_space\n \n self.cooldown_counter = skips\n 
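# Shape walk-through of pad_and_random_crop above, assuming a batch of 8 RGB
# images with out=64 and pad=4 (toy values, for illustration only):
B, out, pad = 8, 64, 4
demo_imgs = np.zeros((B, out, out, 3), dtype=np.uint8)
demo_padded = np.pad(demo_imgs, [[0, 0], [pad, pad], [pad, pad], [0, 0]])  # (8, 72, 72, 3)
demo_wins = view_as_windows(demo_padded, (1, out, out, 1))[..., 0, :, :, 0]  # (8, 9, 9, 3, 64, 64)
demo_w1 = np.random.randint(0, 2 * pad, B)   # crop_max = 72 - 64 = 8
demo_h1 = np.random.randint(0, 2 * pad, B)
demo_crop = demo_wins[np.arange(B), demo_w1, demo_h1].transpose(0, 2, 3, 1)  # back to (8, 64, 64, 3)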
self.replay_index = 0\n self.flat_buffer = flat_buffer\n\n def update(self, obs_batch, vtarg_batch):\n if self.num_return == 0:\n return False\n \n if self.cooldown_counter > 0:\n self.cooldown_counter -= 1\n return False\n \n self.exp_replay[self.replay_index] = obs_batch\n self.vtarg_replay[self.replay_index] = vtarg_batch\n \n self.replay_index = (self.replay_index + 1) % self.n_pi\n return self.replay_index == 0\n \n def return_done(self):\n self.cooldown_counter = self.skips\n self.num_return -= 1\n self.replay_index = 0\n \n \n def make_minibatches(self, presleep_pi, num_rollouts):\n if not self.flat_buffer:\n env_segs = list(itertools.product(range(self.n_pi), range(self.nenvs)))\n np.random.shuffle(env_segs)\n env_segs = np.array(env_segs)\n for idx in range(0, len(env_segs), num_rollouts):\n esinds = env_segs[idx:idx+num_rollouts]\n mbatch = [flatten01(arr[esinds[:,0], : , esinds[:,1]]) \n for arr in (self.exp_replay, self.vtarg_replay, presleep_pi)]\n yield mbatch\n else:\n nsteps = self.vtarg_replay.shape[1]\n buffsize = self.n_pi * nsteps * self.nenvs\n inds = np.arange(buffsize)\n np.random.shuffle(inds)\n batchsize = num_rollouts * nsteps\n for start in range(0, buffsize, batchsize):\n end = start+batchsize\n mbinds = inds[start:end]\n mbatch = [flatten012(arr)[mbinds] \n for arr in (self.exp_replay, self.vtarg_replay, presleep_pi)]\n \n yield mbatch\n \n \nclass RewardNormalizer(object):\n # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm\n def __init__(self, gamma=0.99, cliprew=10.0, epsilon=1e-8):\n self.epsilon = epsilon\n self.gamma = gamma\n self.ret_rms = RunningMeanStd(shape=())\n self.cliprew = cliprew\n self.ret = 0. # size updates after first pass\n \n def normalize(self, rews, news, resetrew):\n self.ret = self.ret * self.gamma + rews\n self.ret_rms.update(self.ret)\n rews = np.clip(rews / np.sqrt(self.ret_rms.var + self.epsilon), -self.cliprew, self.cliprew)\n if resetrew:\n self.ret[np.array(news, dtype=bool)] = 0. 
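# Numerically, what the normalize() step above does: rewards are divided by
# the running std of discounted returns and clipped. Toy values only
# (running var 4.0, cliprew 10.0):
demo_rews = np.array([0.5, 100.0, -3.0])
demo_norm = np.clip(demo_rews / np.sqrt(4.0 + 1e-8), -10.0, 10.0)
# -> [0.25, 10.0, -1.5]   (100.0 / 2 = 50.0 is clipped down to 10.0)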
## Values should be True of False to set positional index\n return rews\n \nclass RunningMeanStd(object):\n # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm\n def __init__(self, epsilon=1e-4, shape=()):\n self.mean = np.zeros(shape, 'float64')\n self.var = np.ones(shape, 'float64')\n self.count = epsilon\n\n def update(self, x):\n batch_mean = np.mean(x, axis=0)\n batch_var = np.var(x, axis=0)\n batch_count = x.shape[0]\n self.update_from_moments(batch_mean, batch_var, batch_count)\n\n def update_from_moments(self, batch_mean, batch_var, batch_count):\n self.mean, self.var, self.count = update_mean_var_count_from_moments(\n self.mean, self.var, self.count, batch_mean, batch_var, batch_count)\n\ndef update_mean_var_count_from_moments(mean, var, count, batch_mean, batch_var, batch_count):\n delta = batch_mean - mean\n tot_count = count + batch_count\n\n new_mean = mean + delta * batch_count / tot_count\n m_a = var * count\n m_b = batch_var * batch_count\n M2 = m_a + m_b + np.square(delta) * count * batch_count / tot_count\n new_var = M2 / tot_count\n new_count = tot_count\n\n return new_mean, new_var, new_count\n","repo_name":"yangysc/ResiNet","sub_path":"ray-master/rllib/policy/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7747,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"17223805067","text":"import os\nimport dateutil.parser\nfrom flask import Flask\nfrom flask import request\nfrom flask import jsonify\nfrom flask_api import FlaskAPI\nfrom flask_cors import CORS\nfrom flask_pymongo import PyMongo\nfrom flask_jwt_extended import (JWTManager, jwt_required, create_access_token, get_jwt_identity, get_jwt_claims)\nfrom pymongo import MongoClient\nfrom googleapiclient.discovery import build\nfrom googleapiclient.errors import HttpError\n\napp = FlaskAPI(__name__)\nCORS(app)\n\nMONGO_URL = os.environ.get('MONGO_URL')\n\nif not MONGO_URL:\n MONGO_URL = 'mongodb://admin:pass123@ds141320.mlab.com:41320/live-feed-db'\n\napp.config['MONGO_URI'] = MONGO_URL\napp.config['JWT_SECRET_KEY'] = 'super-secret'\nmongo = PyMongo(app, config_prefix='MONGO')\n\nDEVELOPER_KEY = 'AIzaSyBBkE1TxusqqxvF62mRcRXQEhtNSqYOXT4'\nYOUTUBE_API_SERVICE_NAME = 'youtube'\nYOUTUBE_API_VERSION = 'v3'\n\n# Setup the Flask-JWT-Extended extension\njwt = JWTManager(app)\n\n@app.route('/v1/api/getJWTToken', methods=['GET'])\ndef verify_token():\n user = mongo.db.users.find({ 'email' : request.args.get('email') })\n if (user.count() == 0):\n # User doesn't exist, create and insert user into database\n mongo.db.users.insert({ 'email' : request.args.get('email'), 'token' : request.args.get('token') })\n\n # Create JWT Access Token and send back to client-side\n access_token = create_access_token(identity=request.args.get('email'))\n return jsonify(access_token=access_token), 200\n\n@app.route('/v1/api/getStreamList', methods=['GET'])\n@jwt_required\ndef youtube_search():\n youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,\n developerKey=DEVELOPER_KEY)\n\n searchTerm = request.args.get('searchValue')\n\n # Set the previous or next page token if available\n if request.args.get('pageToken') is None:\n pageToken = ''\n else:\n pageToken = request.args.get('pageToken')\n\n # Call the search.list method to retrieve results matching the specified query term.\n search_response = youtube.search().list(\n q=searchTerm,\n part='id,snippet',\n eventType='live',\n type='video',\n maxResults=20,\n 
pageToken=pageToken\n ).execute()\n\n videos = []\n\n # Append Title, Video ID, and Thumbnail data into videos array\n for search_result in search_response.get('items', []):\n videos.append({\n 'title': search_result['snippet']['title'],\n 'id': search_result['id']['videoId'],\n 'thumbnail': search_result['snippet']['thumbnails']['high']\n })\n\n # Previous Page Token\n if 'prevPageToken' not in search_response:\n prevToken = ''\n else:\n prevToken = search_response['prevPageToken']\n\n # Next Page Token\n if 'nextPageToken' not in search_response:\n nextToken = ''\n else:\n nextToken = search_response['nextPageToken']\n\n response_data = {\n 'nextPageToken': nextToken,\n 'prevPageToken': prevToken,\n 'pageInfo': search_response['pageInfo'],\n 'videos': videos\n }\n\n return response_data\n\n@app.route('/v1/api/getStreamMessages', methods=['GET'])\n@jwt_required\ndef stream_messages():\n youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,\n developerKey=DEVELOPER_KEY)\n \n videoID = request.args.get('videoID')\n pageToken = request.args.get('pageToken')\n\n # Call the videos.list method to retrieve the liveChatID of the specified video.\n video_response = youtube.videos().list(\n part='id,snippet,liveStreamingDetails',\n id=videoID,\n ).execute()\n\n video = []\n\n for video_result in video_response.get('items', []):\n video.append({\n 'title': video_result['snippet']['title'],\n 'liveStreamID': video_result['liveStreamingDetails']\n })\n\n # Check if live chat ID exists\n if 'activeLiveChatId' not in video[0]['liveStreamID']:\n return []\n else:\n messages_response = youtube.liveChatMessages().list(\n liveChatId=video[0]['liveStreamID']['activeLiveChatId'],\n part='id,snippet,authorDetails',\n maxResults=200,\n pageToken=pageToken\n ).execute()\n\n for message_item in messages_response.get('items', []):\n existingMessage = mongo.db.messages.find({ 'id': message_item['id'] })\n\n # Skip existing messages to avoid saving duplicates\n if (existingMessage.count() == 0):\n # Save messages into database\n mongo.db.messages.insert({\n 'id' : message_item['id'],\n 'username' : message_item['authorDetails']['displayName'],\n 'message' : message_item['snippet']['textMessageDetails']['messageText'],\n 'published' : message_item['snippet']['publishedAt'],\n })\n\n date = dateutil.parser.parse(message_item['snippet']['publishedAt'])\n message_item['snippet']['publishedAt'] = date.ctime()\n\n return messages_response\n\n@app.route('/v1/api/getMessages', methods=['GET'])\n@jwt_required\ndef get_messages():\n searchTerm = request.args.get('searchValue')\n messages = []\n\n # Sort results by date/time published in ascending order\n userMessages = mongo.db.messages.find({ 'username' : searchTerm }).sort('published', 1)\n\n for message in userMessages:\n # Format time\n date = dateutil.parser.parse(message['published'])\n\n messages.append({\n 'username': message['username'],\n 'message': message['message'],\n 'published': date.ctime()\n })\n\n return messages\n\n@app.after_request\n\ndef after_request(response):\n response.headers.add('Access-Control-Allow-Origin', '*')\n response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')\n response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE')\n return response\n\nif __name__ == \"__main__\":\n 
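# A sketch, not the author's code: stream_messages above deduplicates with
# find().count() == 0 before inserting, which can race under concurrent
# requests. pymongo's update_one with upsert=True performs the same dedupe
# atomically (mongo/message_item and the field names are copied from the
# snippet above purely for illustration):
mongo.db.messages.update_one(
    {'id': message_item['id']},        # match on the chat-message id
    {'$set': {
        'username': message_item['authorDetails']['displayName'],
        'message': message_item['snippet']['textMessageDetails']['messageText'],
        'published': message_item['snippet']['publishedAt'],
    }},
    upsert=True,                       # insert when absent, overwrite when present
)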
app.run(debug=True)","repo_name":"march93/live-feed-server","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":5914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28049220402","text":"# Python script using Google Admin SDK\n# to loop through all pages of user info\n# have export to csv file\n\nfrom __future__ import print_function\nimport httplib2\nimport os\nimport pandas as pd\nfrom apiclient import discovery\nfrom oauth2client import client\nfrom oauth2client import tools\nfrom oauth2client.file import Storage\n\n\ntry:\n import argparse\n flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()\nexcept ImportError:\n flags = None\n\nSCOPES = 'https://www.googleapis.com/auth/admin.directory.user'\nCLIENT_SECRET_FILE = 'client_secret.json'\nAPPLICATION_NAME = 'Directory API Python Quickstart'\n\n\ndef get_credentials():\n \"\"\"Gets valid user credentials from storage.\n\n If nothing has been stored, or if the stored credentials are invalid,\n the OAuth2 flow is completed to obtain the new credentials.\n\n Returns:\n Credentials, the obtained credential.\n \"\"\"\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'admin-directory_v1-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials\n\n\ndef get_user_list():\n\n credentials = get_credentials()\n\n http = credentials.authorize(httplib2.Http())\n df = pd.DataFrame()\n service = discovery.build('admin', 'directory_v1', http=http)\n token_value = ''\n while token_value is not None:\n results = service.users().list(customer='my_customer', maxResults=500,\n orderBy='email',pageToken=token_value).execute()\n token_value = results.get('nextPageToken')\n user_info = pd.DataFrame.from_dict(results.get('users',[]))\n df = df.append(user_info)\n\n df.to_csv('<..>test-outputs/all-user-info.csv',index=False)\n\nif __name__ == '__main__':\n\n get_user_list()\n\n","repo_name":"b-odonoghue/helpful-scripts","sub_path":"user-list.py","file_name":"user-list.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20030548196","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport getopt\nimport json\nimport logging\nimport os.path\nimport sys\n\n\ndef check_python():\n info = sys.version_info\n if info[0] == 2:\n print(\"Python 3.3+ required\")\n sys.exit(1)\n elif info[0] == 3 and not info[1] >= 3:\n print(\"Python 3.3+ required\")\n sys.exit(1)\n elif info[0] not in [2, 3]:\n print(\"Python version not supported\")\n sys.exit(1)\n\n\ndef find_config():\n config_path = 'config.json'\n if os.path.exists(config_path):\n return config_path\n config_path = os.path.join(os.path.dirname(__file__), '..', 'config.json')\n if os.path.exists(config_path):\n return config_path\n return None\n\n\ndef get_config(is_local):\n logging.basicConfig(level=logging.INFO,\n 
format='%(levelname)s: %(message)s')\n\n if is_local:\n shortopts = 'hd:s:b:p:k:l:m:c:t:vq'\n longopts = ['help', 'fast-open', 'pid-file=', 'log-file=']\n else:\n shortopts = 'hd:s:p:k:m:c:t:vq'\n longopts = ['help', 'fast-open', 'pid-file=', 'log-file=', 'workers=']\n try:\n config_path = find_config()\n optlist, args = getopt.getopt(sys.argv[1:], shortopts, longopts)\n for key, value in optlist:\n if key == '-c':\n config_path = value\n\n if config_path:\n logging.info('loading config from %s' % config_path)\n with open(config_path, 'rb') as f:\n try:\n config = json.loads(f.read().decode('utf-8'),\n object_hook=_decode_dict)\n except Exception as e:\n raise e\n except Exception as e:\n raise e\n","repo_name":"godontop/shadowsocks-py3","sub_path":"shadowsocks/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37458081819","text":"#!/usr/bin/env python3\nfrom __future__ import annotations\n\nif __name__ == \"__main__\":\n position: int = 0\n depth: int = 0\n aim: int = 0\n\n with open(\"input\") as f:\n for line in f:\n tokens: list[str] = line.strip().split()\n command: str = tokens[0]\n offset: int = int(tokens[1])\n if command == \"forward\":\n position += offset\n depth += offset * aim\n elif command == \"down\":\n aim += offset\n elif command == \"up\":\n aim -= offset\n\n print(position * depth)\n","repo_name":"KSmanis/advent-of-code","sub_path":"2021/day_2/part_2.py","file_name":"part_2.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31583355759","text":"from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401\nfrom oci.decorators import init_model_state_from_kwargs\n\n\n@init_model_state_from_kwargs\nclass FlowPortLink(object):\n \"\"\"\n Details about the link between two data flow operators.\n \"\"\"\n\n #: A constant which can be used with the model_type property of a FlowPortLink.\n #: This constant has a value of \"CONDITIONAL_INPUT_LINK\"\n MODEL_TYPE_CONDITIONAL_INPUT_LINK = \"CONDITIONAL_INPUT_LINK\"\n\n #: A constant which can be used with the model_type property of a FlowPortLink.\n #: This constant has a value of \"OUTPUT_LINK\"\n MODEL_TYPE_OUTPUT_LINK = \"OUTPUT_LINK\"\n\n #: A constant which can be used with the model_type property of a FlowPortLink.\n #: This constant has a value of \"INPUT_LINK\"\n MODEL_TYPE_INPUT_LINK = \"INPUT_LINK\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initializes a new FlowPortLink object with values from keyword arguments. 
This class has the following subclasses and if you are using this class as input\n to a service operations then you should favor using a subclass over the base class:\n\n * :class:`~oci.data_integration.models.InputLink`\n * :class:`~oci.data_integration.models.OutputLink`\n * :class:`~oci.data_integration.models.ConditionalInputLink`\n\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param model_type:\n The value to assign to the model_type property of this FlowPortLink.\n Allowed values for this property are: \"CONDITIONAL_INPUT_LINK\", \"OUTPUT_LINK\", \"INPUT_LINK\"\n :type model_type: str\n\n :param key:\n The value to assign to the key property of this FlowPortLink.\n :type key: str\n\n :param model_version:\n The value to assign to the model_version property of this FlowPortLink.\n :type model_version: str\n\n :param parent_ref:\n The value to assign to the parent_ref property of this FlowPortLink.\n :type parent_ref: oci.data_integration.models.ParentReference\n\n :param object_status:\n The value to assign to the object_status property of this FlowPortLink.\n :type object_status: int\n\n :param description:\n The value to assign to the description property of this FlowPortLink.\n :type description: str\n\n :param port:\n The value to assign to the port property of this FlowPortLink.\n :type port: str\n\n \"\"\"\n self.swagger_types = {\n 'model_type': 'str',\n 'key': 'str',\n 'model_version': 'str',\n 'parent_ref': 'ParentReference',\n 'object_status': 'int',\n 'description': 'str',\n 'port': 'str'\n }\n\n self.attribute_map = {\n 'model_type': 'modelType',\n 'key': 'key',\n 'model_version': 'modelVersion',\n 'parent_ref': 'parentRef',\n 'object_status': 'objectStatus',\n 'description': 'description',\n 'port': 'port'\n }\n\n self._model_type = None\n self._key = None\n self._model_version = None\n self._parent_ref = None\n self._object_status = None\n self._description = None\n self._port = None\n\n @staticmethod\n def get_subtype(object_dictionary):\n \"\"\"\n Given the hash representation of a subtype of this class,\n use the info in the hash to return the class of the subtype.\n \"\"\"\n type = object_dictionary['modelType']\n\n if type == 'INPUT_LINK':\n return 'InputLink'\n\n if type == 'OUTPUT_LINK':\n return 'OutputLink'\n\n if type == 'CONDITIONAL_INPUT_LINK':\n return 'ConditionalInputLink'\n else:\n return 'FlowPortLink'\n\n @property\n def model_type(self):\n \"\"\"\n **[Required]** Gets the model_type of this FlowPortLink.\n The model type of the object.\n\n Allowed values for this property are: \"CONDITIONAL_INPUT_LINK\", \"OUTPUT_LINK\", \"INPUT_LINK\"\n\n\n :return: The model_type of this FlowPortLink.\n :rtype: str\n \"\"\"\n return self._model_type\n\n @model_type.setter\n def model_type(self, model_type):\n \"\"\"\n Sets the model_type of this FlowPortLink.\n The model type of the object.\n\n\n :param model_type: The model_type of this FlowPortLink.\n :type: str\n \"\"\"\n allowed_values = [\"CONDITIONAL_INPUT_LINK\", \"OUTPUT_LINK\", \"INPUT_LINK\"]\n if not value_allowed_none_or_none_sentinel(model_type, allowed_values):\n raise ValueError(\n f\"Invalid value for `model_type`, must be None or one of {allowed_values}\"\n )\n self._model_type = model_type\n\n @property\n def key(self):\n \"\"\"\n Gets the key of this FlowPortLink.\n The key of the object.\n\n\n :return: The key of this FlowPortLink.\n :rtype: str\n \"\"\"\n return self._key\n\n @key.setter\n def key(self, key):\n \"\"\"\n Sets the key of 
this FlowPortLink.\n The key of the object.\n\n\n :param key: The key of this FlowPortLink.\n :type: str\n \"\"\"\n self._key = key\n\n @property\n def model_version(self):\n \"\"\"\n Gets the model_version of this FlowPortLink.\n The model version of an object.\n\n\n :return: The model_version of this FlowPortLink.\n :rtype: str\n \"\"\"\n return self._model_version\n\n @model_version.setter\n def model_version(self, model_version):\n \"\"\"\n Sets the model_version of this FlowPortLink.\n The model version of an object.\n\n\n :param model_version: The model_version of this FlowPortLink.\n :type: str\n \"\"\"\n self._model_version = model_version\n\n @property\n def parent_ref(self):\n \"\"\"\n Gets the parent_ref of this FlowPortLink.\n\n :return: The parent_ref of this FlowPortLink.\n :rtype: oci.data_integration.models.ParentReference\n \"\"\"\n return self._parent_ref\n\n @parent_ref.setter\n def parent_ref(self, parent_ref):\n \"\"\"\n Sets the parent_ref of this FlowPortLink.\n\n :param parent_ref: The parent_ref of this FlowPortLink.\n :type: oci.data_integration.models.ParentReference\n \"\"\"\n self._parent_ref = parent_ref\n\n @property\n def object_status(self):\n \"\"\"\n Gets the object_status of this FlowPortLink.\n The status of an object that can be set to value 1 for shallow references across objects, other values reserved.\n\n\n :return: The object_status of this FlowPortLink.\n :rtype: int\n \"\"\"\n return self._object_status\n\n @object_status.setter\n def object_status(self, object_status):\n \"\"\"\n Sets the object_status of this FlowPortLink.\n The status of an object that can be set to value 1 for shallow references across objects, other values reserved.\n\n\n :param object_status: The object_status of this FlowPortLink.\n :type: int\n \"\"\"\n self._object_status = object_status\n\n @property\n def description(self):\n \"\"\"\n Gets the description of this FlowPortLink.\n Detailed description for the object.\n\n\n :return: The description of this FlowPortLink.\n :rtype: str\n \"\"\"\n return self._description\n\n @description.setter\n def description(self, description):\n \"\"\"\n Sets the description of this FlowPortLink.\n Detailed description for the object.\n\n\n :param description: The description of this FlowPortLink.\n :type: str\n \"\"\"\n self._description = description\n\n @property\n def port(self):\n \"\"\"\n Gets the port of this FlowPortLink.\n Key of FlowPort reference\n\n\n :return: The port of this FlowPortLink.\n :rtype: str\n \"\"\"\n return self._port\n\n @port.setter\n def port(self, port):\n \"\"\"\n Sets the port of this FlowPortLink.\n Key of FlowPort reference\n\n\n :param port: The port of this FlowPortLink.\n :type: str\n \"\"\"\n self._port = port\n\n def __repr__(self):\n return formatted_flat_dict(self)\n\n def __eq__(self, other):\n if other is None:\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n return not self == other\n","repo_name":"oracle/oci-python-sdk","sub_path":"src/oci/data_integration/models/flow_port_link.py","file_name":"flow_port_link.py","file_ext":"py","file_size_in_byte":8574,"program_lang":"python","lang":"en","doc_type":"code","stars":345,"dataset":"github-code","pt":"52"} +{"seq_id":"32060351943","text":"# /usr/bin/python3\n#-*- coding: utf-8-*-\n# cal-client-one2oned-pissarra.py \n# -------------------------------------\n# @ edt ASIX M06 Curs 2019-2020\n# Gener 2020\n# -------------------------------------\nimport sys,socket,argparse # Importar ARGPARSE\n\n# 
ARGPARSE\n\nparser = argparse.ArgumentParser(description=\"\"\"CAL client\"\"\") # Initialize the ARGPARSE parser\nparser.add_argument(\"-s\",\"--server\",type=str, default='',dest=\"server\") # Anyone can connect. If no SERVER is specified, it connects to LOCALHOST.\nparser.add_argument(\"-p\",\"--port\",type=int, default=50001,dest=\"port\") # The port to hit is 50001\nargs=parser.parse_args() # Run the ARGPARSE\n\n# SOCKETS\n\nHOST = args.server # Constant indicating which HOST we will connect to, dest=server (ARGPARSE variable)\nPORT = args.port # Constant indicating which PORT we will connect to, dest=port (ARGPARSE variable)\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Initialize the SOCKET used to connect\ns.connect((HOST, PORT)) # Connect to the SERVER\nwhile True: # Infinite loop # Keeps listening in case more than 1 line is sent.\n    data = s.recv(1024) # Receive data from the SERVER.\n    if not data: break\n    print('Data:', data.decode(\"utf-8\")) # Print in UTF-8 mode by DECODING with data.decode(\"utf-8\")\ns.close()\nsys.exit(0)\n\n\"\"\"\n**24-calendar-client-one2one.py [-s server] [-p port]**\n\n**24-calendar-server-one2one.py [-p port] [-a any]**\n\n  Calendar server with a popen; the client connects and receives the calendar. The server \n  closes the connection with the client once it has answered, but keeps listening for new connections.\n\n  The server must be controlled with signals that do:\n  - sigusr1: list the peers and quit.\n  - sigusr2: count of the peer list and quit.\n  - sigterm: list the peers, count, and quit.\n\n  The server is a daemon that stays running after forking from its parent\n  (which dies) and is controlled with signals.\n\n  Having the server receive the year as an argument is silly; it would be more logical\n  to do it in the client, but the client-server dialogue is left for a later\n  exercise. The goal here is to practice using an arg in the popen.\n  \n  \n### SOLUTION\n\n# CLIENT\n\n\t1. It connects to hit whichever SERVER it wants, and the PORT as well.\n\t\n\t2. It connects to the target SOCKET via HOST and PORT and stays LISTENING.\n\t\n\t3. When there is no more data to RECEIVE, it does a BREAK, finishes, and shows the information DECODED FROM BINARY TO UTF-8.\n\n# SERVER\n\n\t1. The SERVER is started DETACHED, and the CLIENT connects. \n\t\n\t2. The CALENDAR server dumps the information to it.\n\t\n\t3. The CLIENT finishes.\n\t\n\t4. The SERVER listens for the next one and adds it to an EMPTY LIST via APPEND.\n\t\n\t5. We send it a KILL -15 $(pgrep python).\n\t\n\t6. The SERVER responds:\n\t\n\tubuntu@keshi:~/Documents/ipc$ kill -15 $(pgrep python)\nubuntu@keshi:~/Documents/ipc$ Signal handler called with signal: 15\n[('127.0.0.1', 52574), ('127.0.0.1', 52576)] 2\n\n\t7. This means it received SIGNAL 15, which is SIGTERM --> on receiving that signal the program terminates AND SHOWS WHO HAS CONNECTED + THE NUMBER of CONNECTED USERS \n\t\n\t\n\t---\n\t\n\t\n## KILL and TESTS\n\n1. The SERVER is started DETACHED.\n\n2. The client connects (s.connect((HOST,PORT))) and makes REQUESTS, staying LISTENING and RECEIVING MESSAGES indefinitely, until it has nothing more to receive.\n\n3. The client finishes and another one connects from a different dynamic SOURCE port.\n\n4. The server keeps recording who comes in (IP and PORT) and COUNTS how many users HAVE ENTERED (empty list llistaPeers.append(addr); printed with a len: print(llistaPeers, len(llistaPeers)) )\n\t\n5. From another CONSOLE we send it: (3 CLIENTS have CONNECTED 1by1, one at a time)\n\n\tkill -10 $(pgrep python) --> SIGUSR1 (Prints the ACTIVE CONNECTIONS)\n\t\n\tubuntu@keshi:~/Documents/ipc$ Signal handler called with signal: 10\n[('127.0.0.1', 52582), ('127.0.0.1', 52584)]\nConnected by ('127.0.0.1', 52586)\n\n\t\n\tkill -12 $(pgrep python) --> SIGUSR2 (Prints HOW MANY HAVE CONNECTED (LEN(list)))\n\n\t\n\tubuntu@keshi:~/Documents/ipc$ Signal handler called with signal: 12\n3\n\n\n\tkill -15 $(pgrep python) --> SIGTERM 15 (TERMINATES THE PROGRAM)\n\t\n\tubuntu@keshi:~/Documents/ipc$ Signal handler called with signal: 15\n[('127.0.0.1', 52582), ('127.0.0.1', 52584), ('127.0.0.1', 52586)] 3\n\n\t\n\t\nIMPORTANT: WE HAVE TEMPORARILY REMOVED THE SYS.EXIT(0) FROM THE CLIENT\n\n# sys.exit(0) # 4. Exits the program # Comment it out if you want to keep it open and know how many we have\n\n\n\n\"\"\"\n","repo_name":"KeshiKiD03/Python-ipc","sub_path":"24-cal-client-one2one.py","file_name":"24-cal-client-one2one.py","file_ext":"py","file_size_in_byte":4541,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7932198054","text":"import logging\nimport time\nimport os\n\ndef create_logger(logger_name = \"log\", root_path = './logs/'):\n    logger = logging.getLogger(logger_name)\n    logger.setLevel(logging.INFO) # master switch for the log level\n\n    curr_time = time.localtime(time.time())\n    log_dir = time.strftime('%Y-%m-%d', curr_time)\n    time_dir = time.strftime('%H-%M-%S', curr_time)\n\n    save_dir = root_path + log_dir + \"/\" + time_dir\n    if not os.path.exists(save_dir):\n        os.makedirs(save_dir)\n\n    log_file_dir = save_dir + \"/\" + \"log.txt\"\n    fh = logging.FileHandler(log_file_dir, mode='w')\n    formatter = logging.Formatter(\"%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s\")\n    fh.setFormatter(formatter)\n    fh.setLevel(logging.INFO)  # log-level switch for output to the file\n    logger.addHandler(fh)\n    return logger, save_dir\n\n","repo_name":"hbr690188270/cs291k_hw","sub_path":"hw2/hw2/logging_module.py","file_name":"logging_module.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"29971039877","text":"'''\nMerge two sorted arrays\n'''\narray_1 = [1, 3, 4, 6, 10]\narray_2 = [2, 5, 8, 11]\n\nans = array_1.copy()\nind = 0\nfor i in range(len(array_2)):\n    while (ind < len(array_1)):\n        if array_2[i] <= array_1[ind]:\n            ans.insert(ind + i, array_2[i])\n            break\n        else:\n            ind += 1\n    else:\n        ans = ans + array_2[i:]\n        break\nprint(ans)\n","repo_name":"Myfour/Python_Basic","sub_path":"algorithm/array_merging.py","file_name":"array_merging.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15419325989","text":"from werkzeug.wrappers import Request\n\n\nclass middleware:\n    \"\"\"\n    Middleware that does extra logging to make it more transparent what happens in the system\n    \"\"\"\n\n    def __init__(self, app):\n        self.app = app\n\n    def __call__(self, environ, start_response):\n        request = Request(environ)\n        print(f\"Starting processing request {request} from {request.remote_addr}\")\n        res = self.app(environ, start_response)\n        print(f\"Finished processing request {request} from {request.remote_addr}\")\n        return 
res\n","repo_name":"nygrenh/distributed-card-game","sub_path":"src/flask_middleware.py","file_name":"flask_middleware.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33979782695","text":"# coding=utf-8\nimport pymysql\nfrom flask import Flask, render_template, request, jsonify\nfrom flask_cors import CORS\nimport os\n\n\napp = Flask(__name__)\n\napp.config['JSON_AS_ASCII'] = False\nCORS(app, resources=r'/*')\n\nroot_dir = os.path.abspath(os.path.join(os.getcwd(), \"../\"))\n\n\n@app.route('/')\ndef index():\n cursor = local_db.cursor()\n find_sql = \"select * from inventors where inventor_id=74 limit 1\"\n cursor.execute(find_sql)\n res = cursor.fetchall()[0]\n print(res)\n return jsonify(res)\n\n\n# 返回ipc_top_inventor表中全部信息\n# http://127.0.0.1:5000/ipc_category_info\n@app.route('/ipc_category_info')\ndef getIpcCategoryInfo():\n # ipc_word = request.args.get(\"ipc_category\")\n cursor = local_db.cursor(cursor=pymysql.cursors.DictCursor)\n sql = 'select * from ipc_top_inventor'\n cursor.execute(sql)\n res = cursor.fetchall()\n return jsonify(res)\n\n\n# 返回ipc_category大类下全部inventor_id列表\n# http://127.0.0.1:5000/ipc_category_all_inventors?ipc_category=A\n@app.route('/ipc_category_all_inventors')\ndef getIpcCategoryAllInventors():\n ipc_category = request.args.get(\"ipc_category\")\n cursor = local_db.cursor()\n sql = \"select inventor_id from ipc_inventors where ipc_category = '\" + ipc_category + \"';\"\n cursor.execute(sql)\n res_tuple = cursor.fetchall()\n res = []\n for t in res_tuple:\n res.append(t[0])\n return jsonify(res)\n\n\n# 返回发明人id对应的全部category大类和数量列表\n# http://127.0.0.1:5000//inventor_all_category?inventor=5\n# @app.route('/inventor_all_category')\n# def getInventorAllCategory():\n# inventor_id = request.args.get(\"inventor_id\")\n# print(inventor_id,\"=======\")\n# cursor = local_db.cursor()\n# sql = \"select ipc_category from ipc_inventors where inventor_id = '\" + str(inventor_id) + \"';\"\n# cursor.execute(sql)\n# res_tuple = cursor.fetchall()\n# res = {}\n# for t in res_tuple:\n# if t[0] not in res_tuple:\n# res[t[0]] = 1\n# else:\n# res[t[0]] += 1\n # return jsonify(res)\n\n\n# 返回对应inventor的简要信息\n# http://127.0.0.1:5000/inventor_info_brief?id=1\n@app.route('/inventor_info_brief')\ndef getInventorInfoBrief():\n inventor_id = request.args.get(\"id\")\n # print(inventor_id)\n\n try:\n brief_db = pymysql.connect(host='localhost',\n user='root',\n password='password',\n database='report')\n except:\n print(\"connect database fail!\")\n\n cursor = brief_db.cursor(cursor=pymysql.cursors.DictCursor)\n column_names = ['inventor_id', 'inventor_name', 'inventor_companys',\n 'inventor_patents_totalnum', 'average_score', 'T_index']\n sql_column = ''\n for colunm_name in column_names:\n sql_column += (colunm_name+', ')\n sql_column = sql_column[0:-2]\n\n sql_brief = \"select \" + sql_column + \\\n \" from inventors where inventor_id = \"+str(inventor_id) + \";\"\n # print(sql_brief)\n cursor.execute(sql_brief)\n inventor_brief_info = cursor.fetchone()\n # print(inventor_brief_info)\n\n return inventor_brief_info\n\n\n# 返回inventor全部信息\n# http://127.0.0.1:5000/inventor_info_all?id=1\n@app.route('/inventor_info_all')\ndef getInventorInfoAll():\n inventor_id = request.args.get(\"id\")\n # print(inventor_id)\n\n try:\n brief_db = pymysql.connect(host='localhost',\n user='root',\n password='password',\n database='report')\n except:\n print(\"connect database fail!\")\n\n cursor = 
brief_db.cursor(cursor=pymysql.cursors.DictCursor)\n column_names = ['inventor_id', 'inventor_name', 'patents_ids', 'patents_ipcs', 'inventor_companys', 'collaborators', 'inventor_categories',\n 'inventor_patents_totalnum', 'average_score', 'T_index']\n sql_column = ''\n for colunm_name in column_names:\n sql_column += (colunm_name+', ')\n sql_column = sql_column[0:-2]\n\n sql_brief = \"select \" + sql_column + \\\n \" from inventors where inventor_id = \"+str(inventor_id) + \";\"\n # print(sql_brief)\n cursor.execute(sql_brief)\n inventor_brief_info = cursor.fetchone()\n # print(inventor_brief_info)\n\n return jsonify(inventor_brief_info)\n\n\n# 根据patent_id返回专利信息\n# http://127.0.0.1:5000/patent_info?id=77735\n@app.route('/patent_info')\ndef getPatentInfo():\n patent_id = request.args.get(\"id\")\n # print(inventor_id)\n\n cursor = remote_db.cursor(cursor=pymysql.cursors.DictCursor)\n column_names = ['id', 'company_id', 'name', 'status', 'patent_type', 'num', 'patenter',\n 'patenter_now', 'inventor', 'designer', 'ipc', 'info', 'public_date', 'application_date', 'address', 'patent_score']\n sql_column = ''\n for colunm_name in column_names:\n sql_column += (colunm_name+', ')\n sql_column = sql_column[0:-2]\n\n sql = \"select \" + sql_column + \\\n \" from company_patent where id = \"+str(patent_id) + \";\"\n # print(sql)\n cursor.execute(sql)\n patent_info = cursor.fetchone()\n # print(patent_info)\n\n return jsonify(patent_info)\n\n\n# 根据发明家姓名返回对应id列表\n# http://127.0.0.1:5000/search_inventors?name=张元\n@app.route('/search_inventors')\ndef getInventorsByName():\n name = request.args.get('name')\n sql = \"select inventor_id from inventors where inventor_name like '%\" + name + \"%';\"\n cursor = local_db.cursor()\n cursor.execute(sql)\n res_tuple = cursor.fetchall()\n res = []\n for t in res_tuple:\n res.append(t[0])\n return jsonify(res)\n\n# 查询数据库处理专利数量\n# http://127.0.0.1:5000/system_patent_number\n@app.route('/system_data')\ndef getSystemData():\n try:\n remote_db = pymysql.connect(host='120.27.209.14',\n port=22936,\n user='junshi',\n password='junshi_suwen',\n database='Report')\n except:\n print(\"connect database fail!\")\n\n with open(root_dir+'\\\\databaseScript\\\\inventor_table\\\\current_patent_id.txt', 'r', encoding=\"utf-8\")as f:\n max_patent_id = int(f.read())\n sql = \"select count(id) from company_patent where id<\"+str(max_patent_id)+\";\"\n cursor = remote_db.cursor()\n cursor.execute(sql)\n res = cursor.fetchone()[0]\n return str(res)\n\n# 查询已有人才数量\n@app.route(\"/system_inventor_number\")\ndef getInventorNumber():\n sql = \"select count(inventor_id) from inventors;\"\n cursor = local_db.cursor()\n cursor.execute(sql)\n res = cursor.fetchone()[0]\n return str(res)\n\n\n\n\nif __name__ == '__main__':\n print(123)\n\n try:\n local_db = pymysql.connect(host='localhost',\n user='root',\n password='password',\n database='report')\n except:\n print(\"connect database fail!\")\n print(123)\n\n try:\n remote_db = pymysql.connect(host='120.27.209.14',\n port=22936,\n user='junshi',\n password='junshi_suwen',\n database='Report')\n except:\n print(\"connect database fail!\")\n app.run()\n","repo_name":"chengleileilei/graduation","sub_path":"back_end/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32363980306","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 23 20:55:55 2022\n\n@author: zhi\n\"\"\"\n\nimport 
pickle\nimport numpy as np\nfrom PIL import Image\n\nfrom torch.autograd import Variable \nimport random\n\n\ndef read_features(images, model, transform):\n \n model = model.eval()\n #model.cuda()\n features = []\n f = []\n for img in images:\n #print(img.shape)\n x = Variable(transform(Image.fromarray(np.squeeze(img), mode=\"L\")), volatile=True)\n x = x.cuda()\n feature = model(x.unsqueeze(0))\n feature = feature.cpu().data.numpy()\n f.append(feature)\n feature = feature / np.linalg.norm(feature) # Normalize\n features.append(feature[0])\n \n features = np.array(features)\n return features\n \n\n\ndef classExemplars_euclidean(m, images, model, transform):\n \n features = read_features(images, model, transform)\n center = centerComputing(features)\n centers = np.tile(center, (len(features), 1))\n distances = np.linalg.norm((features-centers), axis=1)\n\n ind = np.argsort(distances)[:m]\n exemplar_set = np.array(images)[ind]\n exemplar_features = features[ind]\n\n return exemplar_set, exemplar_features, center\n \n\ndef classExemplars_similar(m, images, model, transform):\n \n features = read_features(images, model, transform)\n center = centerComputing(features)\n centers = np.tile(center, (len(features), 1))\n similarities = np.matmul(features, centers.T)[:, 0]\n \n ind = np.argsort(np.abs(similarities))[:m]\n exemplar_set = np.array(images)[ind]\n exemplar_features = features[ind]\n\n return exemplar_set, exemplar_features, center\n\n\ndef classExemplars_random(m, images, model=None, transform=None):\n \n #features = read_features(images, model, transform)\n #center = centerComputing(features)\n \n ind = random.sample(range(len(images)), m)\n exemplar_set = np.array(images)[ind]\n #exemplar_features = features[ind]\n \n return exemplar_set\n\n\ndef createExemplars(opt, original_dataset, model_old=None, transform=None):\n \n exemplar_sets = []\n exemplar_labels = [] \n exemplar_features_sets = []\n exemplar_centers = []\n if opt.fixed_memory == 0:\n opt.memory_per_class = opt.memory_size\n else:\n opt.memory_per_class = opt.fixed_memory // opt.num_init_classes + 1\n \n for c in range(0, opt.num_init_classes):\n print(\"Class: \", c)\n c_dataset = original_dataset.get_image_class(c)\n exemplar_set = classExemplars_random(int(opt.memory_per_class), c_dataset) # classExemplars_similar(int(opt.memory_per_class), c_dataset, model_old, transform) #\n #exemplar_center = centerComputing(exemplar_features)\n #exemplar_centers.append(exemplar_center)\n exemplar_sets.append(exemplar_set)\n #exemplar_features_sets.append(exemplar_features)\n exemplar_labels = exemplar_labels + [c]*int(opt.memory_per_class)\n \n #exemplar_sets = np.reshape(np.array(exemplar_sets), (opt.memory_per_class*opt.num_init_classes, 1, opt.img_size, opt.img_size)) #### 1\n exemplar_sets = np.reshape(np.array(exemplar_sets), (opt.memory_per_class*opt.num_init_classes, opt.img_size, opt.img_size, 3)) \n exemplar_labels = np.squeeze(np.array(exemplar_labels)) \n \n with open(opt.exemplar_file, \"wb\") as f:\n pickle.dump((exemplar_sets, exemplar_labels, exemplar_features_sets, exemplar_centers), f)\n \n return exemplar_sets, exemplar_labels, exemplar_centers\n\n\ndef centerComputing(features):\n \n return np.mean(features, 0)\n\n\n","repo_name":"gawainxu/OpenIncremen","sub_path":"exemplars.py","file_name":"exemplars.py","file_ext":"py","file_size_in_byte":3593,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"13866894715","text":"def calculate_length(text):\n return 
len(text)\n \ndef count_letter_str(text,char):\n count=0\n for ch in text:\n if ch==char:\n count+=1\n return count\n\ndef start():\n str_text=input(\"Enter Text:\") \n str_char=input(\"Enter the Character you need count:\")\n count_char=count_letter_str(str_text,str_char)\n print(str(count_char))\n\nif __name__ == \"__main__\":\n start()\n","repo_name":"iamharekrishna/python-practice","sub_path":"string_demo.py","file_name":"string_demo.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40771820276","text":"import data_processing as dp\nimport evaluation as evaluation\nimport h5_to_gluonts as hg\nimport make_forecast as fc\nimport numpy as np\nimport sys\nimport json\nimport csv\nfrom evaluation import Forecast\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n path = input(\"Missing program argument: metadata path\\n\"\n \"please give it:\")\n if path == \"\":\n exit()\n else:\n path = sys.argv[1]\n with open(path) as md_file:\n md = json.load(md_file)\n print(str(md))\n print(\"loading data...\")\n train, valid, test = hg.load_h5_to_gluon(md)\n train = None\n valid = None\n if md['normalize']:\n for data in (test,):\n for n in range(len(data)):\n data.list_data[n]['target'], data.list_data[n]['scaler'] = dp.preprocess_data(\n data.list_data[n]['target'])\n predictor = fc.load_predictor(md['serialize_path'], md)\n test_slices = evaluation.split_validation(test, md)\n test = None\n ss = []\n while len(test_slices) > 100:\n ss.append(test_slices[:100])\n test_slices = test_slices[100:]\n if len(test_slices) > 1:\n ss.append(test_slices)\n test_slices = None\n crps = []\n mse = []\n i = 1\n for slices in ss:\n print(f'{i} of {len(ss)}')\n i += 1\n print(\"making predictions...\")\n forecast = fc.make_forecast_vector(predictor, slices, md)\n if md['estimator'] == \"TempFlow\":\n forecast = [\n Forecast([slice[0].samples[::, ::, n] for n in range(md['sensors'])], [slice[0].mean[::, n] for n in range(md['sensors'])])\n for slice in forecast]\n else:\n forecast = [Forecast([sensor.samples for sensor in slice], [sensor.mean for sensor in slice]) for slice in\n forecast]\n print(\"Rescaling...\")\n slices, forecast = dp.postprocess_data_vector(slices, forecast)\n slices = dp.listdata_to_array(slices)\n print(\"evaluating...\")\n evals = np.stack(evaluation.validate_mp(slices[1:], forecast[:len(forecast) - 1], mse=False))\n crps.append(evals)\n evals = np.stack(evaluation.validate_mp(slices[1:], forecast[:len(forecast) - 1], mse=True))\n mse.append(evals)\n cjoin = crps[0]\n for i in crps[1:]:\n cjoin = np.append(cjoin, i, 0)\n crps = np.average(cjoin, 0)\n mjoin = mse[0]\n for i in mse[1:]:\n mjoin = np.append(mjoin, i, 0)\n mse = np.average(mjoin, 0)\n\n f = open(md['serialize_path'] + \"crps.csv\", 'w', newline='')\n writer = csv.writer(f)\n writer.writerows(crps)\n f.close()\n f = open(md['serialize_path'] + \"mse.csv\", 'w', newline='')\n writer = csv.writer(f)\n writer.writerows(mse)\n f.close()\n","repo_name":"AdrianPlesner/Probabalistic-Multivariate-Timeseries-Forecasting","sub_path":"src/validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":2759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"810406918","text":"from files.classes import Enemy, battle_fighters\nfrom files.helpers import getItemColor, checkYorN, outOfEnergy\nfrom termcolor import colored\nimport random\n\ndef 
enemy_attack(_enemy_type):\n    if (_enemy_type == 'Kraken' or _enemy_type == 'Shark'):\n        if random.random() < 0.6: # 60% chance of hitting with a regular attack\n            return random.randint(10, 25) # Regular attack damage\n        else:\n            return 0\n    elif (_enemy_type == 'Kaiju' or _enemy_type == 'Kong'):\n        if random.random() < 0.7: # 70% chance of hitting with a regular attack\n            return random.randint(15, 35) # Regular attack damage\n        else:\n            return 0\n    elif _enemy_type == 'Alien':\n        if random.random() < 0.8: # 80% chance of hitting with a regular attack\n            return random.randint(20, 45) # Regular attack damage\n        else:\n            return 0\n    else:\n        if random.random() < 0.6: # 60% chance of hitting with a regular attack\n            return random.randint(10, 15) # Regular attack damage\n        else:\n            return 0\n\ndef pirate_sword():\n    if random.random() < 0.6: # 60% chance of hitting with a sword\n        return random.randint(15, 35) # Sword damage\n    else:\n        return 0\n\ndef pirate_cannon():\n    if random.random() < 0.4: # 40% chance of hitting with a cannon\n        return random.randint(25, 55) # Cannon damage\n    else:\n        return 0\n\ndef canPlayerSummon(_player):\n    for item in _player.fighters:\n        if (item == 'Kraken' or item == 'Shark') and _player.gold >= 100:\n            return True\n        elif (item == 'Kaiju' or item == 'Kong') and _player.gold >= 200:\n            return True\n        elif item == 'Alien' and _player.gold >= 300:\n            return True\n    # fixed: the old 'else: return False' inside the loop only ever checked the first fighter\n    return False\n\ndef spawnEnemy(_player):\n    #enemy = Enemy()\n    if not _player.fighters:\n        fighter_options = [\"Shark\", \"Kraken\"]\n        enemy_type = random.choice(fighter_options)\n        if enemy_type == \"Shark\":\n            enemy = Enemy(enemy_type, 100)\n            return enemy\n        else:\n            enemy = Enemy(enemy_type, 100)\n            return enemy\n    elif len(_player.fighters) == 1:\n        if \"Shark\" in _player.fighters:\n            enemy = Enemy(\"Kraken\", 100)\n            return enemy\n        else:\n            enemy = Enemy(\"Shark\", 100)\n            return enemy\n    elif len(_player.fighters) == 2:\n        fighter_options = [\"Kaiju\", \"Kong\"]\n        enemy_type = random.choice(fighter_options)\n        if enemy_type == \"Kaiju\":\n            enemy = Enemy(enemy_type, 200)\n            return enemy\n        else:\n            enemy = Enemy(enemy_type, 200)\n            return enemy\n    elif len(_player.fighters) == 3:\n        if \"Kaiju\" in _player.fighters:\n            enemy = Enemy(\"Kong\", 200)\n            return enemy\n        else:\n            enemy = Enemy(\"Kaiju\", 200)\n            return enemy\n    elif len(_player.fighters) == 4:\n        enemy = Enemy(\"Alien\", 300)\n        return enemy\n    else:\n        print(\"This 'else' clause in spawnEnemy should not be executed.\")\n\ndef enemySummonOption(_player):\n    #print('Entered enemySummonOption...')\n    options = []\n    for item in _player.fighters:\n        if (item == 'Kraken' or item == 'Shark') and _player.gold >= 100:\n            #print(f'Added {item} to options')\n            options.append(item)\n        elif (item == 'Kaiju' or item == 'Kong') and _player.gold >= 200:\n            #print(f'Added {item} to options')\n            options.append(item)\n        elif item == 'Alien' and _player.gold >= 300:\n            #print(f'Added {item} to options')\n            options.append(item)\n        else:\n            print('Nothing to add. Not good if you entered this ELSE clause in enemySummonOption')\n    #print(f'You summon options are: {options}')\n    return options\n\ndef enemySummonOptionAutoSelect(_player):\n    options = []\n\n    for item in _player.fighters:\n        if (item == 'Kraken' or item == 'Shark') and _player.gold >= 100:\n            # print(f'Added {item} to options')\n            options.append(item)\n        elif (item == 'Kaiju' or item == 'Kong') and _player.gold >= 200:\n            # print(f'Added {item} to options')\n            options.append(item)\n        elif item == 'Alien' and _player.gold >= 300:\n            # print(f'Added {item} to options')\n            options.append(item)\n        else:\n            print('Nothing to add. Not good if you entered this ELSE clause in enemySummonOption')\n            return options\n    chosen_fighter = random.choice(options)\n\n    return chosen_fighter\n\ndef validFighterSelection(_fighter_options):\n    counter = 1\n    counter_options = []\n    display_fighter_options = [] # USED FOR INPUT LOOP LATER\n    print(\"Available Fighters: \")\n    for fighter in _fighter_options:\n        counter_options.append(counter)\n        display_fighter_options.append(f'{counter}. {fighter}')\n        print(f'{counter}. {fighter}')\n        counter += 1\n    selected = input(\"Which fighter would you like to attack with (enter the number)? \")\n    while not selected.strip().isdigit() or int(selected) not in counter_options:\n        #print('That was not a valid option.')\n        #print(f'{selected} was entered.')\n        #print('Counter options are:')\n        #print(counter_options)\n        #print(not selected.strip().isdigit())\n        #print(selected not in counter_options)\n        for item in display_fighter_options:\n            print(item)\n        selected = input(\"Which fighter would you like to attack with (enter the number)? \")\n    return _fighter_options[int(selected)-1]\n\n\ndef getFighterDamage(_fighter):\n    if (_fighter == 'Kraken' or _fighter == 'Shark'):\n        return random.randint(50, 85)\n    elif (_fighter == 'Kaiju' or _fighter == 'Kong'):\n        return random.randint(85, 125)\n    elif _fighter == 'Alien':\n        return random.randint(125, 200)\n\ndef getFighterCost(_fighter):\n    if (_fighter == 'Kraken' or _fighter == 'Shark'):\n        return 100\n    elif (_fighter == 'Kaiju' or _fighter == 'Kong'):\n        return 200\n    elif _fighter == 'Alien': # fixed: was the undefined name _creature\n        return 300\n\ndef pirate_summon(_player):\n    enemy_summon_options = enemySummonOption(_player)\n    selected_fighter = validFighterSelection(enemy_summon_options)\n    print('You have chosen to attack with your ' + colored(selected_fighter, getItemColor(selected_fighter)))\n    damage = getFighterDamage(selected_fighter)\n    cost = getFighterCost(selected_fighter)\n    #print(f'This will deal ' + colored(damage,\"red\") + ' and cost ' + colored(cost,\"yellow\") + ' gold.')\n    print('This summon cost ' + colored(cost,\"yellow\") + ' gold.')\n    updated_player_gold = _player.gold - cost\n    _player.update_gold(updated_player_gold)\n    return damage\n\ndef pirateSummonFighter(item, _player): # fixed: _player was referenced below but never passed in\n    if (item == 'Kraken' or item == 'Shark'):\n        return 75\n    elif (item == 'Kaiju' or item == 'Kong') and _player.gold >= 200:\n        return 150\n    elif item == 'Alien' and _player.gold >= 300:\n        return 250\n    else:\n        return 0\n\ndef playerBattle(_player, _game_map, _current_area):\n    print('You entered player battle!')\n    #os.system('clear')\n    enemy = spawnEnemy(_player)\n    enemy_color = getItemColor(enemy.enemy_type)\n    print('Oh snap! A ' + colored(enemy.enemy_type,enemy_color) + ' appears with ' + colored(enemy.energy,\"yellow\") + ' energy!')\n    print(f'Your current energy level is {_player.energy} so keep that in mind...')\n    decision = checkYorN(input(\"Do you want to fight it (y/n)? 
\"))\n if decision == 'n':\n print(\"Probably ok to skip this one (chicken).\")\n elif decision == 'y':\n _game_map[_current_area].update_consumed(True)\n print(\"let's do it!\")\n while _player.energy > 0 and enemy.energy > 0:\n print(\"Pirate's turn (enter number from options):\")\n #if _player.inventory and _player.gold > 100:\n if canPlayerSummon(_player):\n attack_choice = input(\"1:sword | 2:cannon | 3:summon > \")\n if attack_choice == '1':\n damage = pirate_sword()\n elif attack_choice == '2':\n damage = pirate_cannon()\n elif attack_choice == '3':\n damage = pirate_summon(_player)\n print(f'Dealing ' + colored(damage, \"red\") + ' damage. ')\n print('Your updated gold amount is: ' + colored(_player.gold, \"yellow\"))\n else:\n print(\"Invalid choice. Consider what you enter next time. \")\n print(\"Pirate's turn skipped.\")\n damage = 0\n else:\n attack_choice = input(\"1:sword | 2:cannon > \")\n if attack_choice == '1':\n damage = pirate_sword()\n elif attack_choice == '2':\n damage = pirate_cannon()\n else:\n print(\"Invalid choice. Pirate's turn skipped.\")\n damage = 0\n\n enemy_energy = enemy.energy - damage\n enemy.update_energy(max(0,enemy_energy))\n print('------------------')\n print(colored(enemy.enemy_type, enemy_color) + \" took \" + colored(damage,\"red\") +\n \" damage and now has \" + colored(enemy.energy, \"yellow\"))\n\n if outOfEnergy(enemy):\n print(f'{enemy.enemy_type} has been defeated')\n print('Killing ' + colored(enemy.enemy_type, enemy_color) + 's always makes you feel better.')\n _player.update_energy(100)\n print('Your energy has been fully restored to ' + colored(_player.energy, \"green\"))\n _player.add_to_fighters(enemy.enemy_type)\n print('Even more, a ' + enemy.enemy_type + ' has been added to your inventory!')\n print('Your fighters:')\n for item in _player.fighters:\n print(item)\n break\n\n damage = enemy_attack(enemy.enemy_type)\n pirate_energy = _player.energy - damage\n _player.update_energy(max(0,pirate_energy))\n print(\"You took \" + colored(damage, \"red\") +\n \" damage and now have \" + colored(_player.energy, \"green\"))\n\n if outOfEnergy(_player):\n print(f'Sadly, the {enemy.enemy_type} has defeated you.')\n break","repo_name":"taekwonkrypto/PirateAdventureGame","sub_path":"battle.py","file_name":"battle.py","file_ext":"py","file_size_in_byte":10228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72371910564","text":"from django.contrib import admin\nfrom django.db.models import Case, IntegerField, Value, When\nfrom django.templatetags.static import static\nfrom django.urls import reverse_lazy\nfrom django.utils.http import urlencode\nfrom django.utils.safestring import mark_safe\n\nfrom account import models\nfrom dragonstone.models import (\n EventSubmission,\n FreeformSubmission,\n MentorSubmission,\n PVMSplitSubmission,\n RecruitmentSubmission,\n SotMSubmission,\n)\n\n\n@admin.register(models.Account)\nclass AccountAdmin(admin.ModelAdmin):\n list_display = [\"name\", \"rank\", \"dragonstone_pts\", \"is_active\"]\n list_editable = [\"rank\"]\n list_filter = [\"is_active\", \"rank\"]\n search_fields = [\"name\"]\n readonly_fields = [\n \"recruitment_submissions\",\n \"sotm_submissions\",\n \"pvm_split_submissions\",\n \"mentor_submissions\",\n \"event_hosts_submissions\",\n \"event_participants_submissions\",\n \"event_donors_submissions\",\n ]\n\n fieldsets = (\n (\n None,\n {\n \"fields\": (\n \"name\",\n \"rank\",\n (\"is_active\", \"is_alt\"),\n 
\"sotm_submissions\",\n \"pvm_split_submissions\",\n \"mentor_submissions\",\n \"event_hosts_submissions\",\n \"event_participants_submissions\",\n \"event_donors_submissions\",\n ),\n },\n ),\n )\n\n def get_queryset(self, request):\n queryset = super(AccountAdmin, self).get_queryset(request)\n\n recruitment_pts = RecruitmentSubmission.annotate_dragonstone_pts()\n sotm_pts = SotMSubmission.annotate_dragonstone_pts()\n pvm_splits_pts = PVMSplitSubmission.annotate_dragonstone_pts()\n mentor_pts = MentorSubmission.annotate_dragonstone_pts()\n event_pts = EventSubmission.annotate_dragonstone_pts()\n freeform_pts = FreeformSubmission.annotate_dragonstone_pts()\n\n dragonstone_pts = {}\n for obj in (\n recruitment_pts\n + sotm_pts\n + pvm_splits_pts\n + mentor_pts\n + event_pts\n + freeform_pts\n ):\n if obj[\"account\"] in dragonstone_pts.keys():\n dragonstone_pts[obj[\"account\"]] += obj[\"dragonstone_pts\"]\n else:\n dragonstone_pts[obj[\"account\"]] = obj[\"dragonstone_pts\"]\n\n whens = [\n When(id=account, then=d_pts) for account, d_pts in dragonstone_pts.items()\n ]\n return queryset.annotate(\n dragonstone_pts=Case(*whens, output_field=IntegerField(), default=Value(0))\n ).order_by(\"-dragonstone_pts\", \"name\")\n\n @admin.display(description=\"Dragonstone Points\", ordering=\"dragonstone_pts\")\n def dragonstone_pts(self, obj):\n if obj.dragonstone_pts >= 40:\n dragonstone_icon_url = static(\"img/dragonstone.webp\")\n return mark_safe(\n f'{obj.dragonstone_pts} '\n )\n return obj.dragonstone_pts\n\n @admin.display(description=\"Recruitment Submissions\")\n def recruitment_submissions(self, obj):\n if obj.id:\n url = f\"{reverse_lazy(f'admin:dragonstone_recruitmentsubmission_changelist')}?{urlencode({'recruiter': obj.id})}\"\n return mark_safe(f'Click Here')\n else:\n return \"-\"\n\n @admin.display(description=\"Skill of the Month Submissions\")\n def sotm_submissions(self, obj):\n if obj.id:\n url = f\"{reverse_lazy(f'admin:dragonstone_sotmsubmission_changelist')}?{urlencode({'account': obj.id})}\"\n return mark_safe(f'Click Here')\n else:\n return \"-\"\n\n @admin.display(description=\"PVM Split Submissions\")\n def pvm_split_submissions(self, obj):\n if obj.id:\n url = f\"{reverse_lazy(f'admin:dragonstone_pvmsplitsubmission_changelist')}?{urlencode({'accounts': obj.id})}\"\n return mark_safe(f'Click Here')\n else:\n return \"-\"\n\n @admin.display(description=\"Mentor Submissions\")\n def mentor_submissions(self, obj):\n if obj.id:\n url = f\"{reverse_lazy(f'admin:dragonstone_mentorsubmission_changelist')}?{urlencode({'mentors': obj.id})}\"\n return mark_safe(f'Click Here')\n else:\n return \"-\"\n\n @admin.display(description=\"Event Hosts Submissions\")\n def event_hosts_submissions(self, obj):\n if obj.id:\n url = f\"{reverse_lazy(f'admin:dragonstone_eventsubmission_changelist')}?{urlencode({'hosts': obj.id})}\"\n return mark_safe(f'Click Here')\n else:\n return \"-\"\n\n @admin.display(description=\"Event Participants Submissions\")\n def event_participants_submissions(self, obj):\n if obj.id:\n url = f\"{reverse_lazy(f'admin:dragonstone_eventsubmission_changelist')}?{urlencode({'participants': obj.id})}\"\n return mark_safe(f'Click Here')\n else:\n return \"-\"\n\n @admin.display(description=\"Event Donors Submissions\")\n def event_donors_submissions(self, obj):\n if obj.id:\n url = f\"{reverse_lazy(f'admin:dragonstone_eventsubmission_changelist')}?{urlencode({'donors': obj.id})}\"\n return mark_safe(f'Click Here')\n else:\n return 
\"-\"\n\n\n@admin.register(models.UserCreationSubmission)\nclass UserCreationSubmissionAdmin(admin.ModelAdmin):\n list_display = [\"username\", \"account\", \"phrase\", \"proof\", \"accepted\"]\n list_editable = [\"accepted\"]\n list_filter = [\"accepted\"]\n readonly_fields = [\"username\", \"account\", \"phrase\"]\n fieldsets = (\n (\n None,\n {\n \"fields\": (\"username\", \"account\", (\"proof\", \"phrase\"), \"accepted\"),\n },\n ),\n )\n","repo_name":"CiaranSanders/um_pb_leaderboard","sub_path":"account/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":6108,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"37880417196","text":"from typing import Dict, Any, List, Tuple\r\n\r\nfrom enmapboxprocessing.algorithm.fitgenericregressoralgorithm import FitGenericRegressorAlgorithm\r\nfrom enmapboxprocessing.algorithm.predictregressionalgorithm import PredictRegressionAlgorithm\r\nfrom enmapboxprocessing.algorithm.regressorperformancealgorithm import RegressorPerformanceAlgorithm\r\nfrom enmapboxprocessing.enmapalgorithm import EnMAPProcessingAlgorithm, Group\r\nfrom qgis.core import (QgsProcessingContext, QgsProcessingFeedback)\r\nfrom enmapbox.typeguard import typechecked\r\n\r\n\r\n@typechecked\r\nclass RegressionWorkflowAlgorithm(EnMAPProcessingAlgorithm):\r\n P_DATASET, _DATASET = 'dataset', 'Training dataset'\r\n P_REGRESSOR, _REGRESSOR = 'regressor', 'Regressor'\r\n P_RASTER, _RASTER = 'raster', 'Raster layer with features'\r\n P_MATCH_BY_NAME, _MATCH_BY_NAME = 'matchByName', 'Match regressor features and raster bands by name'\r\n P_NFOLD, _NFOLD = 'nfold', 'Number of cross-validation folds'\r\n P_OPEN_REPORT, _OPEN_REPORT = 'openReport', 'Open output cross-validation regressor performance report in ' \\\r\n 'webbrowser after running algorithm'\r\n P_OUTPUT_REGRESSOR, _OUTPUT_REGRESSOR = 'outputRegressor', 'Output regressor'\r\n P_OUTPUT_REGRESSION, _OUTPUT_REGRESSION = 'outputRegression', 'Output regression layer'\r\n P_OUTPUT_REPORT, _OUTPUT_REPORT = 'outputRegressorPerformance', 'Output cross-validation regressor performance ' \\\r\n 'report'\r\n\r\n def displayName(self) -> str:\r\n return 'Regression workflow'\r\n\r\n def shortDescription(self) -> str:\r\n return 'The regression workflow combines regressor fitting and map prediction.' \\\r\n 'Optionally, the cross-validation performance of the regressor can be assessed.'\r\n\r\n def helpParameters(self) -> List[Tuple[str, str]]:\r\n return [\r\n (self._DATASET, 'Training dataset pickle file used for fitting the regressor.'),\r\n (self._REGRESSOR, 'Scikit-Learn Python code specifying a regressor.'),\r\n (self._RASTER, 'A raster layer used for prediction.'),\r\n (self._MATCH_BY_NAME, 'Whether to match raster bands and regressor features by name.'),\r\n (self._NFOLD, 'The number of folds used for assessing cross-validation performance. '\r\n 'Will be ignored, if the cross-validation performance assessment is skipped.'),\r\n (self._OPEN_REPORT, 'Whether to open the cross-validation performance report in the web browser. 
'\r\n 'Will be ignored, if the cross-validation performance assessment is skipped.'),\r\n (self._OUTPUT_REPORT, 'Output cross-validation performance report file destination.'),\r\n (self._OUTPUT_REGRESSOR, self.PickleFileDestination),\r\n (self._OUTPUT_REGRESSION, 'Predicted map file destination.')\r\n ]\r\n\r\n def group(self):\r\n return Group.Regression.value\r\n\r\n def initAlgorithm(self, configuration: Dict[str, Any] = None):\r\n self.addParameterRegressionDataset(self.P_DATASET, self._DATASET)\r\n self.addParameterRegressorCode(self.P_REGRESSOR, self._REGRESSOR)\r\n self.addParameterRasterLayer(self.P_RASTER, self._RASTER, None, True)\r\n self.addParameterBoolean(self.P_MATCH_BY_NAME, self._MATCH_BY_NAME, False, True, True)\r\n self.addParameterInt(self.P_NFOLD, self._NFOLD, 10, True, 2, 100)\r\n self.addParameterBoolean(self.P_OPEN_REPORT, self._OPEN_REPORT, True)\r\n self.addParameterFileDestination(\r\n self.P_OUTPUT_REPORT, self._OUTPUT_REPORT, self.ReportFileFilter, None, True, True\r\n )\r\n self.addParameterFileDestination(self.P_OUTPUT_REGRESSOR, self._OUTPUT_REGRESSOR, self.PickleFileFilter)\r\n self.addParameterRasterDestination(self.P_OUTPUT_REGRESSION, self._OUTPUT_REGRESSION, None, True, True)\r\n\r\n def checkParameterValues(self, parameters: Dict[str, Any], context: QgsProcessingContext) -> Tuple[bool, str]:\r\n filenameRegression = self.parameterAsOutputLayer(parameters, self.P_OUTPUT_REGRESSION, context)\r\n raster = self.parameterAsRasterLayer(parameters, self.P_RASTER, context)\r\n if filenameRegression is not None:\r\n if raster is None:\r\n return False, f'Wrong or missing parameter value: {self._RASTER}'\r\n return True, ''\r\n\r\n def processAlgorithm(\r\n self, parameters: Dict[str, Any], context: QgsProcessingContext, feedback: QgsProcessingFeedback\r\n ) -> Dict[str, Any]:\r\n filenameDataset = self.parameterAsFile(parameters, self.P_DATASET, context)\r\n code = self.parameterAsString(parameters, self.P_REGRESSOR, context)\r\n raster = self.parameterAsRasterLayer(parameters, self.P_RASTER, context)\r\n matchByName = self.parameterAsBoolean(parameters, self.P_MATCH_BY_NAME, context)\r\n nfold = self.parameterAsInt(parameters, self.P_NFOLD, context)\r\n openReport = self.parameterAsBoolean(parameters, self.P_OPEN_REPORT, context)\r\n filenameRegressor = self.parameterAsFileOutput(parameters, self.P_OUTPUT_REGRESSOR, context)\r\n filenameRegression = self.parameterAsOutputLayer(parameters, self.P_OUTPUT_REGRESSION, context)\r\n filenameReport = self.parameterAsFileOutput(parameters, self.P_OUTPUT_REPORT, context)\r\n\r\n with open(filenameRegressor + '.log', 'w') as logfile:\r\n feedback, feedback2 = self.createLoggingFeedback(feedback, logfile)\r\n self.tic(feedback, parameters, context)\r\n\r\n # fit regressor\r\n alg = FitGenericRegressorAlgorithm()\r\n alg.initAlgorithm()\r\n parameters = {\r\n alg.P_DATASET: filenameDataset,\r\n alg.P_REGRESSOR: code,\r\n alg.P_OUTPUT_REGRESSOR: filenameRegressor\r\n }\r\n self.runAlg(alg, parameters, None, feedback2, context, True)\r\n\r\n # prediction regression\r\n if filenameRegression is not None:\r\n alg = PredictRegressionAlgorithm()\r\n alg.initAlgorithm()\r\n parameters = {\r\n alg.P_RASTER: raster,\r\n alg.P_REGRESSOR: filenameRegressor,\r\n alg.P_MATCH_BY_NAME: matchByName,\r\n alg.P_OUTPUT_REGRESSION: filenameRegression\r\n }\r\n self.runAlg(alg, parameters, None, feedback2, context, True)\r\n\r\n # regressor performance\r\n if filenameReport is not None:\r\n alg = RegressorPerformanceAlgorithm()\r\n 
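alg.initAlgorithm()  # (added) assumed needed here, mirroring the fit and predict branches above\r\n                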
parameters = {\r\n alg.P_DATASET: filenameDataset,\r\n alg.P_REGRESSOR: filenameRegressor,\r\n alg.P_NFOLD: nfold,\r\n alg.P_OPEN_REPORT: openReport,\r\n alg.P_OUTPUT_REPORT: filenameReport\r\n }\r\n self.runAlg(alg, parameters, None, feedback2, context, True)\r\n\r\n result = {\r\n self.P_OUTPUT_REGRESSOR: filenameRegressor,\r\n self.P_OUTPUT_REGRESSION: filenameRegression,\r\n self.P_OUTPUT_REPORT: filenameReport,\r\n }\r\n self.toc(feedback, result)\r\n\r\n return result\r\n","repo_name":"EnMAP-Box/enmap-box","sub_path":"enmapboxprocessing/algorithm/regressionworkflowalgorithm.py","file_name":"regressionworkflowalgorithm.py","file_ext":"py","file_size_in_byte":7307,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"52"} +{"seq_id":"15541481578","text":"from libtoprammer.chip import *\n\n\nclass Chip_AtTinySPC_common(Chip):\n\tPROGCMD_SENDINSTR\t= 1 # Send an instruction to the chip\n\n\tSTAT_BUSY\t\t= 0x01 # Programmer is running a command\n\tSTAT_SDO\t\t= 0x02 # Raw SDO pin state\n\n\tdef __init__(self,\n\t\t chipPackage,\n\t\t chipPinVCC,\n\t\t chipPinsVPP,\n\t\t chipPinGND,\n\t\t signature,\n\t\t flashPageSize,\n\t\t flashPages,\n\t\t eepromPageSize,\n\t\t eepromPages,\n\t\t nrFuseBits,\n\t\t nrLockBits):\n\t\tChip.__init__(self,\n\t\t\t chipPackage=chipPackage,\n\t\t\t chipPinVCC=chipPinVCC,\n\t\t\t chipPinsVPP=chipPinsVPP,\n\t\t\t chipPinGND=chipPinGND)\n\t\tself.signature = signature\n\t\tself.flashPageSize = flashPageSize\n\t\tself.flashPages = flashPages\n\t\tself.eepromPageSize = eepromPageSize\n\t\tself.eepromPages = eepromPages\n\t\tself.nrFuseBits = nrFuseBits\n\t\tself.nrLockBits = nrLockBits\n\n\tdef readSignature(self):\n\t\tself.__enterPM()\n\t\tself.progressMeterInit(\"Reading signature\", 0)\n\t\tsignature = self.__readSignature()\n\t\tself.progressMeterFinish()\n\t\treturn signature\n\n\tdef erase(self):\n\t\tself.__enterPM()\n\t\tself.progressMeterInit(\"Erasing chip\", 0)\n\t\tself.__sendInstr(SDI=0x80, SII=0x4C)\n\t\tself.__sendInstr(SDI=0x00, SII=0x64)\n\t\tself.__sendInstr(SDI=0x00, SII=0x6C)\n\t\tself.__waitHighSDO()\n\t\tself.__sendNOP()\n\t\tself.progressMeterFinish()\n\n\tdef readProgmem(self):\n\t\tnrWords = self.flashPages * self.flashPageSize\n\t\timage = b\"\"\n\t\tself.__enterPM()\n\t\tself.progressMeterInit(\"Reading flash\", nrWords)\n\t\tself.__sendReadFlashInstr()\n\t\tcurrentHigh = -1\n\t\tbufferedBytes = 0\n\t\tfor word in range(0, nrWords):\n\t\t\tself.progressMeter(word)\n\t\t\tlow = word & 0xFF\n\t\t\thigh = (word >> 8) & 0xFF\n\t\t\tself.__sendInstr(SDI=low, SII=0x0C)\n\t\t\tif high != currentHigh:\n\t\t\t\tself.__sendInstr(SDI=high, SII=0x1C)\n\t\t\t\tcurrentHigh = high\n\t\t\tself.__sendInstr(SDI=0x00, SII=0x68)\n\t\t\tself.__sendInstr(SDI=0x00, SII=0x6C)\n\t\t\tself.__readSDOBufferHigh()\n\t\t\tbufferedBytes += 1\n\t\t\tself.__sendInstr(SDI=0x00, SII=0x78)\n\t\t\tself.__sendInstr(SDI=0x00, SII=0x7C)\n\t\t\tself.__readSDOBufferHigh()\n\t\t\tbufferedBytes += 1\n\t\t\tif bufferedBytes == self.top.getBufferRegSize():\n\t\t\t\timage += self.top.cmdReadBufferReg(bufferedBytes)\n\t\t\t\tbufferedBytes = 0\n\t\timage += self.top.cmdReadBufferReg(bufferedBytes)\n\t\tself.progressMeterFinish()\n\t\treturn image\n\n\tdef writeProgmem(self, image):\n\t\tnrWords = self.flashPages * self.flashPageSize\n\t\tif len(image) > nrWords * 2 or len(image) % 2 != 0:\n\t\t\tself.throwError(\"Invalid flash image size %d (expected <=%d and word aligned)\" %\\\n\t\t\t\t(len(image), nrWords * 
2))\n\t\tself.__enterPM()\n\t\tself.progressMeterInit(\"Writing flash\", len(image) // 2)\n\t\tself.__sendWriteFlashInstr()\n\t\tcurrentHigh = -1\n\t\tfor word in range(0, len(image) // 2):\n\t\t\tself.progressMeter(word)\n\t\t\tlow = word & 0xFF\n\t\t\thigh = (word >> 8) & 0xFF\n\t\t\tself.__sendInstr(SDI=low, SII=0x0C)\n\t\t\tself.__sendInstr(SDI=byte2int(image[word * 2 + 0]), SII=0x2C)\n\t\t\tself.__sendInstr(SDI=byte2int(image[word * 2 + 1]), SII=0x3C)\n\t\t\tself.__sendInstr(SDI=0x00, SII=0x7D)\n\t\t\tself.__sendInstr(SDI=0x00, SII=0x7C)\n\t\t\tif ((word + 1) % self.flashPageSize == 0) or word == len(image) // 2 - 1:\n\t\t\t\tif currentHigh != high:\n\t\t\t\t\tself.__sendInstr(SDI=high, SII=0x1C)\n\t\t\t\t\tcurrentHigh = high\n\t\t\t\tself.__sendInstr(SDI=0x00, SII=0x64)\n\t\t\t\tself.__sendInstr(SDI=0x00, SII=0x6C)\n\t\t\t\tself.__waitHighSDO()\n\t\tself.__sendNOP()\n\t\tself.progressMeterFinish()\n\n\tdef readEEPROM(self):\n\t\tnrBytes = self.eepromPages * self.eepromPageSize\n\t\timage = b\"\"\n\t\tself.__enterPM()\n\t\tself.progressMeterInit(\"Reading EEPROM\", nrBytes)\n\t\tself.__sendReadEEPROMInstr()\n\t\tcurrentPage = -1\n\t\tbufferedBytes = 0\n\t\tfor i in range(0, nrBytes):\n\t\t\tself.progressMeter(i)\n\t\t\tlow = i & 0xFF\n\t\t\thigh = (i >> 8) & 0xFF\n\t\t\tself.__sendInstr(SDI=low, SII=0x0C)\n\t\t\tif currentPage != high:\n\t\t\t\tself.__sendInstr(SDI=high, SII=0x1C)\n\t\t\t\tcurrentPage = high\n\t\t\tself.__sendInstr(SDI=0x00, SII=0x68)\n\t\t\tself.__sendInstr(SDI=0x00, SII=0x6C)\n\t\t\tself.__readSDOBufferHigh()\n\t\t\tbufferedBytes += 1\n\t\t\tif bufferedBytes == self.top.getBufferRegSize():\n\t\t\t\timage += self.top.cmdReadBufferReg(bufferedBytes)\n\t\t\t\tbufferedBytes = 0\n\t\timage += self.top.cmdReadBufferReg(bufferedBytes)\n\t\tself.progressMeterFinish()\n\t\treturn image\n\n\tdef writeEEPROM(self, image):\n\t\tnrBytes = self.eepromPages * self.eepromPageSize\n\t\tif len(image) > nrBytes:\n\t\t\tself.throwError(\"Invalid EEPROM image size %d (expected <=%d)\" %\\\n\t\t\t\t(len(image), nrBytes))\n\t\tself.__enterPM()\n\t\tself.progressMeterInit(\"Writing EEPROM\", len(image))\n\t\tself.__sendWriteEEPROMInstr()\n\t\tfor i in range(0, len(image)):\n\t\t\tself.progressMeter(i)\n\t\t\tlow = i & 0xFF\n\t\t\thigh = (i >> 8) & 0xFF\n\t\t\tself.__sendInstr(SDI=low, SII=0x0C)\n\t\t\tself.__sendInstr(SDI=high, SII=0x1C)\n\t\t\tself.__sendInstr(SDI=byte2int(image[i]), SII=0x2C)\n\t\t\tself.__sendInstr(SDI=0x00, SII=0x6D)\n\t\t\tself.__sendInstr(SDI=0x00, SII=0x64)\n\t\t\tself.__sendInstr(SDI=0x00, SII=0x6C)\n\t\t\tself.__waitHighSDO()\n\t\tself.__sendNOP()\n\t\tself.progressMeterFinish()\n\n\tdef readFuse(self):\n\t\tusedBitsMask = (1 << self.nrFuseBits) - 1\n\t\tunusedBitsMask = usedBitsMask ^ 0xFFFFFF\n\t\tfuses = []\n\t\tself.__enterPM()\n\t\tself.progressMeterInit(\"Reading fuses\", 0)\n\t\tself.__sendInstr(SDI=0x04, SII=0x4C)\n\t\tself.__sendInstr(SDI=0x00, SII=0x68)\n\t\tself.__sendInstr(SDI=0x00, SII=0x6C)\n\t\tself.__readSDOBufferHigh()\n\t\tfuses.append(int2byte(self.top.cmdReadBufferReg8() |\n\t\t\t\t ((unusedBitsMask >> 0) & 0xFF)))\n\t\tif self.nrFuseBits > 8:\n\t\t\tself.__sendInstr(SDI=0x04, SII=0x4C)\n\t\t\tself.__sendInstr(SDI=0x00, SII=0x7A)\n\t\t\tself.__sendInstr(SDI=0x00, SII=0x7E)\n\t\t\tself.__readSDOBufferHigh()\n\t\t\tfuses.append(int2byte(self.top.cmdReadBufferReg8() |\n\t\t\t\t\t ((unusedBitsMask >> 8) & 0xFF)))\n\t\tif self.nrFuseBits > 16:\n\t\t\tself.__sendInstr(SDI=0x04, SII=0x4C)\n\t\t\tself.__sendInstr(SDI=0x00, 
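The writeProgmem loop above streams each word address as separate low/high bytes, resends the high byte only when it changes, and commits a page either after flashPageSize words or at the final word. The commit schedule in isolation (PAGE_SIZE stands in for the device-specific value; names are mine):

```python
PAGE_SIZE = 32  # words per flash page; device-specific in the real driver

def page_schedule(n_words):
    """Yield (word, low, high, commit) for each word of a flash write."""
    for word in range(n_words):
        low = word & 0xFF           # address low byte, loaded with SII 0x0C
        high = (word >> 8) & 0xFF   # address high byte, sent only on change
        commit = ((word + 1) % PAGE_SIZE == 0) or (word == n_words - 1)
        yield word, low, high, commit

if __name__ == "__main__":
    commits = [w for w, lo, hi, c in page_schedule(70) if c]
    print(commits)  # -> [31, 63, 69]: one commit per full page plus the tail
```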
SII=0x6A)\n\t\t\tself.__sendInstr(SDI=0x00, SII=0x6E)\n\t\t\tself.__readSDOBufferHigh()\n\t\t\tfuses.append(int2byte(self.top.cmdReadBufferReg8() |\n\t\t\t\t\t ((unusedBitsMask >> 16) & 0xFF)))\n\t\tself.progressMeterFinish()\n\t\treturn b\"\".join(fuses)\n\n\tdef writeFuse(self, image):\n\t\tif len(image) != roundup(self.nrFuseBits, 8) // 8:\n\t\t\tself.throwError(\"Invalid Fuses image size %d (expected %d)\" %\\\n\t\t\t\t(len(image), roundup(self.nrFuseBits, 8) // 8))\n\t\tusedBitsMask = (1 << self.nrFuseBits) - 1\n\t\tself.__enterPM()\n\t\tself.progressMeterInit(\"Writing fuses\", 0)\n\t\tself.__sendInstr(SDI=0x40, SII=0x4C)\n\t\tself.__sendInstr(SDI=(image[0] & (usedBitsMask >> 0)), SII=0x2C)\n\t\tself.__sendInstr(SDI=0x00, SII=0x64)\n\t\tself.__sendInstr(SDI=0x00, SII=0x6C)\n\t\tself.__waitHighSDO()\n\t\tif self.nrFuseBits > 8:\n\t\t\tself.__sendInstr(SDI=0x40, SII=0x4C)\n\t\t\tself.__sendInstr(SDI=(image[1] & (usedBitsMask >> 8)), SII=0x2C)\n\t\t\tself.__sendInstr(SDI=0x00, SII=0x74)\n\t\t\tself.__sendInstr(SDI=0x00, SII=0x7C)\n\t\t\tself.__waitHighSDO()\n\t\tif self.nrFuseBits > 16:\n\t\t\tself.__sendInstr(SDI=0x40, SII=0x4C)\n\t\t\tself.__sendInstr(SDI=(image[2] & (usedBitsMask >> 16)), SII=0x2C)\n\t\t\tself.__sendInstr(SDI=0x00, SII=0x66)\n\t\t\tself.__sendInstr(SDI=0x00, SII=0x6E)\n\t\t\tself.__waitHighSDO()\n\t\tself.progressMeterFinish()\n\n\tdef readLockbits(self):\n\t\tusedBitsMask = (1 << self.nrLockBits) - 1\n\t\tunusedBitsMask = usedBitsMask ^ 0xFF\n\t\tself.__enterPM()\n\t\tself.progressMeterInit(\"Reading lockbits\", 0)\n\t\tself.__sendInstr(SDI=0x04, SII=0x4C)\n\t\tself.__sendInstr(SDI=0x00, SII=0x78)\n\t\tself.__sendInstr(SDI=0x00, SII=0x7C)\n\t\tself.__readSDOBufferHigh()\n\t\tlockbits = int2byte(self.top.cmdReadBufferReg8() | unusedBitsMask)\n\t\tself.progressMeterFinish()\n\t\treturn lockbits\n\n\tdef writeLockbits(self, image):\n\t\tif len(image) != roundup(self.nrLockBits, 8) // 8:\n\t\t\tself.throwError(\"Invalid Lockbits image size %d (expected %d)\" %\\\n\t\t\t\t(len(image), roundup(self.nrLockBits, 8) // 8))\n\t\tusedBitsMask = (1 << self.nrLockBits) - 1\n\t\tself.__enterPM()\n\t\tself.progressMeterInit(\"Writing lockbits\", 0)\n\t\tself.__sendInstr(SDI=0x20, SII=0x4C)\n\t\tself.__sendInstr(SDI=(byte2int(image[0]) & usedBitsMask), SII=0x2C)\n\t\tself.__sendInstr(SDI=0x00, SII=0x64)\n\t\tself.__sendInstr(SDI=0x00, SII=0x6C)\n\t\tself.__waitHighSDO()\n\t\tself.progressMeterFinish()\n\n\tdef __readSignature(self):\n\t\tself.__sendInstr(SDI=0x08, SII=0x4C)\n\t\tfor i in range(0, 3):\n\t\t\tself.__sendInstr(SDI=i, SII=0x0C)\n\t\t\tself.__sendInstr(SDI=0x00, SII=0x68)\n\t\t\tself.__sendInstr(SDI=0x00, SII=0x6C)\n\t\t\tself.__readSDOBufferHigh()\n\t\treturn self.top.cmdReadBufferReg()[0:3]\n\n\tdef __enterPM(self):\n\t\t\"Enter HV programming mode.\"\n\t\tself.applyVCC(False)\n\t\tself.applyVPP(False)\n\t\tself.applyGND(False)\n\t\tself.top.cmdSetVCCVoltage(5)\n\t\tself.top.cmdSetVPPVoltage(0)\n\t\tself.top.cmdSetVPPVoltage(12)\n\t\tself.applyGND(True)\n\t\tself.applyVCC(True)\n\n\t\tself.__setPins(SCI=0, SDO_en=0, RST_en=1, RST=0)\n\t\tfor i in range(0, 6):\n\t\t\tself.__setPins(SCI=0, SDO_en=0, RST_en=1, RST=0)\n\t\t\tself.__setPins(SCI=1, SDO_en=0, RST_en=1, RST=0)\n\t\tself.__setPins(SCI=0, SDO_en=1, SDO=0, RST_en=1, RST=0)\n\t\tself.top.hostDelay(0.001)\n\t\tself.__setPins(SDO_en=1, SDO=0, RST_en=0)\n\t\tself.applyVPP(True)\n\t\tself.top.hostDelay(0.001)\n\t\tself.__setPins(SDO_en=0)\n\t\tself.top.hostDelay(0.01)\n\n\t\tsignature = self.__readSignature()\n\t\tif 
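readFuse and readLockbits above build `usedBitsMask = (1 << n) - 1` and XOR it against an all-ones word so that unused bits read back as 1. A two-line check of those masks (helper name and width parameter are mine; the fuse code uses a 24-bit word, the lockbit code an 8-bit one):

```python
def masks(nr_bits, width=8):
    used = (1 << nr_bits) - 1
    unused = used ^ ((1 << width) - 1)
    return used, unused

print(masks(5))  # -> (31, 224): 0b00011111 used, 0b11100000 forced high
```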
signature != self.signature:\n\t\t\tmsg = \"Unexpected device signature. \" +\\\n\t\t\t \"Want %02X%02X%02X, but got %02X%02X%02X\" % \\\n\t\t\t\t(byte2int(self.signature[0]), byte2int(self.signature[1]),\n\t\t\t\t byte2int(self.signature[2]),\n\t\t\t\t byte2int(signature[0]), byte2int(signature[1]),\n\t\t\t\t byte2int(signature[2]))\n\t\t\tif self.top.getForceLevel() >= 1:\n\t\t\t\tself.printWarning(msg)\n\t\t\telse:\n\t\t\t\tself.throwError(msg)\n\n\tdef __sendReadEEPROMInstr(self):\n\t\tself.__sendInstr(SDI=0x03, SII=0x4C)\n\n\tdef __sendWriteEEPROMInstr(self):\n\t\tself.__sendInstr(SDI=0x11, SII=0x4C)\n\n\tdef __sendReadFlashInstr(self):\n\t\tself.__sendInstr(SDI=0x02, SII=0x4C)\n\n\tdef __sendWriteFlashInstr(self):\n\t\tself.__sendInstr(SDI=0x10, SII=0x4C)\n\n\tdef __sendNOP(self):\n\t\tself.__sendInstr(SDI=0x00, SII=0x4C)\n\n\tdef __sendInstr(self, SDI, SII):\n\t\tself.__setSDI(SDI)\n\t\tself.__setSII(SII)\n\t\tself.__loadCommand(self.PROGCMD_SENDINSTR)\n\t\t# We do not poll the busy flag, because that would result\n\t\t# in a significant slowdown. We delay long enough for the\n\t\t# command to finish execution, instead.\n\t\tself.top.hostDelay(0.001)\n\n\tdef __setSDI(self, sdi):\n\t\tself.top.cmdFPGAWrite(0x13, sdi & 0xFF)\n\n\tdef __setSII(self, sii):\n\t\tself.top.cmdFPGAWrite(0x14, sii & 0xFF)\n\n\tdef __loadCommand(self, command):\n\t\tself.top.cmdFPGAWrite(0x12, command & 0xFF)\n\n\tdef __runCommandSync(self, command):\n\t\tself.__loadCommand(command)\n\t\tself.__busyWait()\n\n\tdef __setPins(self, SCI=0, SDO_en=0, SDO=0, RST_en=0, RST=0):\n\t\tdata = 0\n\t\tif SCI:\n\t\t\tdata |= 1\n\t\tif SDO_en:\n\t\t\tdata |= 2\n\t\tif SDO:\n\t\t\tdata |= 4\n\t\tif RST_en:\n\t\t\tdata |= 8\n\t\tif RST:\n\t\t\tdata |= 16\n\t\tself.top.cmdFPGAWrite(0x15, data)\n\n\tdef __getStatusFlags(self):\n\t\tself.top.cmdFPGARead(0x12)\n\t\tstat = self.top.cmdReadBufferReg()\n\t\treturn byte2int(stat[0])\n\n\tdef __readSDOBufferHigh(self):\n\t\tself.top.cmdFPGARead(0x10)\n\n\tdef __rawSDOState(self):\n\t\treturn bool(self.__getStatusFlags() & self.STAT_SDO)\n\n\tdef __busy(self):\n\t\treturn bool(self.__getStatusFlags() & self.STAT_BUSY)\n\n\tdef __busyWait(self):\n\t\tfor i in range(0, 100):\n\t\t\tif not self.__busy():\n\t\t\t\treturn\n\t\t\tself.top.hostDelay(0.01)\n\t\tself.throwError(\"Timeout in busywait.\")\n\n\tdef __waitHighSDO(self):\n\t\tfor i in range(0, 100):\n\t\t\tif self.__rawSDOState():\n\t\t\t\treturn\n\t\t\tself.top.hostDelay(0.01)\n\t\tself.throwError(\"Timeout waiting for SDO.\")\n","repo_name":"mbuesch/toprammer","sub_path":"libtoprammer/chips/attinyspc_common.py","file_name":"attinyspc_common.py","file_ext":"py","file_size_in_byte":11215,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"52"} +{"seq_id":"43470134891","text":"from asyncio import QueueEmpty\n\nfrom pyrogram import Client\nfrom pyrogram.types import Message\n\nfrom .. import queues\nfrom ..callsmusic import callsmusic\nfrom ..helpers.chat_id import get_chat_id\nfrom ..helpers.decorators import authorized_users_only\nfrom ..helpers.decorators import errors\nfrom ..helpers.filters import command\nfrom ..helpers.filters import other_filters\n\n\n@Client.on_message(command('pause') & other_filters)\n@errors\n@authorized_users_only\nasync def pause(_, message: Message):\n (\n await message.reply_text('⏸ متوقف ... 
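__busyWait and __waitHighSDO above share one shape: poll a status bit a bounded number of times with a small delay, then fail loudly. Factored out as a generic helper (a sketch; the attempt count and delay mirror the originals, the name is mine):

```python
import time

def wait_until(predicate, attempts=100, delay=0.01):
    """Re-check a condition at most `attempts` times, sleeping in between.
    Returns True on success, False on timeout; the caller decides whether
    a timeout is fatal (the driver above raises via throwError)."""
    for _ in range(attempts):
        if predicate():
            return True
        time.sleep(delay)
    return False
```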
', False)\n    ) if (\n        callsmusic.pause(get_chat_id(message.chat))\n    ) else (\n        await message.reply_text('👋🏻 Nothing is playing ', False)\n    )\n\n\n@Client.on_message(command('resume') & other_filters)\n@errors\n@authorized_users_only\nasync def resume(_, message: Message):\n    (\n        await message.reply_text('▶️ Resuming ... ', False)\n    ) if (\n        callsmusic.resume(get_chat_id(message.chat))\n    ) else (\n        await message.reply_text('⚠️ Nothing is paused ... ', False)\n    )\n\n\n@Client.on_message(command('stop') & other_filters)\n@errors\n@authorized_users_only\nasync def stop(_, message: Message):\n    chat_id = get_chat_id(message.chat)\n    if chat_id not in callsmusic.active_chats:\n        await message.reply_text('No song is playing to stop .. ⚠️', False)\n    else:\n        try:\n            queues.clear(chat_id)\n        except QueueEmpty:\n            pass\n        await callsmusic.stop(chat_id)\n        await message.reply_text('⏹ Stopped streaming ... ', False)\n\n\n@Client.on_message(command('skip') & other_filters)\n@errors\n@authorized_users_only\nasync def skip(_, message: Message):\n    chat_id = get_chat_id(message.chat)\n    if chat_id not in callsmusic.active_chats:\n        await message.reply_text('No song is playing to skip .. ⚠️', False)\n    else:\n        queues.task_done(chat_id)\n        if queues.is_empty(chat_id):\n            await callsmusic.stop(chat_id)\n        else:\n            await callsmusic.set_stream(\n                chat_id,\n                queues.get(chat_id)['file'],\n            )\n        await message.reply_text('✅ Skipped the current song ...', False)\n\n\n@Client.on_message(command('mute') & other_filters)\n@errors\n@authorized_users_only\nasync def mute(_, message: Message):\n    result = callsmusic.mute(get_chat_id(message.chat))\n    (\n        await message.reply_text('✅ Muted the bot ...', False)\n    ) if (\n        result == 0\n    ) else (\n        await message.reply_text('👋🏻 Already muted ...', False)\n    ) if (\n        result == 1\n    ) else (\n        await message.reply_text('❌ Not in the voice chat ', False)\n    )\n\n\n@Client.on_message(command('unmute') & other_filters)\n@errors\n@authorized_users_only\nasync def unmute(_, message: Message):\n    result = callsmusic.unmute(get_chat_id(message.chat))\n    (\n        await message.reply_text('✅ Unmuted ', False)\n    ) if (\n        result == 0\n    ) else (\n        await message.reply_text('👋🏻 I am not muted ... ', False)\n    ) if (\n        result == 1\n    ) else (\n        await message.reply_text('👋🏻 I am not in the voice chat ⚠️ ... 
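The mute() handler above chains conditional expressions around await; Python parses this as `A if c1 else (B if c2 else C)`, which works but is hard to read. The same reply logic as a plain if/elif/else (a sketch, assuming a pyrogram Message object as in the handlers above):

```python
async def mute_replies(message, result):
    # result: 0 = just muted, 1 = already muted, anything else = not in call
    if result == 0:
        await message.reply_text('✅ Muted the bot ...', False)
    elif result == 1:
        await message.reply_text('👋🏻 Already muted ...', False)
    else:
        await message.reply_text('❌ Not in the voice chat ', False)
```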
', False)\n )\n","repo_name":"TLeMusic/callsmusic","sub_path":"callsmusic/handlers/admins.py","file_name":"admins.py","file_ext":"py","file_size_in_byte":3450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"158044261","text":"from gtts import gTTS #google text to speech\r\nimport speech_recognition as sr #support for various speech recognition engines/APIs\r\nimport os #to interact with os\r\nimport subprocess #open processes\r\nimport re \r\nimport webbrowser #to access browser\r\nimport smtplib\r\nimport requests \r\nimport json\r\nfrom time import ctime\r\nfrom datetime import datetime #to get system time\r\nimport time\r\nimport playsound #to play audio files\r\nimport random #generate random file names\r\nimport wolframalpha #to ask questions and report analysis and generation\r\nfrom geopy.geocoders import Nominatim #to geo-locate addresses\r\n#from weather import Weather\r\n\r\ndef talkToMe(audio):\r\n \"speaks audio passed as argument\"\r\n\r\n jarp_speak(audio)\r\n for line in audio.splitlines():\r\n os.system(\"say \" + audio )\r\n\r\n \r\n # use the system's inbuilt say command instead of mpg123\r\n # text_to_speech = gTTS(text=audio, lang='en')\r\n # text_to_speech.save('audio.mp3')\r\n # os.system('mpg123 audio.mp3')\r\n\r\n\r\n \r\ndef myCommand(ask = False):\r\n \"listens for commands\"\r\n\r\n r = sr.Recognizer()\r\n\r\n with sr.Microphone() as source:\r\n if ask:\r\n jarp_speak(ask)\r\n #print('What can I do for you today, sir?')\r\n r.pause_threshold = 1\r\n r.adjust_for_ambient_noise(source, duration=1)\r\n audio = r.listen(source)\r\n\r\n try:\r\n command = r.recognize_google(audio).lower()\r\n jarp_speak('You said: ' + command + '\\n')\r\n\r\n #loop back to continue to listen for commands if unrecognizable speech is received\r\n except sr.UnknownValueError:\r\n jarp_speak('Didn\\'t get you there sir.')\r\n command = myCommand();\r\n\r\n #if assistant command is not found\r\n except sr.RequestError:\r\n jarp_speak('Sorry sir, my speech service is down.')\r\n\r\n return command\r\n\r\n\r\ndef jarp_speak(audio_string):\r\n tts = gTTS(text = audio_string, lang = 'en')\r\n r = random.randint(1, 10000000)\r\n audio_file = 'audio-' + str(r) + '.mp3'\r\n tts.save(audio_file)\r\n playsound.playsound(audio_file)\r\n print(audio_string)\r\n location = \"D:/Abhishek/AI Assistants\"\r\n path = os.path.join(location, audio_file)\r\n os.remove(path)\r\n\r\n\r\ndef assistant(command):\r\n \"if statements for executing commands\"\r\n\r\n if 'hey jarp' in command or 'hi jarp' in command:\r\n jarp_speak('Here to serve you sir.') \r\n\r\n elif 'who created you' in command or 'who made you' in command:\r\n jarp_speak('A very smart set of people.')\r\n\r\n elif 'what\\'s up' in command or 'what you up to' in command or 'whats up' in command:\r\n jarp_speak('Nothing much, sir. Just waiting to assist you.')\r\n\r\n elif 'what are you' in command or 'tell me about yourself' in command:\r\n jarp_speak('Allow me to introduce myself. I\\'m Jarp. A virtual artificial intelligence. \\nAnd I am here to assist you with a variety of tasks as best as I can, twenty four hours a day, seven days a week. \\n\\nImporting all preferences from home interface. 
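jarp_speak above saves the gTTS file into the current directory but deletes it from a hardcoded `D:/Abhishek/AI Assistants` path, which breaks whenever the script runs from anywhere else. A sketch of the same round-trip that removes the file from where it was actually written (function name is mine; the gTTS/playsound calls are the ones the script already uses):

```python
import os
import random
from gtts import gTTS
import playsound

def speak(text):
    """Synthesize, play, then always clean up the temporary mp3."""
    audio_file = 'audio-{}.mp3'.format(random.randint(1, 10_000_000))
    gTTS(text=text, lang='en').save(audio_file)
    try:
        playsound.playsound(audio_file)
    finally:
        os.remove(audio_file)  # same path we saved to, no hardcoded folder
```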
Systems are now fully operational.')\r\n \r\n elif 'what does jarp stand for' in command:\r\n jarp_speak('It stands for Just Another Replaced Person.')\r\n\r\n elif 'you up' in command or 'jarp you there' in command:\r\n jarp_speak('For you sir, always.')\r\n\r\n elif 'thanks' in command:\r\n jarp_speak('Anything for you sir.')\r\n \r\n elif 'that will be all' in command or 'exit' in command:\r\n jarp_speak('Can\\'t wait to assist you sir.')\r\n exit()\r\n\r\n elif 'switchoff PC' in command or 'shutdown PC' in command:\r\n shutdown = myCommand('Do you want to shutdown the PC sir?')\r\n if shutdown == 'no':\r\n exit()\r\n else:\r\n jarp_speak('Shuting down this PC in approximately five seconds sir.')\r\n os.system('shutdown /s /t 1')\r\n\r\n elif 'what\\'s the time' in command or 'what time is it' in command:\r\n now = datetime.now()\r\n time = now.strftime(\"%H:%M:%S\")\r\n jarp_speak(time)\r\n\r\n elif 'what\\'s the date' in command or 'what date is it' in command or 'what\\'s today\\'s date' in command:\r\n now = datetime.now()\r\n date = now.strftime(\"%m/%d/%Y\")\r\n jarp_speak(date)\r\n\r\n elif 'what day is it' in command:\r\n #now = datetime.now()\r\n #day = now.strftime()\r\n tday = datetime.date.today()\r\n daytoday = tday.ctime()\r\n jarp_speak(daytoday)\r\n\r\n elif 'i love you' in command:\r\n jarp_speak('I love you 3000')\r\n\r\n elif 'don\\'t let me down' in command:\r\n jarp_speak('I would never sir.')\r\n\r\n elif 'search' in command:\r\n search = myCommand('What do you want me to search for?')\r\n url = 'https://google.com/search?q=' + search\r\n webbrowser.get().open(url)\r\n jarp_speak('Here is what I found for ' + search)\r\n\r\n elif 'location' in command:\r\n location = myCommand('What location do you need, sir?')\r\n url = 'https://google.nl/maps/place/' + location + '/&'\r\n webbrowser.get().open(url)\r\n jarp_speak('Here is the location: ' + location)\r\n\r\n elif 'find me this address' in command or 'find me an address' in command:\r\n find_address = myCommand('Please state the address sir.')\r\n geolocator = Nominatim(user_agent = \"Veronica\")\r\n location = geolocator.geocode(find_address)\r\n jarp_speak('Here\\'s the address sir: ' + location.address)\r\n jarp_speak('The latitude is: ' + location.latitude + ' and longitude is: ' + location.longitude)\r\n\r\n elif 'open google' in command:\r\n reg_ex = re.search('open google (.*)', command)\r\n url = 'https://www.google.com/'\r\n if reg_ex:\r\n google = reg_ex.group(1)\r\n url = url + 'r/' + google\r\n webbrowser.open(url)\r\n jarp_speak('Here you go, sir.')\r\n\r\n elif 'open youtube' in command:\r\n reg_ex = re.search('open youtube (.*)', command)\r\n url = 'https://www.youtube.com/'\r\n if reg_ex:\r\n youtube = reg_ex.group(1)\r\n url = url + 'r/' + youtube\r\n webbrowser.open(url)\r\n jarp_speak('Here you go, sir.')\r\n\r\n elif 'open website' in command:\r\n reg_ex = re.search('open website (.+)', command)\r\n if reg_ex:\r\n domain = reg_ex.group(1)\r\n url = 'https://www.' 
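Two of the branches above can never work as written: myCommand() lower-cases the transcript, so `'switchoff PC' in command` never matches, and `datetime.date.today()` raises AttributeError because the module does `from datetime import datetime` (similarly, concatenating `location.latitude`, a float, onto a string raises TypeError without str()). A lower-cased dispatch-table sketch that sidesteps the casing bug and shows a working weekday lookup (HANDLERS and dispatch are mine, not part of the script):

```python
from datetime import datetime

HANDLERS = {
    'switchoff pc': lambda: 'shutting down',                 # lower-case key
    'what day is it': lambda: datetime.now().strftime('%A'), # e.g. 'Monday'
}

def dispatch(command):
    for phrase, handler in HANDLERS.items():
        if phrase in command:   # command is already lower-cased upstream
            return handler()
    return None

print(dispatch('jarp, what day is it'))
```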
+ domain\r\n webbrowser.open(url)\r\n jarp_speak('Here you go, sir.')\r\n else:\r\n pass\r\n\r\n elif 'question' in command:\r\n question = myCommand('What question do you have for me sir?')\r\n jarp_speak(question)\r\n app_id = \"AX5Y26-KXAGRT3QP5\"\r\n client = wolframalpha.Client(app_id)\r\n res = client.query(question)\r\n if res['@success'] == 'false':\r\n jarp_speak('Not resloved')\r\n else:\r\n pod0 = res['pod'][0]['subpod']['plaintext']\r\n jarp_speak(pod0)\r\n # pod[1] may contains the answer\r\n pod1 = res['pod'][1]\r\n # checking if pod1 has primary=true or title=result|definition\r\n if (('definition' in pod1['@title'].lower()) or ('result' in pod1['@title'].lower()) or (pod1.get('@primary','false') == 'true')):\r\n # extracting result from pod1\r\n result = pod1['subpod']['plaintext']\r\n jarp_speak(result)\r\n\r\n elif 'joke' in command:\r\n res = requests.get(\r\n 'https://icanhazdadjoke.com/',\r\n headers={\"Accept\":\"application/json\"}\r\n )\r\n if res.status_code == requests.codes.ok:\r\n jarp_speak(str(res.json()['joke']))\r\n else:\r\n jarp_speak('Oops! I ran out of jokes')\r\n\r\n elif 'open calculator' in command:\r\n subprocess.Popen('C:\\\\Windows\\\\System32\\\\calc.exe')\r\n\r\n elif 'open notepad' in command:\r\n subprocess.Popen('C:\\\\Windows\\\\System32\\\\notepad.exe')\r\n\r\n elif 'open wordpad' in command:\r\n subprocess.Popen('C:\\\\Windows\\\\System32\\\\write.exe')\r\n\r\n #elif 'current weather in' in command:\r\n # reg_ex = re.search('current weather in (.*)', command)\r\n # if reg_ex:\r\n # city = reg_ex.group(1)\r\n # weather = Weather()\r\n # location = weather.lookup_by_location(city)\r\n # condition = location.condition()\r\n # jarp_speak('The Current weather in %s is %s The tempeture is %.1f degree' % (city, condition.text(), (int(condition.temp())-32)/1.8))\r\n\r\n #elif 'weather forecast in' in command:\r\n # reg_ex = re.search('weather forecast in (.*)', command)\r\n # if reg_ex:\r\n # city = reg_ex.group(1)\r\n # weather = Weather()\r\n # location = weather.lookup_by_location(city)\r\n # forecasts = location.forecast()\r\n # for i in range(0,3):\r\n # jarp_speak('On %s will it %s. The maximum temperture will be %.1f degree.'\r\n # 'The lowest temperature will be %.1f degrees.' 
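The joke branch above compares `res.status_code` against `requests.codes.ok` by hand. A variant with a timeout and raise_for_status(), so transport errors and non-200 replies raise instead of being silently swallowed (function name is mine; URL and Accept header are the ones the script uses):

```python
import requests

def get_dad_joke(timeout=5):
    res = requests.get('https://icanhazdadjoke.com/',
                       headers={'Accept': 'application/json'},
                       timeout=timeout)
    res.raise_for_status()  # turns 4xx/5xx into an exception
    return res.json()['joke']
```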
% (forecasts[i].date(), forecasts[i].text(), (int(forecasts[i].high())-32)/1.8, (int(forecasts[i].low())-32)/1.8))\r\n\r\n elif 'current weather' in command:\r\n api_key='30f82e5c879f2ac7af9fc8919e3e041a'\r\n base_url='https://api.openweathermap.org/data/2.5/weather?'\r\n jarp_speak('What\\'s the city name?')\r\n city_name=myCommand()\r\n complete_url=base_url+\"appid=\"+api_key+\"&q=\"+city_name\r\n response = requests.get(complete_url)\r\n x=response.json()\r\n if x[\"cod\"]!=\"404\":\r\n y=x[\"main\"]\r\n current_temperature = y[\"temp\"] - 273.15\r\n current_humidiy = y[\"humidity\"]\r\n z = x[\"weather\"]\r\n weather_description = z[0][\"description\"]\r\n jarp_speak(\"Temperature in \" + city_name +\" in degree celsius is \" +\r\n str(current_temperature) +\r\n \"\\nhumidity in percentage is \" +\r\n str(current_humidiy) +\r\n \"\\ndescription \" +\r\n str(weather_description))\r\n else:\r\n jarp_speak('City Not Found.')\r\n\r\n elif 'news' in command:\r\n news = webbrowser.open_new_tab(\"https://timesofindia.indiatimes.com/home/headlines\")\r\n jarp_speak('Here are some headlines from the Times of India sir.')\r\n\r\n elif 'email' in command:\r\n jarp_speak('Who is the recipient?')\r\n recipient = myCommand()\r\n\r\n if 'brian' in recipient:\r\n jarp_speak('What should I say?')\r\n content = myCommand()\r\n\r\n #init gmail SMTP\r\n mail = smtplib.SMTP('smtp.gmail.com', 587)\r\n\r\n #identify to server\r\n mail.ehlo()\r\n\r\n #encrypt session\r\n mail.starttls()\r\n\r\n #login\r\n mail.login('username', 'password')\r\n\r\n #send message\r\n mail.sendmail('brian hunt', 'brian@protonmail.com', content)\r\n\r\n #end mail connection\r\n mail.close()\r\n\r\n jarp_speak('Email sent.')\r\n\r\n else:\r\n jarp_speak('I don\\'t know what you mean!')\r\n\r\n\r\njarp_speak('Alive and ready for you sir.')\r\n\r\n#loop to continue executing multiple commands\r\nwhile True:\r\n assistant(myCommand())\r\n","repo_name":"iicemann/Jarp-Virtual-Assistant","sub_path":"Jarp.py","file_name":"Jarp.py","file_ext":"py","file_size_in_byte":11238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21466155466","text":"from selenium import webdriver\nfrom bs4 import BeautifulSoup\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\n\n## NAVER \n# 페이지 이동\ndef NAVER(browser) : \n url = \"https://recruit.navercorp.com/naver/job/list/developer?searchSysComCd=&entTypeCd=001&searchTxt=\"\n browser.get(url)\n\n soup = BeautifulSoup(browser.page_source, \"lxml\")\n recruits = soup.find_all(\"span\", attrs={\"class\":\"list_con\"})\n\n print(\"\\n\\nnaver career recruit 신입/개발 직군\")\n print(\"링크 : \", url)\n print(\"\\n\")\n for recruit in recruits:\n print(\"-\" * 100)\n print(recruit.strong.get_text())\n print(recruit.em.get_text())\n print()\n\n\ndef SKKU(browser) : \n url = \"https://job.skku.edu/login.aspx?redir=/loginproc.aspx%3fredir2%3d%2fdefault.aspx%3f\"\n browser.get(url)\n\n browser.find_element_by_xpath('//*[@id=\"user_id\"]').send_keys(\"ididididid\")\n browser.find_element_by_xpath('//*[@id=\"user_pw\"]').send_keys(\"******\")\n browser.find_element_by_class_name(\"loginButton\").click()\n browser.find_element_by_xpath('//*[@id=\"interestFrm\"]/div[2]/button').click()\n\n time.sleep(1)\n elem = browser.find_element_by_xpath('//*[@id=\"interestJobList\"]')\n # elem = 
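The 'current weather' branch above converts the OpenWeatherMap temperature from Kelvin by subtracting 273.15. The same conversion as standalone helpers with a spot check (helper names are mine):

```python
def kelvin_to_celsius(k):
    return k - 273.15

def kelvin_to_fahrenheit(k):
    return k * 9 / 5 - 459.67

print(round(kelvin_to_celsius(300.15), 2))     # 27.0
print(round(kelvin_to_fahrenheit(300.15), 2))  # 80.6
```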
WebDriverWait(browser, 10).until(EC.presence_of_element_located((By.XPATH, '//*[@id=\"interestJobList\"]')))\n\n print(elem.text)\n\n\nimport time\n\nif __name__ == '__main__':\n\n options = webdriver.ChromeOptions()\n options.headless = True\n options.add_argument(\"window-size=1920x1080\")\n options.add_argument(\"window-size=1920x1080\")\n options.add_argument(\"user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36\" )\n options.add_experimental_option(\"excludeSwitches\", [\"enable-logging\"])\n\n browser = webdriver.Chrome(\"chromedriver_win32/chromedriver.exe\",options=options)\n browser.maximize_window()\n \n NAVER(browser)\n time.sleep(1)\n SKKU(browser)\n browser.quit()\n","repo_name":"ndy2/web_crawling_tutorial","sub_path":"NadoCoding/webscapping_basic/career_info_scrapping/scrap.py","file_name":"scrap.py","file_ext":"py","file_size_in_byte":2139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12528391234","text":"from copy import deepcopy\n\nfrom django.db.transaction import atomic\nfrom rest_framework import serializers\nfrom rest_framework.exceptions import NotFound\n\nfrom vgloss import models, actions\n\n\nclass ActionSerializer(serializers.Serializer):\n\n def to_internal_value(self, data):\n class_name = data.get(\"type\")\n if not class_name:\n raise serializers.ValidationError(\"Action type is required\")\n try:\n cls = actions.ACTION_CLASSES[class_name]\n except KeyError as e:\n raise serializers.ValidationError(\"Invalid action type given\") from e\n return cls(**data[\"data\"])\n\nclass FileSerializer(serializers.ModelSerializer):\n tags = serializers.ReadOnlyField(source=\"tag_ids\")\n\n class Meta:\n model = models.File\n fields = [\n \"hash\", \"name\", \"is_image\", \"timestamp\", \"tags\",\n #\"size\",\n ]\n\nclass FileDetailSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.File\n fields = [\n \"mimetype\", \"metadata\", \"paths\",\n ]\n\nclass TagListSerializer(serializers.ListSerializer):\n\n def to_internal_value(self, data):\n data = deepcopy(data)\n\n # If the frontend sends a string ID, it is a temporary ID. Remove it\n # here so it will validate. Later, in save(), we'll look at\n # initial_data to resolve the temporary IDs in the parent links to real\n # ones.\n for tag in data:\n if isinstance(tag.get(\"id\"), str):\n del tag[\"id\"]\n if isinstance(tag.get(\"parent\"), str):\n tag[\"parent\"] = None\n\n return super().to_internal_value(data)\n\n def save(self, delete_unseen=False, ignore_not_found=False):\n with atomic():\n tag_ids = [t.get(\"id\") for t in self.validated_data]\n tags_qs = models.Tag.objects.filter(id__in=tag_ids)\n tag_map = {tag.id: tag for tag in tags_qs}\n\n # Perform creation / update\n saved_tags = []\n for tag_data in self.validated_data:\n tag_id = tag_data.get(\"id\")\n if tag_id is None:\n tag = None\n else:\n tag = tag_map.get(tag_id)\n if tag is None:\n if not ignore_not_found:\n raise NotFound(f'ID {tag_id} not found', 404)\n else:\n continue\n if tag is None:\n tag = self.child.create(tag_data)\n else:\n self.child.update(tag, tag_data)\n saved_tags.append(tag)\n\n # Handle temporary IDs\n # Strings in id or parent fields are temporary IDs. They are\n # present in initial_data but removed in validation. 
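SKKU() above sleeps a fixed second and leaves an explicit wait commented out. The explicit-wait form polls until the element exists and fails loudly on timeout, which is more robust than time.sleep; a sketch using the same imports the script already declares:

```python
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

def wait_for(browser, xpath, timeout=10):
    """Block until the element is present, up to `timeout` seconds."""
    return WebDriverWait(browser, timeout).until(
        EC.presence_of_element_located((By.XPATH, xpath)))
```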
We take a\n # 2-pass approach to saving tags that used temporary IDs to\n # reference their parent.\n #\n # 1) Get a mapping of temp IDs to the real ID that was saved in the\n # database.\n temp_id_map = {}\n for tag, initial_tag_data in zip(saved_tags, self.initial_data):\n temp_tag_id = initial_tag_data.get(\"id\")\n if isinstance(temp_tag_id, str):\n temp_id_map[temp_tag_id] = tag.id\n # 2) Find tags with temp IDs in their parent field and re-save them\n # with the real ID.\n for tag, initial_tag_data in zip(saved_tags, self.initial_data):\n temp_parent_id = initial_tag_data.get(\"parent\")\n if isinstance(temp_parent_id, str):\n tag.parent_id = temp_id_map[temp_parent_id]\n tag.save()\n\n # Delete unseen tags\n # Do this last, since we want to be sure the delete cascades to\n # everything needed.\n if delete_unseen:\n saved_tag_ids = [t.id for t in saved_tags]\n unseen_tags = models.Tag.objects.exclude(id__in=saved_tag_ids)\n unseen_tags.delete()\n\n def delete(self):\n with atomic():\n tag_ids = [t.get(\"id\") for t in self.initial_data]\n tags_qs = models.Tag.objects.filter(id__in=tag_ids)\n tags_qs.delete()\n\nclass TagSerializer(serializers.ModelSerializer):\n id = serializers.IntegerField(required=False)\n\n class Meta:\n model = models.Tag\n fields = [\"id\", \"name\", \"parent\"]\n list_serializer_class = TagListSerializer\n\nclass FileTagListSerializer(serializers.ListSerializer):\n\n def save(self):\n for filetag in self.validated_data:\n models.FileTag.objects.get_or_create(\n file_id=filetag[\"file_hash\"],\n tag_id=filetag[\"tag_id\"],\n )\n\n def delete(self):\n for filetag in self.validated_data:\n models.FileTag.objects.filter(\n file_id=filetag[\"file_hash\"],\n tag_id=filetag[\"tag_id\"],\n ).delete()\n\nclass FileTagSerializer(serializers.ModelSerializer):\n file = serializers.CharField(source=\"file_hash\")\n tag = serializers.IntegerField(source=\"tag_id\")\n\n class Meta:\n model = models.FileTag\n fields = [\"file\", \"tag\"]\n list_serializer_class = FileTagListSerializer\n","repo_name":"mbrown1413/vgloss","sub_path":"vgloss/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":5325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15447587305","text":"\"\"\"\nIrti Haq and Charlie Norgaard\nCSE 163 Final Project\nThis file contains functions for generating plots using\nthe dataframes created in data_processing.py.\n\"\"\"\n\nimport geopandas as gpd\nimport plotly.express as px\n\n\ndef time_plot(df, var_x, var_y):\n \"\"\"\n This Function takes in a Dataframe and a Variable for X and for Y\n This function creates interactive scatter plot using the given dataframe\n and variables. The plot includes a slider to adjust the year.\n It then returns a plotly figure\n \"\"\"\n fig = px.scatter(df, x=var_x, y=var_y, trendline=\"ols\",\n animation_frame=\"Year\",\n animation_group='Country',\n hover_name='Country')\n return fig\n\n\ndef all_country_plot(df, var_x, var_y):\n \"\"\"\n This Function takes in a Dataframe and a Variable for X and for Y and then\n This function creates a interactive scatter plot using the given dataframe\n and variables. 
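The two-pass temporary-ID scheme described in the comments above can be shown in isolation with plain dicts; the integer allocator here is hypothetical, standing in for the database-assigned primary keys:

```python
def resolve_temp_ids(tags):
    """tags: list of {'id': str|int, 'parent': str|int|None}; string ids
    are temporary. Pass 1 assigns real ids; pass 2 rewrites parent links."""
    temp_map, next_id = {}, 1000  # hypothetical id allocator
    for tag in tags:  # pass 1: map temp ids to real ids
        if isinstance(tag['id'], str):
            temp_map[tag['id']] = next_id
            tag['id'], next_id = next_id, next_id + 1
    for tag in tags:  # pass 2: fix parent references
        if isinstance(tag.get('parent'), str):
            tag['parent'] = temp_map[tag['parent']]
    return tags

print(resolve_temp_ids([{'id': 'tmp-a', 'parent': None},
                        {'id': 'tmp-b', 'parent': 'tmp-a'}]))
# -> [{'id': 1000, 'parent': None}, {'id': 1001, 'parent': 1000}]
```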
The countries in this plot are color coordinated.\n It then returns a plotly figure\n \"\"\"\n fig = px.scatter(df, x=var_x, y=var_y,\n trendline=\"ols\", animation_frame=\"Year\",\n animation_group='Country', hover_name='Country',\n color='Country')\n return fig\n\n\ndef select_country_plot(df, var_x, var_y):\n \"\"\"\n This Function takes in a Dataframe and a Variable for X and for Y and then\n This function creates interactive scatter plot using the given dataframe\n and variables.\n The countries in this plot are color coordinated and can be included/\n removed from the plot at the users discretion.\n It then returns a plotly figure\n \"\"\"\n fig = px.scatter(df, x=var_x, y=var_y,\n trendline=\"ols\", hover_name='Year', color='Country')\n return fig\n\n\ndef corr_plot(df):\n \"\"\"\n This Function takes in a Dataframe with R^2 values and then\n This function creates a plot depicting the correlation between variables\n according the the calculated R^2 value by country.\n It then returns a plotly figure\n \"\"\"\n fig = px.bar(df, x=\"Country\", y=\"R Square\", color='Correlation',\n hover_name='Country')\n return fig\n\n\ndef map_dataset(df, var_size, var_color, countries_shp):\n \"\"\"\n Takes in a Dataframe, a variable to be encode to size of the points on the\n plot, a variable to encoded to the color of the points on the plot, and a\n shape file with the shapes of the countries and then in\n This function creates a geopandas dataframe plot using the given dataframe\n and shape file.\n The size of the points are dictated by the given variables var_size and\n color by var_color. It then returns and tupple with the a new geodataframe,\n a mask labeling positive and negetive var_size values, var_size, and\n var_color\n \"\"\"\n net_df_geo = df.merge(countries_shp,\n left_on='iso3',\n right_on='iso3',\n how='left')\n\n net_df_geo = gpd.GeoDataFrame(net_df_geo, geometry='geometry')\n\n net_df_geo['geometry'] = net_df_geo['geometry'].representative_point()\n\n mask = net_df_geo[var_size] < 0\n net_df_geo[var_size] = abs(net_df_geo[var_size])\n\n net_df_geo['lon'] = net_df_geo['geometry'].x\n net_df_geo['lat'] = net_df_geo['geometry'].y\n\n return (net_df_geo, mask, var_size, var_color)\n\n\ndef plot_map(df, var_color, var_size):\n \"\"\"\n This function takes in a dataframe, a variable to be encode to size of the\n points on the plot, a variable to encoded to the color of the points\n on the plot. 
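The plot helpers above all rely on plotly express mapping a tidy DataFrame straight to an interactive figure. A self-contained miniature of the animated-scatter variant (toy data; `trendline='ols'` is omitted here because it pulls in statsmodels):

```python
import pandas as pd
import plotly.express as px

df = pd.DataFrame({
    'Country': ['A', 'A', 'B', 'B'],
    'Year':    [2000, 2001, 2000, 2001],
    'x':       [1, 2, 3, 4],
    'y':       [2, 4, 6, 8],
})
fig = px.scatter(df, x='x', y='y', animation_frame='Year',
                 animation_group='Country', hover_name='Country')
# fig.show()  # opens the interactive figure with a year slider
```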
It then creates an interactive map plot using the\n dataframe generated from map_dataset and returns a plotly figure\n \"\"\"\n px.set_mapbox_access_token(\"pk.eyJ1IjoiaXJ0aSIsImEiOiJjbDN0aGd0dGkwOWQ4M2JxcHd5eThjMzNxIn0.pOpdYEBWEtOuCB5WrwkjiA\")\n map = px.scatter_mapbox(df,\n lat=\"lat\",\n lon=\"lon\",\n color=var_color,\n size=var_size,\n color_continuous_scale=px.colors.sequential.Plasma,\n zoom=1,\n mapbox_style='satellite-streets',\n custom_data=[var_color, var_size, 'Country'])\n\n return map\n\n\n# should be in main method of test file\ndef plot_decr_map(df_mask):\n \"\"\"\n This funtion takes in a tupple with a geodataframe, a mask, var_color,\n var_size and it then filters the df based on the mask and returns\n a tuple with the filtered dataframe, var_color, and var_size\n \"\"\"\n map_df, mask, var_size, var_color = df_mask\n\n map_df = map_df.loc[mask, ['Country', var_color, var_size, 'lon', 'lat']]\n return plot_map(map_df, var_color, var_size)\n\n\ndef plot_no_decr_map(df_mask):\n \"\"\"\n This funtion takes in a tupple with a geodataframe, a mask, var_color,\n var_size and it then filters the df based vals not on the mask and returns\n a tuple with the filtered dataframe, var_color, and var_size\n \"\"\"\n map_df, mask, var_size, var_color = df_mask\n\n map_df = map_df.loc[~mask, ['Country', var_color, var_size, 'lon', 'lat']]\n return plot_map(map_df, var_color, var_size)\n","repo_name":"IrtiHaq/CSE-163_Final","sub_path":"plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":5227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2402268037","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mai 11, 2018\n@author: ThiefOfTime\n\"\"\"\n\n# PySide imports\ntry:\n from PySide.QtGui import QMainWindow\nexcept ModuleNotFoundError:\n from PySide2.QtWidgets import QMainWindow\n\nfrom fridayUI.weekly_add_input_dialoq import WeeklyAddInputDialoq\n\n\nclass MealInputDialog(QMainWindow, WeeklyAddInputDialoq):\n\n def __init__(self, window_name, message_text, name_text, links_text, buttons_text, return_func):\n super(MealInputDialog, self).__init__()\n self.setupUi(self)\n\n self.__return_function = return_func\n\n self.setWindowTitle(window_name)\n self.message_lb.setText(message_text)\n self.name_lb.setText(name_text)\n self.links_lb.setText(links_text)\n self.cancel_bt.setText(buttons_text[0])\n self.accept_bt.setText(buttons_text[1])\n\n # buttons\n self.cancel_bt.clicked.connect(self.cancel)\n self.accept_bt.clicked.connect(self.accept)\n\n def change_settings(self, window_name, message_text, name_text, links_text, buttons_text, return_function):\n '''\n changes the window settings\n :param window_name:\n :param message_text:\n :param name_text:\n :param links_text:\n :param buttons_text:\n :return:\n '''\n self.setWindowTitle(window_name)\n self.message_lb.setText(message_text)\n self.name_lb.setText(name_text)\n self.links_lb.setText(links_text)\n self.cancel_bt.setText(buttons_text[0])\n self.accept_bt.setText(buttons_text[1])\n self.__return_function = return_function\n\n def accept(self):\n '''\n accept function\n :return:\n '''\n list_items = []\n for i in range(self.links_lw.count()):\n list_items.append(self.links_lw.item(i))\n self.__return_function(self.name_le.text(), list_items)\n self.close()\n\n def cancel(self):\n '''\n cancel function\n :return:\n '''\n self.__return_function(None, None)\n 
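plot_decr_map and plot_no_decr_map above split one GeoDataFrame into the rows where the boolean mask holds and its complement via `df.loc[mask]` / `df.loc[~mask]`. The same idiom on a toy frame:

```python
import pandas as pd

df = pd.DataFrame({'v': [-2, 3, -1, 4]})
mask = df['v'] < 0
neg, pos = df.loc[mask], df.loc[~mask]  # decreasing vs non-decreasing rows
print(list(neg['v']), list(pos['v']))   # [-2, -1] [3, 4]
```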
self.close()\n\n","repo_name":"ThiefOfTime/KitchenOrganisator","sub_path":"src/fridayUI/weekly_add_input_dialog_func.py","file_name":"weekly_add_input_dialog_func.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21319198258","text":"import vs\n\nhoistClassPrefix = \"20 Rigging-28_Hoist_\"\n\ndef changeClass(h): \n\thoistType = vs.GetRField(h,\"HoistVW\",\"Type\")\n\thoistFunc = vs.GetRField(h,\"HoistVW\",\"Function\")\n\t\n\tclassName = hoistClassPrefix + hoistFunc + \" \" + hoistType\n\t\n\tvs.SetClass(h,className)\n\treturn()\n\nactiveClass = vs.ActiveClass()\n\nhoistCrit = \"(R IN ['HoistVW'])\"\n\nvs.ForEachObject(changeClass,hoistCrit)\n\nvs.NameClass(activeClass)\n","repo_name":"brageiversen/vectorworksPlugins","sub_path":"ClassPlugins/ClassHoistTypeAndLoad.py","file_name":"ClassHoistTypeAndLoad.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"15580976246","text":"import itertools\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport csv\r\nimport concurrent.futures\r\nimport xlrd\r\n\r\n\r\n'''alpha_list = ['A', 'B', 'C', 'D', 'E', 'F','G', 'H', 'I', 'J', 'K','L','M','N','O','P','Q','R','S','T','U','V','X','Y','Z']\r\n\r\n\r\ndef get_links(alphabet):\r\n print(f\"Getting {alphabet} numbers\")\r\n page = requests.get(f'https://www.motscroises.fr/lettre/{alphabet}').text\r\n soup = BeautifulSoup(page, 'html.parser')\r\n main_div = soup.find('div', class_='search-result-box')\r\n lis = main_div.findAll('li')\r\n semis = []\r\n link_append = []\r\n for li in lis:\r\n a_tag = li.find('a')\r\n brack = a_tag.text\r\n bra = brack.split('-')\r\n semi = bra[0]\r\n semis.append(semi)\r\n for se in semis:\r\n new = se.split('[')\r\n print(new)\r\n news = new[1]\r\n link_append.append(news[1:])\r\n\r\n return link_append\r\n\r\n\r\nquestions_links = []\r\nquestions_links = questions_links[4134:]\r\n\r\ndef get_questions(alph, link):\r\n print(f\"Getting {alph} {link} questions\")\r\n global questions_links\r\n page = requests.get(f'https://www.motscroises.fr/lettre/{alph}/{link}').text\r\n soup = BeautifulSoup(page, 'html.parser')\r\n ques = soup.findAll('div', class_='questions')\r\n for que in ques:\r\n asn = que.text\r\n print(asn)\r\n questions_links.append(asn.upper())\r\n\r\n\r\nfull_link_list = []\r\n\r\n\r\nwith concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:\r\n result = executor.map(get_links, alpha_list)\r\n full_link_list.append(result)\r\n\r\n\r\nmerged = list(itertools.chain(*full_link_list))\r\n\r\nfor index, alpha in enumerate(alpha_list):\r\n for number in merged[index]:\r\n get_questions(alpha, number)\r\n\r\n\r\nheader_added = False\r\ni = 39802\r\n\r\n'''\r\nfile_location = \"Urls.xlsx\"\r\nworkbook = xlrd.open_workbook(file_location)\r\nsheet = workbook.sheet_by_index(0)\r\nall_links = []\r\nfor row in range(1, 10001):\r\n all_links.append(sheet.cell_value(row,0))\r\n\r\ni = 0\r\ndef get_solution(url):\r\n global header_added, i\r\n page = requests.get(url).text\r\n soup = BeautifulSoup(page, 'html.parser')\r\n ques_div = soup.find('p', class_='header-description')\r\n ques = ques_div.find('span').text\r\n ans_divs = soup.findAll('div', class_='puzzle-solution')\r\n ans = ans_divs[0].text\r\n print(\"Solution \", i)\r\n i += 1\r\n dict1 ={\"Words\": ques, \"Solution\": ans}\r\n with open('Puzzle10k.csv', 'a+', encoding='utf-8') as f:\r\n w 
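Both the commented-out crawler above and the solution scraper fan work out with concurrent.futures.ThreadPoolExecutor and executor.map. The minimal shape of that pattern (fetch is a stand-in for the real request; materializing with list() forces completion and surfaces any worker exception):

```python
from concurrent.futures import ThreadPoolExecutor

def fetch(url):
    return len(url)  # stand-in for the real HTTP request

urls = ['https://example.com/a', 'https://example.com/b']
with ThreadPoolExecutor(max_workers=7) as executor:
    results = list(executor.map(fetch, urls))  # preserves input order
print(results)
```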
= csv.DictWriter(f, dict1.keys())\r\n if not header_added:\r\n w.writeheader()\r\n header_added = True\r\n w.writerow(dict1)\r\n\r\n\r\n\r\nwith concurrent.futures.ThreadPoolExecutor(max_workers=7) as executor:\r\n result = executor.map(get_solution, all_links)\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"abhishekrai43/Python-Scraping-Scripts","sub_path":"quiz.py","file_name":"quiz.py","file_ext":"py","file_size_in_byte":2756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34019987440","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nimport math\n\nx = np.linspace(0,3*math.pi,200)\ny1,y2 = np.sin(x),np.cos(x)\n\nplt.rcParams['font.sans-serif']=['SimHei']\nplt.rcParams['axes.unicode_minus']=False #解决负数坐标显示问题\n\nplt.figure\nplt.plot(x,y1,color='red', linewidth=1.5,linestyle='-',label=r'$sin(t)$')\nplt.plot(x,y2,color='blue',linewidth=1.5,linestyle='-',label=r'$cos(t)$')\nplt.title('插森')\nplt.xlabel('森哥凉了')\nplt.ylabel('森哥挂了')\nplt.legend(loc = 'best',frameon = False)\nplt.grid()\nplt.show()\n","repo_name":"liutianxu171101/old_python_programs_on_windows","sub_path":"画图.py","file_name":"画图.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34774629278","text":"# Copyright 2020-2021 Wojciech Wideł \r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport os\r\n\r\n'''\r\nThe functions runningTimesUkraineVSSegrid, efficienciesSegrid and runtimeWRTTargetSteps parse\r\nthe text files storing tests' results and create appropriate dictionaries.\r\n\r\nWithin the createBoxPlots function, plotBoxPlot is applied to these dictionaries to generate\r\nboxplots. 
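get_solution() above tracks whether the CSV header was written with a module-level `header_added` flag. A sketch that derives the same decision from the file size instead, so repeated runs cannot duplicate the header (function name is mine; fieldnames and filename match the script):

```python
import csv
import os

def append_row(path, row, fieldnames=('Words', 'Solution')):
    write_header = not os.path.exists(path) or os.path.getsize(path) == 0
    with open(path, 'a', newline='', encoding='utf-8') as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        if write_header:
            writer.writeheader()
        writer.writerow(row)

append_row('Puzzle10k.csv', {'Words': 'CLUE', 'Solution': 'ANSWER'})
```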
The boxplots are saved as .jpg files.\r\n'''\r\n\r\n\r\ndef runningTimesUkraineVSSegrid(N=20):\r\n ''''\r\n the following mapping is used:\r\n\r\n 1 = U5f\r\n 2 = U5o\r\n 3 = U5fo\r\n 4 = Sf\r\n 5 = So\r\n 6 = Sfo\r\n '''\r\n runningTimes = {i: [] for i in range(1,7)}\r\n for i in range(N):\r\n if i < 15:\r\n with open('newTestsResults_Ukraine_{}.txt'.format(i)) as f:\r\n contents = f.readlines()\r\n # three blocks\r\n for k in range(3):\r\n j = 9*k # first line of the block is line contents[j]; the last one is contents[j+6]\r\n block = contents[j:j+8]\r\n runtime = float(block[6].strip())\r\n runningTimes[k+1].append(runtime)\r\n if i < 19:\r\n with open('TestsResults_SEGRID_{}.txt'.format(i)) as f:\r\n contents = f.readlines()\r\n # three blocks\r\n for k in range(3):\r\n j = 8 * k # first line of the block is line contents[j]; the last one is contents[j+6]\r\n block = contents[j:j + 7]\r\n runtime = float(block[5].strip())\r\n runningTimes[k + 4].append(runtime)\r\n # relabel keys\r\n runningTimes['U5f'] = runningTimes.pop(1)\r\n runningTimes['U5o'] = runningTimes.pop(2)\r\n runningTimes['U5fo'] = runningTimes.pop(3)\r\n runningTimes['Sf'] = runningTimes.pop(4)\r\n runningTimes['So'] = runningTimes.pop(5)\r\n runningTimes['Sfo'] = runningTimes.pop(6)\r\n\r\n ukr_avg_median = 0\r\n seg_avg_median = 0\r\n for item in runningTimes:\r\n med = np.median(runningTimes[item])\r\n print(item, med)\r\n if 'U' in item:\r\n ukr_avg_median += med\r\n else:\r\n seg_avg_median += med\r\n print('avg medians:')\r\n print('ukraine: {}'.format(ukr_avg_median/3))\r\n print('segrid: {}'.format(seg_avg_median / 3))\r\n # for item in runningTimes:\r\n # runningTimes[item].sort()\r\n # median = np.median(runningTimes[item])\r\n # numb_of_less_than_median = len([x for x in runningTimes[item] if x < median])\r\n # print(item, median, max(runningTimes[item]), numb_of_less_than_median)\r\n # print('the above is about running times\\n')\r\n return runningTimes\r\n\r\ndef efficienciesSegrid(N=19):\r\n '''\r\n 1 = Sf\r\n 2 = So\r\n 3 = Sfo\r\n '''\r\n effs = {i: [] for i in range(1, 4)}\r\n for i in range(N):\r\n with open('TestsResults_SEGRID_{}.txt'.format(i)) as f:\r\n contents = f.readlines()\r\n # three blocks\r\n for k in range(3):\r\n j = 8 * k # first line of the block is line contents[j]; the last one is contents[j+6]\r\n block = contents[j:j + 7]\r\n efficiency = float(block[4].strip())\r\n effs[k + 1].append(efficiency)\r\n effs['Sf'] = effs.pop(1)\r\n effs['So'] = effs.pop(2)\r\n effs['Sfo'] = effs.pop(3)\r\n return effs\r\n\r\ndef runtimeWRTTargetSteps():\r\n dirs = ['exp_U5f_{}targets'.format(i) for i in [2, 4, 6, 8]]\r\n numberOfTargets = {dirs[i]: 2*i+2 for i in range(4)}\r\n runtimesToPlot = {dir: [] for dir in dirs}\r\n graphSizes = {dir: [] for dir in dirs}\r\n for dir in dirs:\r\n for fileName in [f for f in os.listdir(dir) if f.endswith('.txt')]:\r\n # get sizes of graphs\r\n if fileName == 'graph_sizes.txt':\r\n with open(dir + '/' + fileName, 'r') as f:\r\n # look for lines likes 'NUMBER OF NODES IN THE GRAPH: 88'. 
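runtimeWRTTargetSteps() below scans graph_sizes.txt for lines like 'NUMBER OF NODES IN THE GRAPH: 88' and splits on the colon. The same extraction as a small testable helper (a sketch; the name is mine):

```python
def extract_int(line, key='NUMBER OF NODES IN THE GRAPH'):
    """Return the integer after the colon if `key` occurs in the line."""
    if key in line:
        return int(line.split(':', 1)[1].strip())
    return None

print(extract_int('NUMBER OF NODES IN THE GRAPH: 88'))  # 88
```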
HEHE.\r\n for line in f.readlines():\r\n if 'NUMBER OF NODES IN THE GRAPH' in line:\r\n graphSize = int(line.split(':')[1].strip())\r\n graphSizes[dir].append(graphSize)\r\n else:\r\n # get runtimes\r\n with open(dir + '/' + fileName, 'r') as f:\r\n runtime = round(float(f.readline().strip()), 3)\r\n runtimesToPlot[dir].append(runtime)\r\n # rename keys\r\n for dir in dirs:\r\n runtimesToPlot[numberOfTargets[dir]] = runtimesToPlot.pop(dir)\r\n graphSizes[numberOfTargets[dir]] = graphSizes.pop(dir)\r\n\r\n return runtimesToPlot, graphSizes\r\n\r\n\r\ndef plotBoxPlot(dic, xlabel, ylabel, name, upperLabels = None):\r\n '''\r\n Taken from https://stackoverflow.com/questions/47657651/boxplot-from-dictionary-with-different-length\r\n and modified.\r\n\r\n If upperLabels is not None, then it is expected to be a dictionary with keys the same as the keys in 'dic'.\r\n '''\r\n # Python 3.5+\r\n labels, data = [*zip(*dic.items())] # 'transpose' items to parallel key, value lists\r\n\r\n # or backwards compatible\r\n labels, data = dic.keys(), dic.values()\r\n\r\n fig, ax = plt.subplots()\r\n ax.boxplot(data)\r\n plt.xticks(range(1, len(labels) + 1), labels)\r\n ax.set_xlabel(xlabel)\r\n ax.set_ylabel(ylabel)\r\n ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',\r\n alpha=0.5)\r\n\r\n if upperLabels is not None:\r\n ax.xaxis.grid(True, linestyle='-', which='major', color='lightgrey',\r\n alpha=0.5)\r\n top = 2150\r\n numBoxes = len(dic)\r\n pos = np.arange(numBoxes) + 1\r\n upLabels = [str(upperLabels[item]) for item in upperLabels]\r\n for tick, label in zip(range(numBoxes), ax.get_xticklabels()):\r\n k = tick % 2\r\n ax.text(pos[tick], top + (top*0.02), upLabels[tick],\r\n horizontalalignment='center', size='x-small')#, weight=weights[k])\r\n ax.text(2.5,top + (top*0.04),'Median graph size', horizontalalignment='center')\r\n\r\n plt.savefig(name+'.jpg', bbox_inches='tight', dpi=300)\r\n return\r\n\r\ndef createBoxPlots():\r\n # runtime vs target steps\r\n plotData, graphSizes = runtimeWRTTargetSteps()\r\n medians = {numberOfSteps: int(np.median(graphSizes[numberOfSteps])) for numberOfSteps in graphSizes}\r\n plotBoxPlot(dic=plotData, xlabel='Number of target attack steps', ylabel = 'Computation time (seconds)', name='boxplotTargetSteps', upperLabels=medians)\r\n #for item in plotData:\r\n # print(item, 'range: {}'.format(max(plotData[item]) - min(plotData[item])))\r\n for item in graphSizes:\r\n print(item, np.median(graphSizes[item]))\r\n\r\n # segrid efficiencies\r\n segridEffs = efficienciesSegrid()\r\n plotBoxPlot(segridEffs, xlabel='Experiment', ylabel='Efficiency score of the result', name='boxplotEfficiencySegrid')\r\n\r\n # running times Ukraine vs SEGRID\r\n runtimes = runningTimesUkraineVSSegrid()\r\n plotBoxPlot(runtimes, xlabel='Experiment', ylabel='Computation time (seconds)', name='boxplotTimeUkraineVSSegrid')\r\n return\r\n\r\n\r\nif __name__ == '__main__':\r\n createBoxPlots()\r\n","repo_name":"mal-lang/securicad-coa-generator","sub_path":"boxplots.py","file_name":"boxplots.py","file_ext":"py","file_size_in_byte":7728,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"32823074539","text":"\"\"\"Drsync.\n\nUsage:\n drsync (-r | --lr ) [--loglevel LEVEL | -p PATH]\n drsync (-s | -S | -f | -F | -l) [--loglevel LEVEL | -p PATH] SYNCLOCAITON\n\nOptions:\n -r --register Register a new sync location\n --lr List registered sync location\n -l --livesync Send local changes in realtime\n -s --send Send local 
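plotBoxPlot above unpacks a dict of lists into parallel label/value sequences and hands the values to ax.boxplot. A minimal standalone version of that pattern (sample data made up):

```python
import matplotlib.pyplot as plt

data = {'Sf': [1.0, 1.2, 0.9], 'So': [1.5, 1.4], 'Sfo': [2.0, 1.8, 2.2]}
fig, ax = plt.subplots()
ax.boxplot(list(data.values()))          # one box per dict entry
ax.set_xticks(range(1, len(data) + 1))
ax.set_xticklabels(data.keys())
# plt.savefig('example.jpg', bbox_inches='tight', dpi=300)
```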
changes\n -S --sendtest Send local changes test only\n -f --fetch Fetch remote changes\n -F --fetchtest Fetch remote changes test only\n\n --loglevel LEVEL Set logging level [default: 20]\n -p PATH --path Override which directory will be synced, default is current directory\n -h --help Show this screen\n\"\"\"\nfrom dotmap import DotMap\n\n__author__ = 'dsheoran'\n\nimport hashlib\nimport logging\nimport os\n\nfrom docopt import docopt\n\nfrom .sync import sync\n\n# Globals\nWK_BASE_DIR = '.drsync'\nWK_SYN_DIR_FORMAT = '{0}/{1}/{2}_{3}'\nWK_RSYNC_FILTER_FILE = '{0}/rsyncfilter'\nWK_CONF_FILE = '{0}/drsync.conf'\nWK_SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))\nUSER_HOME = os.path.abspath(os.environ['HOME'])\n\n\ndef get_file_content(filename):\n \"\"\"\n Returns the content of file. Existence of same file under app directory in user home overrides the default\n :param filename:\n :return: content of file\n \"\"\"\n # check if user have overridden anything\n filepath = \"{}/{}/{}\".format(USER_HOME, WK_BASE_DIR, filename)\n if os.path.exists(filepath):\n return open(filepath).read()\n\n # read default file content\n filepath = \"{}/{}/{}\".format(WK_SCRIPT_DIR, 'data', filename)\n if os.path.exists(filepath):\n return open(filepath).read()\n\n raise RuntimeError(\"Invalid filename given or doesn't exists, filename: {}\".format(filename))\n\n\nclass Drsync:\n \"\"\"\n Syncs directories between hosts\n \"\"\"\n\n def main(self):\n arguments = docopt(__doc__)\n\n # convert args to dotmap\n args = DotMap(_dynamic=False)\n for k, v in arguments.iteritems():\n args[k.replace('--', '')] = v\n\n # Add default and convert data types if needed\n args.loglevel = int(args.loglevel)\n args.path = os.path.abspath(args.path).rstrip('/') if args.path else os.path.abspath((os.getcwd()))\n\n logging.root.setLevel(args.loglevel)\n # register given directory\n if args.register:\n self.register_directory(args)\n exit(0)\n\n working_dir = args.path\n dir_profile = WK_SYN_DIR_FORMAT.format(\n USER_HOME,\n WK_BASE_DIR,\n os.path.split(working_dir)[1],\n hashlib.md5(working_dir).hexdigest()\n )\n if not os.path.exists(dir_profile):\n logging.error(\n \"Directory {} is not registered with drsync, run with '-r' to register\".format(working_dir))\n exit(1)\n\n if args.lr:\n logging.info(\"Following sync location are available.\\n\\t{}\".format(\n '\\n\\t'.join(os.listdir(dir_profile))\n ))\n exit(0)\n\n _dir_profile = \"{}/{}\".format(dir_profile, args.SYNCLOCAITON)\n if not os.path.exists(_dir_profile):\n logging.error(\"Invalid sync location name, valid names are:\\n\\t{}\".format(\n '\\t\\n'.join(os.listdir(dir_profile))))\n exit(1)\n else:\n dir_profile = _dir_profile\n\n # sync given directory\n logging.info(\"Synchronizing directory\")\n wk_conf_file = WK_CONF_FILE.format(dir_profile)\n\n sync(wk_conf_file,\n dry_run=(args.sendtest or args.fetchtest),\n live=args.livesync,\n reverse_direction=(args.fetch or args.fetchtest))\n\n def register_directory(self, args):\n \"\"\"Configures a directory to work with drsync.\"\"\"\n # Ask Questions and write wk sync conf file\n working_dir = args.path\n config = dict()\n config['working_dir_parent'] = os.path.dirname(working_dir)\n config['working_dir_name'] = os.path.split(working_dir)[-1]\n\n host_path = raw_input(\n 'Enter @:, where path is parent directory where directory to be synced will be created:')\n\n remote_host, remote_working_dir_parent = host_path.rsplit(':')\n remote_working_dir_parent = remote_working_dir_parent.rstrip('/')\n\n sync_task_name = 
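The profile directory above is keyed by the md5 of the absolute working path, via `hashlib.md5(working_dir).hexdigest()`. That only works on Python 2 (the script also uses raw_input and dict.iteritems); on Python 3, md5 needs bytes. A Python 3 sketch of the same scheme (function name is mine):

```python
import hashlib
import os

def profile_dir(working_dir, base=os.path.expanduser('~/.drsync')):
    """~/.drsync/<dirname>_<md5(path)> for a given synced directory."""
    name = os.path.split(working_dir)[1]
    digest = hashlib.md5(working_dir.encode('utf-8')).hexdigest()
    return os.path.join(base, '{}_{}'.format(name, digest))

print(profile_dir('/home/user/project'))
```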
raw_input(\"Enter a name which you would like to identify above path as sync_task:\")\n\n dir_profile = WK_SYN_DIR_FORMAT.format(\n USER_HOME,\n WK_BASE_DIR,\n os.path.split(working_dir)[1],\n hashlib.md5(working_dir).hexdigest()\n )\n dir_profile = \"{}/{}\".format(dir_profile, sync_task_name)\n\n config['dir_profile'] = dir_profile\n config['remote_host_name'] = remote_host\n config['remote_working_dir_parent'] = remote_working_dir_parent\n\n # create directory\n logging.info(\"Registering directory {} as sync task {}\".format(working_dir, sync_task_name))\n os.makedirs(dir_profile)\n\n # write rsync filter file\n rsync_filter_file = WK_RSYNC_FILTER_FILE.format(dir_profile)\n wk_conf_file = WK_CONF_FILE.format(dir_profile)\n logging.info('Writing filter file to {0}'.format(rsync_filter_file))\n with open(rsync_filter_file, 'w') as f:\n f.write(get_file_content('rsync_filter.txt'))\n\n logging.info(\"Writing configuration data to {0}\".format(wk_conf_file))\n with open(wk_conf_file, 'w') as f:\n f.write(get_file_content('drsync_conf.txt').format(**config))\n\n logging.info(get_file_content('post_reg_msg.txt'))\n","repo_name":"sheoran/drsync","sub_path":"drsync/drsync.py","file_name":"drsync.py","file_ext":"py","file_size_in_byte":5642,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"1703056429","text":"from keras import backend as K\nfrom keras.engine.topology import Layer\nimport tensorflow as tf\n\nclass MCCA(Layer):\n '''\n MCCA layer is used compute the MCCA objective\n \n '''\n\n def __init__(self, output_dim=1, feature_shape = 10, num_sets = 3, name='mcca_layer'):\n \n super(MCCA, self).__init__()\n self.output_dim = output_dim\n self.f = feature_shape\n self.N = num_sets\n self.name = name\n \n\n def build(self, input_shape):\n \n super(MCCA, self).build(input_shape)\n\n def call(self, x):\n \n # mean\n one = tf.constant([1.0])\n sample = tf.shape(x)[0]\n sample_float = tf.cast(sample, 'float')\n \n partition = tf.divide(one, sample_float)\n xbar = K.transpose(x) - partition * tf.matmul(K.transpose(x), tf.ones([sample, sample]))\n R = tf.matmul(xbar, tf.transpose(xbar))\n Rs = tf.Variable(tf.zeros_like(R))\n indices = []\n values = []\n for i in range(self.N):\n for j in range(self.f):\n for k in range(self.f):\n indices.append([j + i * self.f, k + i * self.f])\n values.append(R[j + i * self.f, k + i * self.f])\n \n S = tf.scatter_nd_update(Rs, indices, values)\n T = tf.matmul(tf.linalg.inv(S + tf.constant([1e-6]) * tf.eye(self.f*self.N)), R - S)\n \n U, V = tf.linalg.eigh(T)\n U_sort, _ = tf.nn.top_k(U, 1)\n corr = K.sum(K.sqrt(U_sort))\n \n return -corr\n\n def compute_output_shape(self, input_shape):\n return (input_shape[0], self.output_dim)\n\n def get_config(self):\n config = {\n 'output_dim': self.output_dim,\n 'sets_dim': self.N,\n }\n base_config = super(MCCA, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))","repo_name":"Yansf677/Deep-multiset-CCA","sub_path":"mcca_layer.py","file_name":"mcca_layer.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"20292166924","text":"import re\nprint(\"Our Magical Calculator:\")\nprint(\"Type 'quit' to exit\")\nrun=True\nprevious=0\ndef performMath():\n global run\n global previous\n equation=\"\"\n if previous==0:\n equation=input(\"enter a equation:\")\n else:\n equation=input(str(previous))\n if equation==\"quit\":\n run=False\n 
print('Good bye::::')\n else:\n equation=re.sub('[a-zA-Z,:,;]','',equation)\n if previous==0:\n previous=eval(equation)\n\n else:\n previous=eval(str(previous)+equation)\n\nwhile run:\n performMath()","repo_name":"Rahul-Agg/Calculator_python","sub_path":"buildcalculator.py","file_name":"buildcalculator.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10271865904","text":"from django.urls import include, path\nfrom .views import *\n\napp_name = 'user'\n\nurlpatterns = [\n # user/\n path('lecturer/', include('django.contrib.auth.urls')),\n path('lecturer/list/', LecturerList.as_view(), name='lecturer-list'),\n path('lecturer/register/', LecturerRegister.as_view(), name='lecturer-register'),\n path('lecturer//delete/', lecturer_delete, name='lecturer-delete'),\n path('student/', include('django.contrib.auth.urls')),\n path('student/register/', StudentRegister.as_view(), name='student-register'),\n]\n","repo_name":"Vezral/ResultAggregator","sub_path":"user/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5800154701","text":"import dml\nimport prov.model\nimport datetime\nimport json\nimport uuid\nimport folium\nimport os\nfrom math import cos, asin, sqrt\n\n\nclass display():\n contributor = 'jhs2018_rpm1995'\n reads = ['jhs2018_rpm1995.greenassets']\n writes = ['jhs2018_rpm1995.kmeansdata']\n\n def __init__(self, scale):\n self.execute(scale)\n\n def findcell(self, value, axis):\n # return min(axis, key=lambda x: abs(x - value)) # Wrong logic... Spent an entire night on this line\n for i in range(1, len(axis)):\n if axis[i] > value:\n return axis[i - 1]\n\n def distance(self, lat1, lon1, lat2, lon2):\n p = 0.017453292519943295\n a = 0.5 - cos((lat2 - lat1) * p) / 2 + cos(lat1 * p) * cos(lat2 * p) * (1 - cos((lon2 - lon1) * p)) / 2\n return 12742 * asin(sqrt(a))\n\n def closestpoint(self, cells, long, lat): # Will never be able to recreate this again\n return min(cells, key=lambda x: display.distance(self, lat, long, x[0][1], x[0][0]))\n\n def execute(self, scale):\n # Retrieve datasets\n startTime = datetime.datetime.now()\n #scale = float(input(\"Please enter Scale [0.01 - 0.1] \"))\n\n # Set up the database connection.\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('jhs2018_rpm1995', 'jhs2018_rpm1995')\n\n print(\"Now running display.py\")\n\n dir_path = os.path.dirname(os.path.abspath(__file__))\n # dir_path = \"C://cs591//course-2018-spr-proj//jhs2018_rpm1995\"\n filenamemap = os.path.join(dir_path, \"assets.html\")\n map_osm = folium.Map(location=[39, -98.1], zoom_start=4)\n\n assets = repo.jhs2018_rpm1995.greenassets.find()\n budgets = repo.jhs2018_rpm1995.greenobjects.find({\"Type\": {\"$eq\": \"budget\"}})\n\n # for asset in assets: # Uncomment these four lines to see assets on the map\n # coords = (asset['Location'][1], asset['Location'][0])\n # folium.Marker(coords, popup=str(coords)).add_to(map_osm)\n # map_osm.save(filenamemap)\n\n grid = {} # This will contain coordinates of the grid as keys, and assets assigned to that grid as values\n cells = [] # To hold just the coordinates of the grid\n megalist = [] # Will hold data to write to database\n\n i = -71.189\n while i < -70.878:\n j = 42.234\n while j < 42.406:\n coords = (j, i)\n # folium.Marker(coords, popup=str(coords)).add_to(map_osm) # Uncomment to see grid on 
map\n # grid[coords] = 0 # For overall counts\n grid[coords] = [[0], [0], [0], [0], [0], [0]] # [[charge], [hubway], [open spaces], [trees],\n # [budget], [crime]]\n cells.append(coords)\n j += scale\n i += scale\n # map_osm.save(filenamemap) #\n\n xaxis = [] # Adjust scale of grid here\n i = -71.189\n while i < -70.878:\n xaxis.append(i)\n i += scale\n\n yaxis = [] # Adjust scale of grid here\n i = 42.234\n while i < 42.406:\n yaxis.append(i)\n i += scale\n\n budget_coords = [] # To store coordinates of budgets\n for budget in budgets:\n budget_coords.append([budget['Location'], budget['Budget']])\n\n # for coords in budget_coords:\n # print(\"For coords: \" + str(coords))\n # cell = display.closestcell(cells, coords[0], coords[1])\n # folium.Marker(cell, icon=folium.Icon(color='green')).add_to(map_osm)\n\n for cell in cells:\n answer = display.closestpoint(self, budget_coords, cell[1], cell[0]) # Hallelujah\n grid[cell][4][0] += float(answer[1]) # Storing budget\n\n for asset in assets: # This loop finds the cell that the asset belongs to and correspondingly\n y = asset['Location'][1] # ...increases the count of that asset type in the dictionary\n x = asset['Location'][0] # ...representation\n typekind = asset['Type']\n ycell = display.findcell(self, y, yaxis)\n xcell = display.findcell(self, x, xaxis)\n if (ycell, xcell) in grid: # O(1) lookup time. Hire me, Google\n # grid[(ycell, xcell)] += 1 # for overall counts\n if typekind == \"charge\":\n grid[(ycell, xcell)][0][0] += 1\n elif typekind == \"hubway\":\n grid[(ycell, xcell)][1][0] += 1\n elif typekind == \"openspace\":\n grid[(ycell, xcell)][2][0] += 1\n elif typekind == \"tree\":\n grid[(ycell, xcell)][3][0] += 1\n elif typekind == \"crime\":\n grid[(ycell,xcell)][5][0] += 1\n # grid[(ycell, xcell)][4][0] += 1\n for coords, counts in grid.items(): # Gonna save to database and display on map\n megalist.append({\"coordinates\": coords, \"charge_count\": counts[0][0], \"hubway_count\": counts[1][0],\n \"open_count\": counts[2][0], \"tree_count\": counts[3][0], \"budget\": counts[4][0],\n \"crime_count\": counts[5][0]})\n # megalist.append({\"coordinates\": coords, \"charge_count\": counts[0][0], \"hubway_count\": counts[1][0],\n # \"open_count\": counts[2][0], \"tree_count\": counts[3][0],\n # \"crime_count\": counts[4][0]})\n folium.Marker(coords, popup=str(counts)).add_to(map_osm)\n\n repo.dropCollection(\"kmeansdata\")\n repo.createCollection(\"kmeansdata\")\n repo['jhs2018_rpm1995.kmeansdata'].insert_many(megalist)\n map_osm.save(filenamemap)\n\n repo.logout()\n\n endTime = datetime.datetime.now()\n\n return {\"start\": startTime, \"end\": endTime}","repo_name":"j-silverman/DataMechanicsSpringProject","sub_path":"displayforflask.py","file_name":"displayforflask.py","file_ext":"py","file_size_in_byte":6001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"33402053799","text":"from classes import ItemLargura, ResultadoCusto, TestManager\nfrom common import get_cost_per_mm\n\n\ndef greedy_choice(\n test_case: TestManager,\n current_item: ItemLargura,\n rolos: list = [],\n total_cost: int = 0\n ) -> ResultadoCusto:\n \"\"\"\n O parâmetro de escolha gulosa é o custo por mm de redução, e não\n simplesmente o menor custo de redução \n \"\"\"\n reduction = current_item.reduction_needed()\n\n cost_per_mm = get_cost_per_mm(current_item)\n if '3' in cost_per_mm and reduction < 3: cost_per_mm.pop('3')\n if '2' in cost_per_mm and reduction < 2: cost_per_mm.pop('2')\n\n 
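# A tiny self-contained illustration of the greedy criterion used here: pick the
# cylinder with the lowest cost per mm of reduction, not the lowest absolute
# cost. The prices below are made up for the example.
exemplo_custo = {"1": 40, "2": 70, "3": 90}        # cost of reducing 1, 2 or 3 mm
exemplo_por_mm = {k: v / int(k) for k, v in exemplo_custo.items()}
# {"1": 40.0, "2": 35.0, "3": 30.0}: the 3 mm cylinder wins despite costing most
assert int(min(exemplo_por_mm, key=exemplo_por_mm.get)) == 3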
best_cilinder_size = int(min(cost_per_mm, key=cost_per_mm.get))\n    reduction -= best_cilinder_size\n    reduction_cost = current_item.cost_by_number(best_cilinder_size)\n    rolos.append(ResultadoCusto.build_rolo_info(\n        current_item.width, best_cilinder_size, reduction_cost\n    ))\n    total_cost += reduction_cost\n\n    if reduction == 0:\n        return ResultadoCusto(current_item, total_cost, rolos)\n    else:\n        current_item = test_case.get_cost(current_item.width - best_cilinder_size)\n        return greedy_choice(test_case, current_item, rolos, total_cost)\n\n\ndef get_best_option(test_case: TestManager) -> ResultadoCusto:\n    \"\"\"\n    Recebe o TestManager e retorna o ResultadoCusto com a melhor forma de se chegar em 4mm\n    utilizando um algoritmo guloso\n    \"\"\"\n    initial_item = test_case.widthList.pop(0)\n    best_result = greedy_choice(test_case, initial_item, [], 0)\n    best_result.item = initial_item\n    return best_result\n","repo_name":"pud1m/puc-paa-final","sub_path":"greedy.py","file_name":"greedy.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"1577376654","text":"#!/usr/bin/env python\n\nimport os\nimport shutil\nimport time\nimport urllib\nimport json\nimport re\n\nPOST_RE = re.compile('^#([^\\n]+)')\n\n\n# ============ helpers ============\ndef md_to_html(md):\n\tparams = json.dumps({'text': md, 'mode': 'markdown'}).encode('utf-8')\n\thandle = urllib.urlopen('https://api.github.com/markdown', params)\n\treturn handle.read()\n\n\ndef get_metainfo(md):\n\tm = POST_RE.match(md)\n\tif m:\n\t\treturn m.group(1)\n\n\ndef get_first_commit_date(path):\n\tcmd = 'git log --reverse --pretty=format:\"%%cd\" --date=short %s'\n\tdate = os.popen(cmd % path).readline().strip()\n\treturn time.strptime(date, '%Y-%m-%d')\n\n\ndef write_file(path, text):\n\td = os.path.dirname(path)\n\tif not os.path.exists(d):\n\t\tos.makedirs(d)\n\topen(path, 'w').write(text)\n\n\n# ============ class ============\nclass BlogSubject(object):\n\tdef __init__(self, path):\n\t\tmd = open(path, 'r').read()\n\n\t\tself.title = get_metainfo(md)\n\t\tself.content = md_to_html(md)\n\n\t\tself._path = 'subject/' + os.path.basename(path).replace('.md', '.html')\n\n\tdef write_html(self, base_path):\n\t\ttemplate = open('template/page.html', 'r').read()\n\t\thtml = template.replace('-TITLE-', self.title)\n\t\thtml = html.replace('-CONTENT-', self.content)\n\t\thtml = html.replace('-ID-', self._path)\n\t\thtml = html.replace('-URL-', \"http://qixiaoxia.com/\" + self._path)\n\t\twrite_file(os.path.join(base_path, self._path), html)\n\n\nclass BlogPost(object):\n\tdef __init__(self, path):\n\t\tmd = open(path, 'r').read()\n\n\t\tself.title = get_metainfo(md)\n\t\tself.content = md_to_html(md)\n\t\tself.date = get_first_commit_date(path)\n\n\t\tself._path = 'blog/' + os.path.basename(path).replace('.md', '.html')\n\n\tdef write_html(self, base_path):\n\t\ttemplate = open('template/page.html', 'r').read()\n\t\thtml = template.replace('-TITLE-', self.title)\n\t\thtml = html.replace('-CONTENT-', self.content)\n\t\thtml = html.replace('-ID-', self._path)\n\t\thtml = html.replace('-URL-', \"http://qixiaoxia.com/\" + self._path)\n\t\twrite_file(os.path.join(base_path, self._path), html)\n\n\tdef get_index_html(self):\n\t\ttemplate = '%s - <a href=\"%s\">%s</a>'\n\t\tdate = time.strftime('%d %b', self.date)\n\t\treturn template % (date, self._path, self.title)\n\n\n# ============ entry ============\ndef main():\n\tif not 
os.path.exists('site/subject/r'):\n\t\tos.mkdir('site/subject/r')\n\tfor fn in os.listdir('subject/r'):\n\t\tshutil.copy('subject/r/' + fn, 'site/subject/r/' + fn)\n\n\tif not os.path.exists('site/blog/r'):\n\t\tos.mkdir('site/blog/r')\n\tfor fn in os.listdir('post/r'):\n\t\tshutil.copy('post/r/' + fn, 'site/blog/r/' + fn)\n\n\tfor fn in os.listdir('subject'):\n\t\tp = os.path.join('subject', fn)\n\t\tif os.path.isdir(p):\n\t\t\tcontinue\n\t\tif p.startswith('.'):\n\t\t\tcontinue\n\t\tif not p.endswith('.md'):\n\t\t\tcontinue\n\n\t\tsubject = BlogSubject(p)\n\t\tsubject.write_html('site')\n\n\tall_post = []\n\tfor fn in os.listdir('post'):\n\t\tp = os.path.join('post', fn)\n\t\tif os.path.isdir(p):\n\t\t\tcontinue\n\t\tif p.startswith('.'):\n\t\t\tcontinue\n\t\tif not p.endswith('.md'):\n\t\t\tcontinue\n\n\t\tpost = BlogPost(p)\n\t\tall_post.append(post)\n\n\tall_post.sort(key=lambda x: x.date, reverse=True)\n\tcur_year = 0\n\ttags = []\n\tfor post in all_post:\n\t\tyear = post.date.tm_year\n\t\tif year != cur_year:\n\t\t\tif cur_year != 0:\n\t\t\t\ttags.append('</ul>')\n\t\t\ttags.append('<h2>%d</h2>' % year)\n\t\t\ttags.append('<ul>')\n\t\t\tcur_year = year\n\t\ttags.append('<li>%s</li>' % post.get_index_html())\n\t\tpost.write_html('site')\n\ttags.append('</ul>
')\n\n\ttemplate = open('template/index.html', 'r').read()\n\thtml = template.replace('-BLOG-', ''.join(tags))\n\twrite_file('site/index.html', html)\n\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"skykapok/blog-qixiaoxia","sub_path":"reload.py","file_name":"reload.py","file_ext":"py","file_size_in_byte":3551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"455224324","text":"import pandas as pd\nfrom sklearn.preprocessing import StandardScaler\n\ndef read_chartmetric_data(base_url, artist_id, incident_date, artist_type):\n\n # Read in the data\n df = pd.read_csv(f'{base_url}/API_data/{artist_type}/{artist_id}_chartmetric_{incident_date}.csv')\n\n return df\n\ndef clean_chartmetric_data(df):\n\n chartmetric_trim = df.drop(columns=['Unnamed: 0','listener_change'\n ,'spins_change', 'follower_pct_change_insta'\n ,'follower_pct_change_tiktok', 'follower_pct_change_youtube'])\n\n chartmetric_trim['date'] = pd.to_datetime(chartmetric_trim['date'])\n\n chartmetric_df = chartmetric_trim.set_index('date')\n\n # chartmetric_scaler = StandardScaler()\n\n # chartmetric_df = chartmetric_scaler.fit_transform(chartmetric_df)\n\n return chartmetric_df\n","repo_name":"srs366/musicians_v_cancellation","sub_path":"scripts/Merging_Data/Preprocessing/chartmetric_clean.py","file_name":"chartmetric_clean.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"4"} +{"seq_id":"73680243957","text":"def numbers(num):\n mayor = -1\n while num >= 0:\n if num > mayor:\n return num\n return num\n\nif __name__ == \"__main__\":\n #entrada\n num = int(input(\"Número positivo: \"))\n \n #proceso\n mayor = -1\n while num > 0:\n if num > mayor:\n mayor = num\n num = int(input(\"Número positivo: \"))\n\n #salida\n print(\"Mayor número ingresado:\", mayor)\n","repo_name":"IES-Rafael-Alberti/2324-u2-sentencias-repetitivas-Albertopinero","sub_path":"src/ejercicio16.py","file_name":"ejercicio16.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"7480745740","text":"import gradio as gr\n\n\ndef image(im):\n return im\n\n\nwith gr.Blocks() as demo:\n im = gr.Image()\n im2 = gr.Image()\n btn = gr.Button()\n btn.click(lambda x: x, outputs=im2, inputs=im)\n\n\nif __name__ == \"__main__\":\n demo.launch()\n","repo_name":"gradio-app/gradio","sub_path":"demo/image-simple/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","stars":23611,"dataset":"github-code","pt":"4"} +{"seq_id":"30339712073","text":"import tkinter as tk #GUI(Graphical User Interface)の作成\r\n\r\n# 電卓の表示形式\r\nh_gamen = tk.Tk()\r\nh_gamen.title(\"calculator\")\r\nh_hyouji = tk.Label(h_gamen, fg=\"red\", bg=\"yellow\") \r\nh_hyouji.grid(row=0, column=0, columnspan=3, sticky=tk.W)\r\ndef Num_hyouji(h_event):\r\n h_num_button = h_event.widget.cget(\"text\")\r\n h_keta_hyouji = h_hyouji.cget(\"text\") \r\n h_hyouji[\"text\"] = h_keta_hyouji + h_num_button\r\n\r\n# リセットボタンの動作\r\ndef all_clear(h_event):\r\n h_hyouji[\"text\"] = \"\"\r\n\r\n# 例外処理\r\ndef calculate(h_event):\r\n try:\r\n h_keisan = h_hyouji.cget(\"text\")\r\n h_hyouji[\"text\"] = str(eval(h_keisan))\r\n except:\r\n h_hyouji[\"text\"] = \"ERROR!\"\r\n\r\n# 数字、小数点ボタンの設定\r\nfor j in range(3):\r\n for i in range(3):\r\n h_button = tk.Button(h_gamen, text=str(7+i-3*j), width=10)\r\n 
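# A standalone sketch of the bind pattern this calculator relies on: Tk event
# sequences are written with angle brackets ("<Button-1>" is a left click), and
# the handler recovers the pressed button's caption through event.widget, just
# as Num_hyouji does. Widget names here are illustrative only.
import tkinter as tk

demo = tk.Tk()
demo_label = tk.Label(demo, text="")
demo_label.grid(row=0, column=0)

def demo_click(event):
    # append the clicked button's text to the display label
    demo_label["text"] = demo_label.cget("text") + event.widget.cget("text")

demo_button = tk.Button(demo, text="7")
demo_button.grid(row=1, column=0)
demo_button.bind("<Button-1>", demo_click)
demo.mainloop()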
h_button.grid(row=j+1, column=i)\r\n        h_button.bind(\"<Button-1>\", Num_hyouji)\r\n\r\nh_button = tk.Button(h_gamen, text=\"0\")\r\nh_button.grid(row=4, column=0, columnspan=2, sticky=tk.W+tk.E) #W+Eで左右いっぱい\r\nh_button.bind(\"<Button-1>\", Num_hyouji)\r\n\r\nh_button = tk.Button(h_gamen, text=\".\", width=10)\r\nh_button.grid(row=4, column=2,)\r\nh_button.bind(\"<Button-1>\", Num_hyouji)\r\n\r\n# 演算子ボタンの設定\r\nh_enzanshi = [\"+\", \"-\", \"*\", \"/\"]\r\nfor i in range(len(h_enzanshi)):\r\n    h_button = tk.Button(h_gamen, text=h_enzanshi[i], width=5)\r\n    h_button.grid(row=1+i, column=3)\r\n    h_button.bind(\"<Button-1>\", Num_hyouji)\r\n\r\nh_button = tk.Button(h_gamen, text=\"=\", width=5)\r\nh_button.grid(row=5, column=3)\r\nh_button.bind(\"<Button-1>\", calculate)\r\n\r\n# リセットボタンの設定\r\nh_button = tk.Button(h_gamen, text=\"All Clear\")\r\nh_button.grid(row=5, column=0, columnspan=3, sticky=tk.W+tk.E)\r\nh_button.bind(\"<Button-1>\", all_clear)\r\n\r\nh_gamen.mainloop()\r\n","repo_name":"TaketoSuehiro/Intern","sub_path":"MyDentak.py","file_name":"MyDentak.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"38074750431","text":"import cv2\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom gym import spaces\nfrom torch import optim\n\nfrom lge.inverse_model import ConvInverseModel\n\n\ndef sample_image():\n    # Sample fake Atari image (stacking of 4 frames)\n    img = np.random.randint(0, 255, (4, 4, 3)).astype(np.float32)\n    img = cv2.resize(img, (84, 84))  # (4 x 4) to (84 x 84)\n    img = np.moveaxis(img, 2, 0)  # (H x W x C) to (C x H x W)\n    return img\n\n\ndef test_train_inverse_dynamic():\n    action_space = spaces.Discrete(10)\n\n    n_obs = 3  # Number of possible observations\n    all_observations = torch.tensor([sample_image() for _ in range(n_obs)]) / 255\n\n    # For each possible couple (observation, next_observation), we define an action\n    all_actions = torch.tensor([[action_space.sample() for _ in range(n_obs)] for _ in range(n_obs)])\n\n    inverse_model = ConvInverseModel(action_size=action_space.n, latent_size=4)\n    optimizer = optim.Adam(inverse_model.parameters(), lr=1e-3)\n\n    for _ in range(100):\n        batch_size = 32\n        # Sample\n        obs_idx = torch.randint(0, 3, size=(batch_size,))\n        next_obs_idx = torch.randint(0, 3, size=(batch_size,))\n        observations = all_observations[obs_idx]\n        next_observations = all_observations[next_obs_idx]\n        actions = all_actions[obs_idx, next_obs_idx]\n\n        # Compute the output image\n        inverse_model.train()\n        pred_actions = inverse_model(observations, next_observations)\n\n        # Compute the loss\n        loss = F.cross_entropy(pred_actions, actions.squeeze())\n\n        # Step the optimizer\n        optimizer.zero_grad()\n        loss.backward()\n        optimizer.step()\n\n    assert loss < 0.75\n","repo_name":"emrul/lge","sub_path":"tests/inverse_model_test.py","file_name":"inverse_model_test.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"4"} +{"seq_id":"40585749753","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom numpy import fft\nimport scipy.optimize\n\nx = np.linspace(-5*np.pi, 5*np.pi, 1001)\ndx = x[1] - x[0]\n\n# function which should match data\ndef model_fn(x, p):\n    v = p[0] * np.sin(2*np.pi * p[1] * x + p[2]) + p[3]\n    return v\n\ndef model_jacobian(x, p):\n    dp0 = np.sin(2*np.pi * p[1] * x + p[2])\n    dp1 = p[0] * (2 * np.pi * x) * np.cos(2*np.pi * p[1] * x + p[2])\n    dp2 = p[0] * np.cos(2*np.pi * p[1] * x + p[2])\n    dp3 = np.ones(x.shape)\n    return 
np.stack((dp0, dp1, dp2, dp3), axis=1)\n\ntrue_params = [0.23626, 0.2367, 1.246236, 0]\ny = model_fn(x, true_params)\ny_noisy = y + np.random.normal(scale=0.05, size=y.shape)\n\n# function which should be minimized ... or rather least_squares will take the sum of the squares to minimize it...\ndef min_fn(p): return model_fn(x, p) - y_noisy\n\n# need good guess of initial frequency for fit to converge. Get this from FFT of data\ny_ft = fft.fftshift(fft.fft(fft.ifftshift(y)))\nfrqs = fft.fftshift(fft.fftfreq(x.size, dx))\ndf = frqs[1] - frqs[0]\n\n# guess parameters\npeak_index = np.argmax(np.abs(y_ft) * (frqs > 0)) # restrict to positive peak\nf_guess = frqs[peak_index]\namp_guess = 2 / x.size * np.abs(y_ft[peak_index])\nphi_guess = np.angle(y_ft[peak_index])\nbg_guess = np.mean(y_noisy)\n\n# sample fitting without jacobian\ninit_params = np.array([amp_guess, f_guess, phi_guess, bg_guess])\nlower_bounds = [0, 0, -np.inf, -np.inf]\nupper_bounds = [np.inf, np.inf, np.inf, np.inf]\nresults = scipy.optimize.least_squares(min_fn, init_params,\n bounds=(lower_bounds, upper_bounds),\n jac=lambda p: model_jacobian(x, p))\nfit_params = results[\"x\"]\n\n# interpolated fit function\nx_interp = np.linspace(x.min(), x.max(), 1001)\ny_guess = model_fn(x_interp, init_params)\ny_fit = model_fn(x_interp, fit_params)\n\nfigh = plt.figure()\nfigh.suptitle(\"sample fitting with scipy.optimize\")\nax = figh.add_subplot(1, 1, 1)\nax.plot(x, y_noisy, 'rx', label=\"noisy data\")\n#ax.plot(x_interp, y_guess, 'k', label=\"initial guess\")\nax.plot(x_interp, y_fit, 'b', label=\"fit\")\nax.set_xlabel(\"x\")\nax.set_ylabel(\"y\")\n\nplt.show()","repo_name":"fdjutant/flagella3D","sub_path":"scripts/2022_03_28_sinusiodal_fitting_peter.py","file_name":"2022_03_28_sinusiodal_fitting_peter.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"33448838279","text":"import torch\nimport torch.nn as nn\nimport torchvision.transforms.functional as TF\n\n\nclass DoubleConv(nn.Module):\n def __init__(self, in_channels, out_channels):\n super(DoubleConv, self).__init__()\n self.conv = nn.Sequential(\n #nn.Dropout2d(p=0.2),\n nn.Conv2d(in_channels, out_channels, 3, 1, 1, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(inplace=True),\n #nn.Dropout2d(p=0.2),\n nn.Conv2d(out_channels, out_channels, 3, 1, 1, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(inplace=True),\n )\n\n def forward(self, x):\n return self.conv(x)\n\nclass UNET(nn.Module): \n def __init__(\n self, in_channels=3, out_channels=1, features=[64, 128, 256, 512],#, 512],\n ):\n super(UNET, self).__init__()\n self.decoder = nn.ModuleList()\n self.encoder = nn.ModuleList()\n self.pool = nn.MaxPool2d(kernel_size=2, stride=2)\n\n # encoder part of UNET\n for feature in features:\n self.encoder.append(DoubleConv(in_channels, feature))\n in_channels = feature\n\n # decoder part of UNET\n for feature in reversed(features):\n self.decoder.append(\n nn.ConvTranspose2d(\n feature*2, feature, kernel_size=2, stride=2,\n )\n )\n self.decoder.append(DoubleConv(feature*2, feature))\n\n self.bottleneck = DoubleConv(features[-1], features[-1]*2)\n self.final_conv = nn.Conv2d(features[0], out_channels, kernel_size=1)\n\n def forward(self, x):\n #print(x.is_cuda)\n skip_connections = []\n\n for down in self.encoder:\n x = down(x) # Apply double convulution\n skip_connections.append(x) # save output for the decoder (Skip connection)\n x = self.pool(x) # Apply max pooling 
operation\n \n x = self.bottleneck(x) # Apply double convulution for the bottleneck\n skip_connections = skip_connections[::-1] # flip the order of the saved skip connections\n\n for idx in range(0, len(self.decoder), 2):\n x = self.decoder[idx](x) # Apply decoderampling\n skip_connection = skip_connections[idx//2] # get skip connection\n\n if x.shape != skip_connection.shape:\n x = TF.resize(x, size=skip_connection.shape[2:])\n\n concat_skip = torch.cat((skip_connection, x), dim=1) # concatenate skip connection\n x = self.decoder[idx+1](concat_skip) # decoderample \n\n return self.final_conv(x)\n\n\n\n\ndef test():\n x = torch.randn((3, 1, 161, 161))\n model = UNET(in_channels=1, out_channels=1)\n preds = model(x)\n assert preds.shape == x.shape\n print(type(preds))\n\n\nif __name__ == \"__main__\":\n test()","repo_name":"dadihrannar1/P5_asphalt_robot","sub_path":"Training/utils/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2829,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"4"} +{"seq_id":"41528433421","text":"from utilities import *\n\ndef proposeExtrapolations(programs, N=30):\n trace = programs[0].explode().convertToSequence().removeDuplicates()\n originalUndesirability = trace.undesirabilityVector()\n\n extrapolations = []\n\n extrapolationGenerators = [ program.explode().extrapolations() for program in programs ]\n for e in interleaveGenerators(extrapolationGenerators):\n t = e.convertToSequence().removeDuplicates()\n newUndesirability = t.undesirabilityVector()\n badness = (newUndesirability > originalUndesirability).sum()\n if t.canonicalTranslation() == trace.canonicalTranslation(): continue\n if any([t.canonicalTranslation() == o.canonicalTranslation() for _,o in extrapolations ]): continue\n extrapolations.append((badness,t))\n\n extrapolations.sort(key=lambda bo: bo[0])\n return [o for _,o in extrapolations ][:N]\n\ndef exportExtrapolations(programs, fn, index=None):\n extrapolations = proposeExtrapolations(programs)\n framedExtrapolations = [ frameImageNicely(t.draw(adjustCanvasSize = True))\n for t in extrapolations ]\n if index is not None:\n framedExtrapolations = [1 - frameImageNicely(loadImage(index))] + framedExtrapolations\n a = 255*makeImageArray(framedExtrapolations)\n saveMatrixAsImage(a,fn)\n\n","repo_name":"ellisk42/TikZ","sub_path":"extrapolate.py","file_name":"extrapolate.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":101,"dataset":"github-code","pt":"4"} +{"seq_id":"1268847011","text":"#BOJ-1012-DFS와_BFS\nimport sys\nfrom collections import deque\n\nn, m, start = map(int, sys.stdin.readline().split()) \ngraph = [[] for i in range(n+1)]\n\nfor i in range(m):\n n1, n2 = map(int, sys.stdin.readline().split())\n graph[n1].append(n2)\n graph[n2].append(n1) \n\ndef dfs(graph, start, visited):\n visited[start] = True\n print(start, end=' ')\n graph[start].sort() \n for node in graph[start]:\n if not visited[node]:\n dfs(graph, node, visited)\n\ndef bfs(graph, start, visited):\n queue = deque([start])\n visited[start] = True\n while queue:\n now = queue.popleft()\n print(now, end=' ')\n graph[now].sort() \n for i in graph[now]:\n if not visited[i]:\n queue.append(i)\n visited[i] = True\n\nvisited = [False] * (n+1)\ndfs(graph, start, visited)\nvisited = [False] * (n+1)\nprint()\nbfs(graph, start, 
visited)","repo_name":"ohjiae/Algorithm_Study_2022","sub_path":"bfs_dfs/koodaeun/BOJ-1012-DFS와_BFS.py","file_name":"BOJ-1012-DFS와_BFS.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"4"} +{"seq_id":"21607630438","text":"from rdkit import Chem\nfrom rdkit.Chem import rdmolfiles\nimport json\nimport os\nimport argparse\nimport pandas as pd\nimport ast\nfrom pprint import pprint \nfrom time import time \nimport pytorch_lightning as pl\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm \nimport cv2\nimport shutil \nimport torch \nfrom more_itertools import chunked\nfrom PIL import Image \n\nfrom molgrapher.models.graph_recognizer import GraphRecognizer\nfrom molgrapher.models.abbreviation_detector import AbbreviationDetectorCPU, AbbreviationDetectorGPU, SpellingCorrector\nfrom molgrapher.datasets.dataset_image import ImageDataset\nfrom molgrapher.data_modules.data_module import DataModule\nfrom molgrapher.utils.utils_dataset import get_bonds_sizes\nfrom mol_depict.utils.utils_generation import get_abbreviations_smiles_mapping\nfrom mol_depict.utils.utils_drawing import draw_molecule_rdkit\nfrom molgrapher.utils.utils_logging import count_model_parameters\n\n\nos.environ[\"OMP_NUM_THREADS\"] = \"1\" \ncv2.setNumThreads(0)\ntorch.set_float32_matmul_precision(\"medium\")\n\ndef proceed_batch(args, input_images_paths):\n # Configuration\n model_name = \"wedge_4\"\n clean = True\n # Action\n predict = True\n visualize = False\n visualize_rdkit = False\n # DataLoader\n preprocess = True\n # Save\n clean = True\n visualize_output_folder_path = os.path.dirname(__file__) + \"/../../../data/visualization/predictions/PatCID_1K/\"\n visualize_rdkit_output_folder_path = os.path.dirname(__file__) + \"/../../../data/predictions/rdkit/\"\n \n if visualize:\n if clean and (os.path.exists(visualize_output_folder_path)):\n shutil.rmtree(visualize_output_folder_path)\n if not os.path.exists(visualize_output_folder_path):\n os.makedirs(visualize_output_folder_path)\n if predict and (args.save_mol_folder != \"\"):\n if clean and (os.path.exists(args.save_mol_folder)):\n shutil.rmtree(args.save_mol_folder)\n if not os.path.exists(args.save_mol_folder):\n os.makedirs(args.save_mol_folder)\n \n # Automatically set CPU/GPU device\n if not(args.force_cpu):\n args.force_cpu = not(torch.cuda.is_available())\n print(f\"PyTorch device: {'gpu' if not(args.force_cpu) else 'cpu'}\")\n\n # Read config file\n with open(args.config_dataset_graph_path) as file:\n config_dataset_graph = json.load(file)\n with open(args.config_training_graph_path) as file:\n config_training_graph = json.load(file)\n with open(args.config_dataset_keypoint_path) as file:\n config_dataset_keypoint = json.load(file)\n with open(args.config_training_keypoint_path) as file:\n config_training_keypoint = json.load(file)\n # Update config \n config_dataset_graph[\"num_processes_mp\"] = args.num_processes_mp\n config_dataset_graph[\"num_threads_pytorch\"] = args.num_threads_pytorch\n config_dataset_keypoint[\"num_processes_mp\"] = args.num_processes_mp\n config_dataset_keypoint[\"num_threads_pytorch\"] = args.num_threads_pytorch\n\n # Read dataset images\n data_module = DataModule(\n config_dataset_graph, \n dataset_class = ImageDataset,\n images_folder_path = None,\n images_paths = input_images_paths,\n force_cpu = args.force_cpu,\n remove_captions = args.remove_captions\n )\n data_module.setup_images_benchmarks()\n if preprocess:\n print(f\"Starting Caption Removal 
Preprocessing\")\n ref_t = time()\n data_module.preprocess()\n print(f\"Caption Removal Preprocessing completed in {round(time() - ref_t, 2)}\")\n\n # Read model\n model = GraphRecognizer(\n config_dataset_keypoint, \n config_training_keypoint, \n config_dataset_graph, \n config_training_graph\n )\n\n print(f\"Keypoint detector number parameters: {round(count_model_parameters(model.keypoint_detector)/10**6, 4)} M\")\n print(f\"Node classifier number parameters: {round(count_model_parameters(model.graph_classifier)/10**6, 4)} M\")\n\n # Set up trainer\n if args.force_cpu:\n trainer = pl.Trainer(\n accelerator = \"cpu\",\n precision = config_training_graph[\"precision\"],\n logger = False\n )\n else:\n trainer = pl.Trainer(\n accelerator = config_training_graph[\"accelerator\"],\n devices = config_training_graph[\"devices\"],\n precision = config_training_graph[\"precision\"],\n logger = False\n )\n\n # Get predictions\n torch.set_num_threads(config_dataset_graph[\"num_threads_pytorch\"])\n print(f\"Starting Keypoint Detection + Node Classification\")\n ref_t = time()\n predictions_out = trainer.predict(model, dataloaders=data_module.predict_dataloader())\n print(f\"Keypoint Detection + Node Classification completed in {round(time() - ref_t, 2)}\")\n \n images_filenames = []\n images = []\n predictions = {\n \"graphs\": [], \n \"keypoints\": [], \n \"confidences\": []\n }\n for _ in range(len(predictions_out)):\n _prediction = predictions_out.pop(0)\n if _prediction is None:\n continue\n for _elem in _prediction[\"predictions_batch\"][\"graphs\"]:\n predictions[\"graphs\"].append(_elem)\n for _elem in _prediction[\"predictions_batch\"][\"keypoints_batch\"]:\n predictions[\"keypoints\"].append(_elem)\n for _elem in _prediction[\"predictions_batch\"][\"confidences\"]:\n predictions[\"confidences\"].append(_elem)\n for _elem in _prediction[\"batch\"][\"images_filenames\"]:\n images_filenames.append(_elem)\n for _elem in _prediction[\"batch\"][\"images\"]:\n images.append(_elem)\n \n scaling_factor = config_dataset_keypoint[\"image_size\"][1]//config_dataset_keypoint[\"mask_size\"][1]\n \n # Compute bond size\n bonds_sizes = get_bonds_sizes(predictions[\"keypoints\"], scaling_factor)\n \n # Recognize abbreviations\n print(f\"Starting Abbreviation Recognition\")\n ref_t = time()\n if args.force_cpu or config_training_graph[\"accelerator\"] == \"cpu\":\n abbreviation_detector = AbbreviationDetectorCPU(config_dataset_graph, force_cpu = args.force_cpu)\n else:\n abbreviation_detector = AbbreviationDetectorGPU(config_dataset_graph, force_cpu = args.force_cpu)\n abbreviations_list = abbreviation_detector.mp_run(images_filenames, predictions[\"graphs\"], bonds_sizes)\n print(f\"Abbreviation Recognition completed in {round(time() - ref_t, 2)}\")\n\n # Recognize stereochemistry\n if args.assign_stereo:\n print(f\"Starting Stereochemistry Recognition\")\n ref_t = time()\n stereochemistry_recognizer = StereochemistryRecognizer(config_dataset_graph)\n predictions[\"graphs\"] = stereochemistry_recognizer(images, predictions[\"graphs\"], bonds_sizes)\n print(f\"Stereochemistry Recognition completed in {round(time() - ref_t, 2)}\")\n\n # Create RDKit graph\n print(\"Starting Graph creation\")\n ref_t = time()\n with open(os.path.dirname(__file__) + \"/../../../data/ocr_mapping/ocr_atoms_classes_mapping.json\") as file:\n ocr_atoms_classes_mapping = json.load(file)\n abbreviations_smiles_mapping = get_abbreviations_smiles_mapping()\n predicted_molecules = []\n for abbreviations, graph in zip(abbreviations_list, 
predictions[\"graphs\"]):\n predicted_molecule = graph.to_rdkit(\n abbreviations, \n abbreviations_smiles_mapping, \n ocr_atoms_classes_mapping, \n SpellingCorrector(abbreviations_smiles_mapping),\n assign_stereo = args.assign_stereo,\n align_rdkit_output = args.align_rdkit_output\n ) \n predicted_molecules.append(predicted_molecule)\n print(f\"Graph creation completed in {round(time() - ref_t, 2)}\")\n predictions[\"molecules\"] = predicted_molecules\n\n # Convert to SMILES and set confidence\n predictions[\"smiles\"] = []\n for i, (predicted_molecule, image_filename) in enumerate(zip(predictions[\"molecules\"], images_filenames)):\n if args.save_mol_folder != \"\":\n molecule_path = args.save_mol_folder + image_filename.split(\"/\")[-1][:-4].replace(\"_preprocessed\", \"\") + \".mol\"\n rdmolfiles.MolToMolFile(\n predicted_molecule, \n molecule_path, \n kekulize = False \n )\n smiles = Chem.MolToSmiles(predicted_molecule)\n if smiles:\n predictions[\"smiles\"].append(smiles)\n if smiles == \"C\":\n predictions[\"confidences\"][i] = 0\n else:\n predictions[\"smiles\"].append(None)\n predictions[\"confidences\"][i] = 0\n print(\"The molecule can not be converted to a valid SMILES\")\n \n # Save annotations\n for predicted_smiles, confidence, image_filename, abbreviations in zip(predictions[\"smiles\"], predictions[\"confidences\"], input_images_paths, abbreviations_list):\n #annotation_filename = image_filename.split(\"/\")[1].split(\".\")[0] + \".jsonl\"\n annotation_filename = args.save_mol_folder + \"smiles.jsonl\"\n with open(annotation_filename, \"a\") as f:\n if predicted_smiles is not None:\n if abbreviations != []:\n abbreviations_texts = [abbreviation[\"text\"] for abbreviation in abbreviations]\n else:\n abbreviations_texts = []\n\n annotation = {\n \"smi\": predicted_smiles,\n \"abbreviations\": abbreviations_texts,\n \"conf\": confidence,\n \"file-info\": {\n \"filename\": image_filename, \n \"image_nbr\": 1\n },\n \"annotator\": {\n \"version\": \"1.0.0\",\n \"program\": \"MolGrapher\"\n }\n }\n \n json.dump(annotation, f)\n f.write('\\n')\n\n print(\"Annotation:\")\n print(pd.read_json(path_or_buf = annotation_filename, lines = True))\n \n\n # Visualize predictions\n if visualize:\n for image_filename, image, graph, keypoints, molecule in tqdm(zip(images_filenames, images, predictions[\"graphs\"], predictions[\"keypoints\"], predictions[\"molecules\"]), total=len(images_filenames)):\n smiles = Chem.MolToSmiles(molecule)\n if smiles != \"C\":\n figure, axis = plt.subplots(1, 3, figsize=(20, 10))\n else: \n figure, axis = plt.subplots(1, 2, figsize=(20, 10))\n axis[0].imshow(image.permute(1, 2, 0))\n \n axis[0].scatter(\n [(keypoint[0]*scaling_factor + scaling_factor//2) for keypoint in keypoints], \n [(keypoint[1]*scaling_factor + scaling_factor//2) for keypoint in keypoints], \n color = \"red\",\n alpha = 0.5\n )\n\n graph.display_data_nodes_only(axis=axis[1])\n \n if smiles != \"C\":\n image = draw_molecule_rdkit(\n smiles = smiles,\n molecule = molecule,\n augmentations = False,\n )\n if image is not None:\n axis[2].imshow(image.permute(1, 2, 0))\n\n plt.savefig(f\"{visualize_output_folder_path}/{image_filename.split('/')[-1]}\")\n plt.close()\n\n if visualize_rdkit:\n for image_filename in input_images_paths:\n molecule_path = args.save_mol_folder + image_filename.split(\"/\")[-1][:-4].replace(\"_preprocessed\", \"\") + \".mol\"\n if os.path.exists(molecule_path):\n print(molecule_path)\n image = Image.open(image_filename).convert(\"RGB\")\n figure, axis = plt.subplots(1, 
2, figsize=(20, 10))\n axis[0].imshow(image)\n molecule = rdmolfiles.MolFromMolFile(molecule_path, sanitize = False)\n image = draw_molecule_rdkit(\n smiles = Chem.MolToSmiles(molecule),\n molecule = molecule,\n augmentations = False,\n )\n if image is not None:\n axis[1].imshow(image.permute(1, 2, 0))\n plt.savefig(f\"{visualize_rdkit_output_folder_path}/{image_filename.split('/')[-1]}\")\n plt.close()\n \n \n \ndef main():\n starting_time = time()\n parser = argparse.ArgumentParser()\n parser.add_argument('--input-images-paths', type = str)\n parser.add_argument('--force-cpu', action = argparse.BooleanOptionalAction, default = True, required = False)\n parser.add_argument('--num-threads-pytorch', type = int, default = 10)\n parser.add_argument('--num-processes-mp', type = int, default = 10)\n parser.add_argument('--chunk-size', type = int, default = 200)\n parser.add_argument('--assign-stereo', action = argparse.BooleanOptionalAction, default = True, required = False)\n parser.add_argument('--align-rdkit-output', type = bool, default = False)\n parser.add_argument('--remove-captions', action = argparse.BooleanOptionalAction, default = True, required = False)\n parser.add_argument('--save-mol-folder', type = str, default = \"\")\n parser.add_argument('--config-dataset-graph-path', type = str, default = os.path.dirname(__file__) + \"/../../../data/config_dataset_graph_2.json\")\n parser.add_argument('--config-training-graph-path', type = str, default = os.path.dirname(__file__) + \"/../../../data/config_training_graph.json\")\n parser.add_argument('--config-dataset_keypoint-path', type = str, default = os.path.dirname(__file__) + \"/../../../data/config_dataset_keypoint.json\")\n parser.add_argument('--config-training-keypoint-path', type = str, default = os.path.dirname(__file__) + \"/../../../data/config_training_keypoint.json\")\n args = parser.parse_args()\n\n print(\"Arguments:\")\n pprint(vars(args))\n\n with open(args.input_images_paths, 'r') as f:\n _input_images_paths = []\n for line in f.readlines():\n _input_images_paths.append(ast.literal_eval(line.strip())[\"path\"])\n print(\"Number of images to annotate: \", len(_input_images_paths))\n\n for _batch_images_paths in chunked(_input_images_paths, args.chunk_size):\n proceed_batch(args, _batch_images_paths)\n print(f\"Annotation completed in: {round(time() - starting_time, 2)}\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"DS4SD/MolGrapher","sub_path":"molgrapher/scripts/annotate/predict_molgrapher.py","file_name":"predict_molgrapher.py","file_ext":"py","file_size_in_byte":14380,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"4"} +{"seq_id":"33003887920","text":"import socketserver\nimport binascii\nfrom proto import updater_pb2\n\nfrom os import listdir\nfrom os.path import isfile, join\n\ndef chunked(size, source):\n for i in range(0, len(source), size):\n yield source[i:i+size]\n\nclass RequestHandler(socketserver.BaseRequestHandler):\n def __init__(self, request, client_address, server):\n self.client_address = client_address\n print('Handling new request from %s' % self.client_address[0])\n super().__init__(request, client_address, server)\n\n def read_data(self, how_much):\n data = self.request.recv(how_much)\n if data == b'':\n print(\"socket connection with %s broken\" % self.client_address[0])\n return False\n return data\n\n def send_message(self, message):\n data = message.SerializeToString()\n data = bytes([len(data)]) + data\n self.request.sendall(data)\n\n 
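# A quick sketch of the wire format this handler implements: every serialized
# FChunxMessage travels behind a single length byte, which caps a payload at 255
# bytes, and ROM images are streamed as the fixed 64-byte slices produced by the
# chunked() helper at the top of the file. Plain bytes stand in for a real
# protobuf payload here.
demo_payload = b"\x08\x01"                         # any serialized message
demo_frame = bytes([len(demo_payload)]) + demo_payload
assert demo_frame[0] == len(demo_frame) - 1        # receiver reads size, then body

demo_rom = bytes(range(200))
demo_slices = [demo_rom[i:i + 64] for i in range(0, len(demo_rom), 64)]
assert len(demo_slices) == 4 and b"".join(demo_slices) == demo_rom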
def get_latest_rom(self):\n roms = [f for f in listdir('/binaries/') if isfile(join('/binaries/', f))]\n roms.sort(key=lambda s: list(map(int, s.replace('.bin', '').split('.'))))\n if len(roms) > 0:\n return roms[-1]\n else:\n return None\n\n def get_rom_chunks(self, rom):\n chunks = []\n with open(join('/binaries/', rom), 'rb') as f:\n ba = bytearray(f.read())\n chunks = list(chunked(64, ba))\n return chunks\n\n def handle(self):\n packet_size_data = self.read_data(1)\n if not packet_size_data:\n return\n\n packet_size = int.from_bytes(packet_size_data, 'big')\n print ('parsing %d bytes long payload' % packet_size)\n payload = self.read_data(packet_size)\n print ('received %s' % payload)\n\n if not payload:\n return\n\n chunx_message = updater_pb2.FChunxMessage()\n chunx_message.ParseFromString(payload)\n\n\n if chunx_message.MessageType == 1:\n print ('version request')\n rom = self.get_latest_rom()\n if rom == None:\n chunx_message.Version.VersionString = ''\n else:\n chunx_message.Version.VersionString = rom.replace('.bin', '')\n self.send_message(chunx_message)\n if chunx_message.MessageType == 2:\n print ('updated rom request')\n rom = self.get_latest_rom()\n if rom == None:\n print ('No rom available')\n else:\n chunks = self.get_rom_chunks(rom)\n num_chunks = len(chunks)\n current_chunk_id = 1\n for chunk in chunks:\n chunx_message.UpdateBinary.RomChunk = bytes(chunk)\n chunx_message.UpdateBinary.ChunkId = current_chunk_id\n chunx_message.UpdateBinary.ChunkMax = num_chunks\n self.send_message(chunx_message)\n current_chunk_id = current_chunk_id + 1\n #chunx_message.UpdateBinary.Rom = b'SomeTrashData'\n \n def finish(self):\n self.request.close()","repo_name":"snaiperskaya96/ChunxBudBackend","sub_path":"src/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":3048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"2444064909","text":"firstName= \"Elvis\"\nlastName = \"Garcia\"\nfavoriteMeal = \"Spanish Food/Dominican Food\"\nfavoriteColor = \"Violet\"\n\n#F-Strings\n'''\nprint(f\"My first name is {firstName} my last name is {lastName} my favorite meal is {favoriteMeal} and my favorite is {favoriteColor}\")\n'''\n\n#Argument by name:\n'''\nsolution = \"My first name is {a} my last name is {b} my favorite meal is {c} and my favorite color is {d}\".format(b=\"Garcia\", d=\"Blue\", a=\"Elvis\", c=\"Mangu\")\nprint(solution)\n'''\n\n\n#Argument by position: \n# #When you're counting positioning. YOU START COUNTING AT 0. \n'''\nsolution2 = \"My first name is {0} my last name is {1} my favorite meal is {2} and my favorite color is {3}\".format(\"Elvis\", \"Garcia\", \"Mangu\", \"Purple\")\n'''\n\n#Concatenation: \n'''\nsolution3 = \"My first name is \" + firstName + \" my last name is \" + lastName + \" my favorite meal is \" + \" Mangu \" + \"my favorite color is \" + \" Orange.\"\nprint(solution3)\n'''\n\n\n#Hybrid Model: Argument by Name and Position: \nage = 27\nnumOfCryptosOwned = 15\nsolution4 = \"I am {0} years old, and I am {e}, and I own like {1} cryptocurrencies\".format(age, numOfCryptosOwned, e=\"Latino\")\nprint(solution4)","repo_name":"InstructorElvis/demo","sub_path":"Activity4_soln.py","file_name":"Activity4_soln.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"21337212653","text":"#! 
CCI.py\n# Commodity Channel Index Strategy\nimport numpy as np\nimport pandas as pd\nfrom bokeh.models import ColumnDataSource\n\nfrom core.strategies.indicators.CCI import CCI\nfrom core.strategies.base_strategy import BaseStrategy\n\n\nclass CCIConfig:\n def __init__(self):\n self.constant = 0.015\n self.history = 90\n self.uplevel = 100\n self.downlevel = -100\n self.persisted = 0\n\n\nclass CCITrend:\n def __init__(self, direction=None):\n self.direction = direction\n self.duration = 0\n self.persisted = False\n self.adviced = False\n\n\nclass CCIStrategy(BaseStrategy):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.currentTrend = None\n\n self.age = 0\n self.trend = CCITrend(\"undefined\")\n self.config = CCIConfig()\n self.ppoadv = 'none'\n\n self.cci = CCI(self.config.constant, self.config.history)\n\n self.signalsDataSource = {}\n for ticker in self.tickers:\n self.signalsDataSource[ticker] = ColumnDataSource()\n self.reset_column_data_sources()\n\n self.init_signals()\n\n # Abstract methods\n def init_signals(self):\n for ticker in self.tickers:\n self.signals[ticker] = pd.DataFrame(dict(Date=[],\n signal=[],\n CCI=[],\n positions=[]))\n self.signals[ticker].set_index(\"Date\", inplace=True)\n\n def calc_signals(self, history):\n for ticker in self.tickers:\n self.cci.update(history.loc[ticker].iloc[-1])\n\n lastPrice = history.loc[ticker][\"Close\"].iloc[-1]\n self.age += 1\n signal = 0\n position = 0\n if self.cci.result is not None:\n if self.cci.result >= self.config.uplevel and \\\n (self.trend.persisted or self.config.persisted == 0) and \\\n not self.trend.adviced and self.trend.direction is \"overbought\":\n self.trend.adviced = True\n self.trend.duration += 1\n signal = 0\n position = -1\n elif self.cci.result >= self.config.uplevel and self.trend.direction != 'overbought':\n self.trend.duration = 1\n self.trend.direction = 'overbought'\n self.trend.persisted = False\n self.trend.adviced = False\n if self.config.persisted == 0:\n self.trend.adviced = True\n signal = 0\n position = -1\n elif self.cci.result >= self.config.uplevel:\n self.trend.duration += 1\n if self.trend.duration >= self.config.persisted:\n self.trend.persisted = True\n\n elif self.cci.result <= self.config.downlevel and \\\n (self.trend.persisted or self.config.persisted == 0) and \\\n not self.trend.adviced and self.trend.direction == 'oversold':\n self.trend.adviced = True\n self.trend.duration += 1\n signal = 1\n position = 1\n elif self.cci.result <= self.config.downlevel and self.trend.direction != 'oversold':\n self.trend.duration = 1\n self.trend.direction = 'oversold'\n self.trend.persisted = False\n self.trend.adviced = False\n if self.config.persisted == 0:\n self.trend.adviced = True\n signal = 1\n position = 1\n elif self.cci.result <= self.config.downlevel:\n self.trend.duration += 1\n if self.trend.duration >= self.config.persisted:\n self.trend.persisted = True\n else:\n if self.trend.direction != 'nodirection':\n self.trend = CCITrend(\"nodirection\")\n else:\n self.trend.duration += 1\n signal = 0\n position = 0\n else:\n signal = 0\n position = 0\n\n self.signals[ticker].loc[history.loc[ticker].iloc[-1].name] = 0\n self.signals[ticker][\"signal\"] = signal\n self.signals[ticker][\"CCI\"] = self.cci.result\n self.signals[ticker][\"positions\"] = position\n\n def init_plot(self, plot_area):\n for ticker in self.tickers:\n self.cci_visu = plot_area.select_one({'name': ticker + '_cci'})\n if not self.cci_visu:\n self.cci_visu = plot_area.line(x='Date',\n 
y='CCI',\n source=self.signalsDataSource[ticker],\n legend_label=ticker + \" CCI\",\n line_color=\"blue\",\n name=ticker+'_cci')\n else:\n self.signalsDataSource[ticker] = self.cci_visu.data_source\n\n def plot(self):\n for ticker in self.tickers:\n if ticker not in self.signals:\n continue\n signal_data = dict(\n Date=[self.signals[ticker].iloc[-1].name],\n signal=[self.signals[ticker].iloc[-1].signal],\n CCI=[self.signals[ticker].iloc[-1].CCI],\n positions=[self.signals[ticker].iloc[-1].positions]\n )\n self.signalsDataSource[ticker].stream(signal_data)\n\n def __del__(self):\n self.reset_column_data_sources()\n\n def reset_column_data_sources(self):\n for ticker in self.tickers:\n self.signalsDataSource[ticker].data = dict(Date=[],\n signal=[],\n CCI=[],\n positions=[])\n\n","repo_name":"swordey/vitrab","sub_path":"core/strategies/CCI.py","file_name":"CCI.py","file_ext":"py","file_size_in_byte":6203,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"} +{"seq_id":"12085955710","text":"import sys\nfrom collections import deque\n\nsys.setrecursionlimit(10**6)\n\ninput = sys.stdin.readline\n\n\ndef debug(*args, sep=None):\n if sep is None and hasattr(args[0], \"__iter__\"):\n sep = \"\\n\"\n print(\"Debug:\", *args, file=sys.stderr, sep=sep)\n\n\ndef main():\n N, M = map(int, input().split())\n A = list(map(int, input().split()))\n A.sort()\n\n que = deque()\n ans = 0\n for a in A:\n que.append(a)\n while que[-1] - que[0] >= M:\n que.popleft()\n ans = max(ans, len(que))\n print(ans)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"radiol/atcoder-rye-ruff","sub_path":"abc326/c/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"21928022734","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pulp\n\ndef compute_distance(lon_a, lat_a, lon_b, lat_b):\n \"\"\"\n 緯度経度から距離を計算する\n 地点A(経度lon_a, 緯度lat_a)、地点B(経度lon_b, 緯度lat_b)\n \n Parameters\n ----------\n lon_a : float\n 地点1の経度\n lat_a : float\n 地点1の緯度\n lon_b : float\n 地点2の経度\n lat_b : float\n 地点2の緯度\n \n Returns\n -------\n rho : float\n 2地点間の距離(km)\n \"\"\"\n if (lon_a == lon_b) and (lat_a == lat_b):\n return 0.0\n \n ra = 6378.140 # equatorial radius (km)\n rb = 6356.755 # polar radius (km)\n F = (ra - rb) / ra # flattening of the earth\n rad_lat_a = np.radians(lat_a)\n rad_lon_a = np.radians(lon_a)\n rad_lat_b = np.radians(lat_b)\n rad_lon_b = np.radians(lon_b)\n pa = np.arctan(rb / ra * np.tan(rad_lat_a))\n pb = np.arctan(rb / ra * np.tan(rad_lat_b))\n xx = np.arccos(np.sin(pa) * np.sin(pb) + np.cos(pa) * np.cos(pb) * np.cos(rad_lon_a - rad_lon_b))\n c1 = (np.sin(xx) - xx) * (np.sin(pa) + np.sin(pb))**2 / np.cos(xx / 2)**2\n c2 = (np.sin(xx) + xx) * (np.sin(pa) - np.sin(pb))**2 / np.sin(xx / 2)**2\n dr = F / 8 * (c1 - c2)\n rho = ra * (xx + dr)\n return rho\n\nclass PCTSP:\n \"\"\"\n prize collecting traveling salesman problem\n\n Attributes\n ----------\n I : array of int\n 観光地名id(0以上の連続した数列)\n a : array of int or float\n 観光地の満足度\n b : array of int or float\n 観光地の滞在時間\n pos : ndarray of float\n [[x座標, y座標]_pos1, [x座標, y座標]_pos2]\n 観光地の座標\n speed : float or int\n 移動速度\n c : array of int\n 観光地ごとの費用\n m : ndarray(N,N)\n [観光地1[観光地1までの費用, 観光地2までの費用, ...], \n 観光地2[観光地1までの費用, 観光地2までの費用, ...], \n ...\n ]\n x : dict\n {(0, 1): x(0,1),\n (0, 2): x(0,2),\n (0, 3): x(0,3),...\n }\n 地点間の道を通るか否かの最適化問題の変数\n y : dict\n 観光地を訪問するか否かの変数\n f : dict\n 
道順の変数(1以上の場合)\n \"\"\"\n \n def __init__(self, I, a, b, pos, speed, c, m):\n \"\"\" \n parameters\n ----------\n I : array of int\n 観光地名id(0以上の連続した数列)\n a : array of int or float\n 観光地の満足度\n b : array of int or float\n 観光地の滞在時間\n pos : ndarray of float\n [[x座標, y座標]_pos1, [x座標, y座標]_pos2]\n 観光地の座標\n speed : float or int\n 移動速度\n c : array of int\n 観光地ごとの費用\n m : ndarray(N,N)\n [観光地1[観光地1までの費用, 観光地2までの費用, ...], \n 観光地2[観光地1までの費用, 観光地2までの費用, ...], \n ...\n ]\n \"\"\"\n self.I = I\n self.a = a\n self.b = b\n self.c = c\n self.d = [[compute_distance(pos_i[0], pos_i[1], pos_j[0], pos_j[1]) / speed \n for pos_i in pos] for pos_j in pos]\n self.m = m\n self.pos = pos\n \n \n def plot_map(self, x_min, x_max, y_min, y_max):\n \"\"\" \n マップの描画\n \n Parameters\n ----------\n x_min : float\n マップの描画範囲のx軸方向の最小値\n x_max : float\n マップの描画範囲のx軸方向の最大値\n x_min : float\n マップの描画範囲のy軸方向の最小値\n x_max : float\n マップの描画範囲のy軸方向の最大値\n \"\"\"\n plt.figure(figsize=(15,15))\n\n for pos_i, name_i, a_i, b_i in zip(self.pos, map(str, self.I), \n map(str, self.a), map(str, self.b)):\n plt.scatter(pos_i[0], pos_i[1])\n plt.annotate(f'name:{name_i}\\nsatisf:{a_i}\\nstay:{b_i}',\n xy=(pos_i[0], pos_i[1]))\n\n plt.xlim([x_min, x_max])\n plt.ylim([y_min, y_max])\n plt.grid()\n plt.show()\n \n def plot_route(self, x, x_min, x_max, y_min, y_max):\n \"\"\" \n 経路の描画\n \n Parameters\n ----------\n x : dict\n self.solveで得られた解\n x_min : float\n マップの描画範囲のx軸方向の最小値\n x_max : float\n マップの描画範囲のx軸方向の最大値\n x_min : float\n マップの描画範囲のy軸方向の最小値\n x_max : float\n マップの描画範囲のy軸方向の最大値\n \"\"\"\n \n plt.figure(figsize=(15,15))\n for pos_i, name_i, a_i, b_i in zip(self.pos, map(str, self.I), \n map(str, self.a), map(str, self.b)):\n plt.scatter(pos_i[0], pos_i[1])\n plt.annotate(f'name:{name_i}\\nsatisf:{a_i}\\nstay:{b_i}',\n xy=(pos_i[0], pos_i[1]))\n\n for i in I:\n for j in I:\n if i != j and x[i,j].value() == 1:\n plt.annotate('', xy=pos[i], xytext=pos[j], \n arrowprops=dict(shrink=0, width=1, headwidth=8, \n headlength=10, connectionstyle='arc3',\n facecolor='gray', edgecolor='gray'))\n\n plt.xlim([x_min, x_max])\n plt.ylim([y_min, y_max])\n plt.grid()\n plt.show()\n \n \n def formulate(self, start, T, C):\n \"\"\"\n 最適化問題を定式化\n \n Parameters\n ----------\n start : int\n 初期地点\n T : float\n 旅行時間\n C : int\n 旅費\n \n Returns\n -------\n problem\n pulp形式の最適化問題\n \n \"\"\"\n \n # 数理最適化問題(最大化)を宣言\n problem = pulp.LpProblem(\"problem\", pulp.LpMaximize)\n\n # 変数を定義\n self.y = {}\n for i in self.I:\n self.y[i] = pulp.LpVariable(f'y_{i}', 0, 1, pulp.LpInteger)\n\n self.x = {}\n for i in self.I:\n for j in self.I:\n if i != j:\n self.x[i,j] = pulp.LpVariable(f'x({i},{j})', 0, 1, pulp.LpInteger)\n\n self.f = {}\n for i in self.I:\n for j in self.I:\n if i != j:\n self.f[i,j] = pulp.LpVariable(f'f({i},{j})', 0, len(self.I), pulp.LpInteger)\n\n # 目的関数\n objective = pulp.lpSum(self.a[i] * self.y[i] for i in self.I)\n problem += objective\n\n # 制約条件\n ## 時間制約\n problem += pulp.lpSum(self.d[i][j] * self.x[i,j] for i in self.I for j in self.I if i != j) +\\\n pulp.lpSum(self.b[i] * self.y[i] for i in self.I) <= T\n\n ## 費用制約\n problem += pulp.lpSum(self.m[i][j] * self.x[i,j] for i in self.I for j in self.I if i != j) +\\\n pulp.lpSum(self.c[i] * self.y[i] for i in self.I) <= C\n\n ## 観光地を訪れるのは各1回\n for i in self.I:\n problem += pulp.lpSum(self.x[i,j] for j in self.I if i != j) == self.y[i]\n\n for j in self.I:\n problem += pulp.lpSum(self.x[i,j] for i in self.I if i != j) == self.y[j]\n\n ## 部分巡回路を排除\n for i in self.I:\n if i == start:\n for j in self.I:\n if i != 
j:\n problem += self.f[i,j] == 0\n continue\n\n problem += pulp.lpSum(self.f[h,i] for h in self.I if i != h) + self.y[i] == pulp.lpSum(self.f[i,j] for j in self.I if i != j)\n\n for i in self.I:\n for j in self.I:\n if i != j:\n problem += self.f[i,j] <= len(self.I) * self.x[i,j]\n\n ## スタート地点を必ず通るようにする\n problem += self.y[start] == 1\n \n return problem\n \n def solve(self, start, T, C, threads=4, timeLimit=1):\n \"\"\"\n 最適化問題を解く\n \n Attribute\n ---------\n start : int\n 初期地点\n T : float\n 旅行時間\n C : int\n 旅費\n threads : int\n 並列数\n timeLimit : int\n 問題を解く制限時間\n \n Returns\n -------\n x\n 最適化問題の解\n \"\"\"\n problem = self.formulate(start, T, C)\n solver = pulp.PULP_CBC_CMD(threads=threads, timeLimit=timeLimit)\n result_status = problem.solve(solver)\n \n # 実行可能解が存在している\n if result_status == 1:\n return self.x\n else:\n print(\"実行可能解が存在しません\")\n return False\n \n def show_route(self, start, T, C, threads=4, timeLimit=1):\n \"\"\"\n 解となる観光地idの順番を返す\n \n Attribute\n ---------\n start : int\n 初期地点\n T : float\n 旅行時間\n C : int\n 旅費\n threads : int\n 並列数\n timeLimit : int\n 問題を解く制限時間\n \n Returns\n -------\n x : list\n 観光地の巡回順\n \"\"\"\n \n ans = self.solve(start, T, C, threads=4, timeLimit=1)\n\n if ans: \n route_dict = {k: v.value() for k, v in filter(lambda v: v[1].value() >= 1, self.f.items())}\n route_dict = sorted(route_dict.items(), key=lambda x: x[1])\n route = [start] + [v[0][0] for v in route_dict]\n\n return route\n else:\n return start\n ","repo_name":"plue1011/combinatorial-optimization","sub_path":"TSP/PCTSP.py","file_name":"PCTSP.py","file_ext":"py","file_size_in_byte":10071,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"3766189143","text":"from _thread import start_new_thread\n\nfrom main.stack import Stack\nfrom main.mqtt import Subscribe, Publish\nfrom main.scheduler import Scheduler\n\n\ndef start():\n\tpublish_stack = Stack()\n\tpublish_stack.read_buffer()\n\tpublish_stack.clear_buffer()\n\n\tsubscribe_stack = Stack()\n\n\tmqtt_subscribe = Subscribe(subscribe_stack)\n\tstart_new_thread(mqtt_subscribe.connect, ())\n\n\tmqtt_publish = Publish(publish_stack)\n\tstart_new_thread(mqtt_publish.connect, ())\n\n\toperation_scheduler = Scheduler(subscribe_stack, publish_stack)\n\toperation_scheduler.start()\n","repo_name":"adenauery/EXEHDA-Gateway","sub_path":"init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"22124430463","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/9/28 20:41\n# @Author : lagelanren\n# @Email : forhaogege@163.com\n# @File : 200.py\n# @Software: PyCharm\n\nimport re, os, time\n# 正则匹配待替换得代码\nre_str = \"TCP_ERROR[\\S\\s]*?TCP_BIN_ERR[\\S\\s]*?\\);\"\n# 正则匹配参数\nre_str_params = \"\\(([\\S\\s]*)?\\);\"\n# 匹配注释\nre_str_notes = \"/\\*[\\S\\s]*?\\*/\"\n# 替换代码模板\nnew_code_template_one = 'TcpErrLog({}, {}, {}, {}, {}, {});'\nnew_code_template_two = 'TcpErrLog({}, {}, {}, {}, \"\");'\n\ndef work(base_dir):\n # result_dir = os.path.join(base_dir, \"result\")\n # print(base_dir)\n for item_file in os.listdir(base_dir):\n # 遍历文件夹目录,如果是还是文件夹,递归\n if os.path.isdir(os.path.join(base_dir, item_file)):\n work(os.path.join(base_dir, item_file))\n elif item_file.endswith(\"c\"):\n # if not os.path.exists(result_dir):\n # os.mkdir(result_dir)\n with open(os.path.join(base_dir, item_file), \"r\") as file:\n code = file.read()\n code_new = code\n waiter_code = re.findall(re_str, code)\n for item 
in waiter_code:\n part_excess = []\n item = item.replace(\"%d,\", \"%@\")\n item = item.replace(\"%u,\", \"%#\")\n item_shadow = re.sub(re_str_notes, \"\", item)\n if (\"%s\" in item_shadow):\n continue\n if(len(re.findall('TCP_', item_shadow)) > 2):\n part_excess = item_shadow.split(\";\")[0:-3]\n part_one, part_two = item_shadow.split(\";\")[-3:-1]\n else:\n part_one, part_two, _ = item_shadow.split(\";\")\n part_one = part_one.strip() + \";\"\n part_two = part_two.strip() + \";\"\n part_one_params = re.findall(re_str_params, part_one)[0].split(\",\")\n part_two_params = re.findall(re_str_params, part_two)[0].split(\",\")\n part_one_params = [item_params.strip() for item_params in part_one_params]\n part_two_params = [item_params.strip() for item_params in part_two_params]\n # print(\"+++++++++++++++++++++++++++++++\")\n # print(part_one_params)\n # print(part_two_params)\n # print(len(part_one_params[4:]))\n # print(\"++++++++++++++++++++++++++++++++\")\n if (len(part_one_params[4:]) <= 4):\n if (\"%\" in part_one_params[3]):\n item_new_code = new_code_template_one.format(part_two_params[0], part_two_params[2],\n part_one_params[2], part_one_params[3],\n '\"' + \"%\"+\"%\".join(part_one_params[3].split(\"%\")[1:]).strip(),\n \", \".join(part_one_params[4:]))\n else:\n # print(part_two_params)\n item_new_code = new_code_template_two.format(part_two_params[0], part_two_params[2],\n part_one_params[2], part_one_params[3])\n item = item.replace(\"%@\", \"%d,\")\n item_new_code = item_new_code.replace(\"%@\", \"%d,\")\n item = item.replace(\"%#\", \"%u,\")\n item_new_code = item_new_code.replace(\"%#\", \"%u,\")\n if part_excess:\n part_excess = \";\".join(part_excess) + \";\"\n item_new_code = part_excess + item_new_code\n print(item_new_code)\n code_new = code_new.replace(item, item_new_code)\n else:\n print(part_one_params)\n print(part_two_params)\n\n with open(os.path.join(base_dir, item_file), \"w\", encoding=\"GB2312\") as file:\n file.write(code_new)\n print(item_file + \"-------- processing complete\")\n\nif __name__ == '__main__':\n base_dir = input(\"Enter the folder path: \")\n work(base_dir)\n\n\n\n\n\n\n","repo_name":"haogegeya/Fivetwozero","sub_path":"200_1.py","file_name":"200_1.py","file_ext":"py","file_size_in_byte":4371,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"4"} +{"seq_id":"7161281191","text":"#!/usr/bin/env python3\nimport turtle\nimport random\nimport time\n\ncolors=['red','black','gold','blue']\n\ntim=turtle.Turtle()\ntim.speed(0)\ntim.shapesize(5,5,5)\ntim.width(10)\ntim.shape('arrow')\ntim.setpos(0,0)\n\nlen=30\ndef up():\n global len\n tim.setheading(90)\n tim.forward(len)\n\ndef down():\n global len\n tim.setheading(270)\n tim.forward(len)\n\ndef right():\n global len\n tim.setheading(0)\n tim.forward(len)\n\ndef left():\n global len\n tim.setheading(180)\n tim.forward(len)\n\ndef dragging(x,y):\n global colors\n turtle.ondrag(None)\n color1=random.choice(colors)\n color2=random.choice(colors)\n tim.color(color1,color2)\n tim.setheading(tim.towards(x,y))\n tim.goto(x,y)\n turtle.ondrag(dragging)\ndef click_left_mouse_button(x,y):\n global colors\n color1=random.choice(colors)\n color2=random.choice(colors)\n tim.color(color1,color2)\n tim.forward(50)\ndef click_right_mouse_button(x,y):\n tim.stamp()\n\ndef click_x_key():\n turtle.clearscreen()\n\nturtle.color('black')\ntim.hideturtle()\nstyle=('Courier',10,'bold')\nturtle.write(\"press arrow keys,right and left click and drag the turtle from its starting 
position\",font=style,align='center')\ntime.sleep(6)\ntim.showturtle()\nturtle.clearscreen()\n\n\nturtle.onscreenclick(click_left_mouse_button,1)\nturtle.onscreenclick(click_right_mouse_button,3)\nturtle.ondrag(dragging)\nturtle.onkey(click_x_key,'x')\nturtle.onkey(up,'Up')\nturtle.onkey(down,'Down')\nturtle.onkey(right,'Right')\nturtle.onkey(left,'Left')\nturtle.listen()\nturtle.mainloop()\n","repo_name":"perdikeas/python","sub_path":"programs/turtle_key_presses_and_events.py","file_name":"turtle_key_presses_and_events.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"4340329295","text":"import numpy\nfrom prettytable import PrettyTable\n\nn = int(input(\"Enter the number of variables or the number of eqn - \"))\nac = int(input(\"Input the number of decimal accuracy places\"))\naccuracy = float(0.5 * (10**(-ac)))\nmat = numpy.zeros((n,n + 1))\n\n\nvar = []\nprev_var = []\ncol_name = []\n\n\nfor i in range(0,n):\n col_name.append(input(\"Enter the variable name for \" + str(i + 1) + \" variable - \"))\n\n\n# eqn data input from the user\nfor i in range(0,n):\n for j in range (0,n + 1):\n if j < n : \n mat[i][j] = float(input(\"Enter the coefficient of \" + col_name[j] + \" varaiable of equation number \" + str(i + 1) + \" - \" ))\n else:\n mat[i][j] = float(input(\"Enter the constant term of equation number \" + str(i + 1) + \" - \"))\n\nprint(mat) \n\n\n#initial assumption of data from the user\nfor i in range(0,n):\n var.append(float(input(\"Enter the inital value for \" + col_name[i] + \" - \")))\n prev_var.append(0.0)\n\n\n\n\ntable = PrettyTable(col_name)\nftable = PrettyTable(col_name)\ntable.title = \"Gauss siedal method\"\n\n\nprint(var)\n\n\nprocess = True\ndata_valid = True\ncount = 1\n\n\n#data validation\nfor i in range(0,n):\n for j in range(0, n):\n if (mat[i][j] < mat[i][i]) or ((i == j) and mat[i][j] == mat[i][i]):\n data_valid = True\n else:\n data_valid = False\n process = False\n \n\nif data_valid == False:\n print(\"The input data in not in diagonal dominant form\")\n\n\n\nwhile process == True :\n\n #calculation part\n for i in range(0,n):\n y = 0\n for j in range(0 , n + 1):\n if (i == j):\n x = 1/mat[i][j]\n elif ((i != j) and (j != n)):\n y = y + (-1)*(mat[i][j])*var[j]\n elif (j == n):\n y = y + mat[i][j]\n\n var[i] = x * y\n var[i] = round(var[i],ac)\n\n\n ans_valid = 0\n\n\n table.add_row(var)\n\n\n #accuracy check\n for i in range(0,n):\n diff = abs(prev_var[i] - var[i])\n if (diff < accuracy):\n \n ans_valid = ans_valid + 1\n else:\n prev_var[i] = var[i]\n ans_valid = 0\n\n if ans_valid == n:\n process = False\n print(table)\n print(\"The final value of the variables are\")\n ftable.add_row(var)\n print(ftable)\n\n \n\n ","repo_name":"Shrawak/Gauss-sidel-method","sub_path":"gs2.py","file_name":"gs2.py","file_ext":"py","file_size_in_byte":2308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"23180164088","text":"from unittest import TestCase, mock, main\nfrom acomplamento_exemplo import page_content\n\n\nclass TestPageContent(TestCase):\n def test_page_content_deve_ser_chamada_com_http(self):\n\n with mock.patch('acomplamento_exemplo.get') as spy:\n page_content('bababa', False)\n\n spy.assert_called_with('http://bababa', None)\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"dunossauro/live-de-python","sub_path":"codigo/Live076/spy_exemplo.py","file_name":"spy_exemplo.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":1024,"dataset":"github-code","pt":"4"} +{"seq_id":"13334890435","text":"\"\"\"\nConcatenate data from the cell jpegs. The other method of creating the data did not verify that the boxes\nwere extracted correctly, leading to completely bad images. It is easy to clean out the mess by erasing the\nbad jpegs.\n\"\"\"\n\nimport numpy as np\nimport os\nimport cv2\nimport backend.constants as c\n\n\nfor number in c.CLASSES:\n X_data = []\n y_data = []\n print(number)\n directory = os.path.join(c.CELLS_DIR, str(number))\n for file_name in os.listdir(directory):\n file_path = os.path.join(directory, file_name)\n cell = cv2.imread(file_path, cv2.IMREAD_GRAYSCALE)\n X_data.append(cell)\n y_data.append(number) # that is so wrong, ... [number]*n ...\n data_name = f\"{number}_more.npz\"\n np.savez(\n os.path.join(c.DATA_PATH, data_name),\n X_data=np.array(X_data),\n y_data=np.array(y_data)\n )\n print(f\"Saved data of {len(y_data)} cell images to {data_name}\")\n","repo_name":"elnabla/WuerflerApp","sub_path":"backend/model_generation/prepare_data_from_cells.py","file_name":"prepare_data_from_cells.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"1742268118","text":"import os\nimport flask_sqlalchemy\nimport geojson\nimport json\nimport datetime as pydatetime\nimport string\nimport pytz\nimport random\n\nfrom uuid import uuid4\nfrom hashlib import sha256\nfrom geoalchemy2.shape import from_shape, to_shape\nfrom sqlalchemy import desc, inspect, text, func\nfrom sqlalchemy.sql import func\nfrom sqlalchemy.sql.functions import concat\nfrom sqlalchemy.ext.declarative import declared_attr\nfrom sqlalchemy.ext.hybrid import hybrid_property\nfrom sqlalchemy.dialects.postgresql import JSONB, UUID, INTERVAL\nfrom shapely.geometry import shape, mapping, box\nfrom itsdangerous import TimedJSONWebSignatureSerializer, BadSignature,\\\n SignatureExpired\nfrom geoalchemy2 import Geometry\nfrom blinker import Namespace\nfrom sqlalchemy import event\nfrom flask import url_for\nfrom app.grid import grid_for_bbox\nfrom app.utils import orientation_for_bbox\n\n\ndb = flask_sqlalchemy.SQLAlchemy()\ndb_signals = Namespace()\n\n\ndef _gen_secret(length=24):\n chars = string.ascii_letters + string.digits\n return ''.join(random.SystemRandom().choice(chars) for _ in range(length))\n\n\nclass Map(db.Model):\n uuid = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid4)\n secret = db.Column(db.Unicode, default=_gen_secret)\n name = db.Column(db.Unicode)\n description = db.Column(db.Unicode)\n place = db.Column(db.Unicode)\n _datetime = db.Column(db.DateTime(timezone=True), default=func.now())\n _bbox = db.Column(Geometry('POLYGON'))\n features = db.relationship('Feature', backref='map', lazy=True, order_by=\"Feature.id\", cascade=\"all, delete-orphan\")\n attributes = db.Column(JSONB)\n published = db.Column(db.Boolean, default=False)\n lifespan = db.Column(db.Interval, default=pydatetime.timedelta(days=30))\n theme = db.Column(db.Unicode, default='bright')\n\n on_created = db_signals.signal('map-created')\n on_updated = db_signals.signal('map-updated')\n on_deleted = db_signals.signal('map-deleted')\n\n def __init__(self, name, **kwargs):\n self.name = name\n\n for k,v in kwargs.items():\n 
setattr(self, k,v)\n\n @property\n def serializer(self):\n if not hasattr(self, '_serializer'):\n self._serializer = TimedJSONWebSignatureSerializer(self.secret,\n expires_in=60*60*2)\n return self._serializer\n\n @classmethod\n def all(cls):\n return db.session.query(Map).filter(Map._bbox.isnot(None),\n Map.published.is_(True), \\\n Map.outdated.is_(False)) \\\n .order_by(desc(Map._datetime)) \\\n .all()\n\n @classmethod\n def get(cls, uuid):\n try:\n return db.session.query(Map).filter(Map.uuid == uuid).first()\n except:\n pass\n return None\n\n @classmethod\n def find(cls, name):\n try:\n return db.session.query(Map).filter(Map.name == name).first()\n except:\n pass\n return None\n\n @classmethod\n def delete(cls, uuid):\n db.session.delete(cls.get(uuid))\n db.session.commit()\n\n @hybrid_property\n def outdated(self):\n now = pydatetime.datetime.utcnow().replace(tzinfo=pytz.UTC)\n return self._datetime < (now - self.lifespan)\n\n @outdated.expression\n def outdated(cls):\n return cls._datetime < (func.now()-cls.lifespan)\n\n @property\n def orientation(self):\n if self._bbox is None:\n return ''\n return orientation_for_bbox(*self.bbox)\n\n @property\n def grid(self):\n if (self.bbox):\n cells = {\n '': [7, 7],\n 'landscape': [9, 5],\n 'portrait': [5, 9],\n }\n return grid_for_bbox(*self.bbox, *cells[self.orientation], 'violet')\n return None\n\n @property\n def version(self):\n data = self.to_dict(False, features_included=True)\n raw = json.dumps(data, separators=(',', ':'), sort_keys=True)\n return sha256(raw.encode()).hexdigest()\n\n @hybrid_property\n def datetime(self):\n return self._datetime.astimezone(pydatetime.timezone.utc)\n\n @datetime.setter\n def datetime(self, d):\n self._datetime = d\n\n @hybrid_property\n def time(self):\n return self.datetime.time()\n\n @time.setter\n def time(self, t):\n args = {'hour': t.hour, 'minute': t.minute, 'second': t.second}\n if self._datetime:\n self._datetime.replace(**args)\n else:\n self._datetime = pydatetime.datetime(**args)\n\n @hybrid_property\n def date(self):\n return self.datetime.date()\n\n @date.setter\n def date(self, value):\n args = [value.year, value.month, value.day]\n if self._datetime:\n self._datetime.replace(*args)\n else:\n self._datetime = pydatetime.datetime(*args)\n\n @hybrid_property\n def bbox(self):\n if (self._bbox is None):\n return None\n return to_shape(self._bbox).bounds\n\n @bbox.setter\n def bbox(self, value):\n if (value is not None):\n self._bbox = from_shape(box(*value))\n\n def to_dict(self, version_included=True, secret_included=False, grid_included=False, features_included=False):\n data = {\n 'id': self.uuid.hex,\n 'name': self.name,\n 'description': self.description,\n 'datetime': self.datetime.isoformat(),\n 'attributes': self.attributes if self.attributes else [],\n 'bbox': self.bbox,\n 'place': self.place,\n 'lifespan': self.lifespan.days,\n 'published': self.published,\n 'theme': self.theme\n }\n\n if version_included:\n data['version'] = self.version\n\n if secret_included:\n data['secret'] = self.secret\n\n if grid_included:\n data['grid'] = self.grid\n\n if features_included:\n data['features'] = [f.to_dict() for f in self.features]\n\n return data\n\n def gen_token(self):\n return self.serializer.dumps(self.uuid.hex).decode('utf-8')\n\n def check_token(self, token):\n try:\n self.serializer.loads(token)\n except SignatureExpired:\n return False # valid token, but expired\n except BadSignature:\n return False # invalid token\n\n return True\n\n def publish(self):\n self.published = True\n 
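# set the flag before persisting: Map.all() above only returns rows with published == True\n        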
db.session.add(self)\n db.session.commit()\n\n\nclass Feature(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n map_uuid = db.Column(UUID(as_uuid=True), db.ForeignKey('map.uuid'), nullable=False)\n _geo = db.Column(Geometry())\n style = db.Column(JSONB)\n\n on_created = db_signals.signal('feature-created')\n on_updated = db_signals.signal('feature-updated')\n on_deleted = db_signals.signal('feature-deleted')\n\n def __init__(self, feature):\n if 'geometry' in feature:\n self.geo = feature['geometry']\n\n if 'properties' in feature:\n self.style = feature['properties']\n\n @classmethod\n def get(cls, id):\n return db.session.query(Feature).get(id)\n\n @hybrid_property\n def geo(self):\n return mapping(to_shape(self._geo))\n\n @geo.setter\n def geo(self, value):\n if 'properties' in value:\n self.style = value['properties']\n\n self._geo = from_shape(shape(value))\n\n def to_dict(self):\n properties = self.style.copy() if self.style else {}\n\n properties['id'] = self.id\n properties['map_id'] = self.map.uuid.hex\n\n return geojson.Feature(geometry=self.geo, properties=properties)\n\n\n@event.listens_for(db.session, 'after_commit')\ndef receive_after_commit(session):\n for action in ['created', 'updated', 'deleted']:\n if action in session.info:\n for (cls, data) in session.info[action]:\n getattr(cls, 'on_' + action).send(data)\n\n\n@event.listens_for(db.session, 'before_flush')\ndef receive_before_flush(session, flush_context, instances):\n events = [('created', session.new), ('updated', session.dirty),\n ('deleted', session.deleted)]\n for (action, instances) in events:\n session.info[action] = [obj for obj in instances\n if action == 'deleted' or\n session.is_modified(obj, False)]\n\n\ndef _get_history(obj):\n hist = {}\n for attr in inspect(obj).attrs:\n if attr.history.has_changes():\n try:\n hist[attr.key] = attr.history.deleted[-1]\n except IndexError:\n pass\n return hist\n\n@event.listens_for(db.session, 'after_flush')\ndef receive_after_flush(session, flush_context):\n for action in ['created', 'updated', 'deleted']:\n if action in session.info:\n session.info[action] = [(obj.__class__, {\n 'new': obj.to_dict(), 'old': _get_history(obj)\n }) for obj in session.info[action]]\n","repo_name":"aktionskarten/backend","sub_path":"app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":9106,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"35674998741","text":"data_list = ['gpcr', 'enzyme', 'ic', 'nr']\nlambda_type_list = ['lambda', 'lambdaA', 'lambdaB']\n\nfor data in data_list:\n\tfor lambda_type in lambda_type_list:\n\t\tfr = open('result_1times10fold_msmf_weight_4sim_effect_' + lambda_type + '_' + data + '.txt','r');\n\n\t\tline = []\n\n\t\tfor i in fr:\n\t\t\ti = i.strip('\\n')\n\t\t\ti = i.split(',')\n\t\t\tif(i[0] == 'aupr'):\n\t\t\t\tcontinue\n\t\t\tline.append(i[0])\n\n\t\tauprStr = ','.join(line) + '\\n'\n\n\t\tfw = open('mstmf_weight_' + lambda_type + '_' + data + '.txt','a')\n\t\tfw.writelines(auprStr)\n\t\tfw.close()\n\n\t\tfr.close()","repo_name":"metropolis-x/drug-target-github","sub_path":"msmf-final/result/msmf-weight/get-AUPR-lambda.py","file_name":"get-AUPR-lambda.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"34529483825","text":"# -*- coding:utf-8 -*-\n# @Time : 2018/12/26 17:45\n# @Author : Hodge\n# @Desc:\n\nclass Solution(object):\n def thirdMax(self, nums):\n \"\"\"\n 
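Return the third-largest distinct value of nums; with fewer than three distinct values the maximum is returned instead, e.g. thirdMax([3, 2, 1]) -> 1 and thirdMax([1, 2]) -> 2.\n        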
:type nums: List[int]\n :rtype: int\n \"\"\"\n nums = set(nums)\n # print(nums)\n if len(nums) < 3:\n return max(nums)\n else:\n nums = list(nums)\n nums.sort()\n # print(nums)\n return nums[-3]\n\nA = [5,5,5,5,-2,4,-1,2,1,0]\nsl = Solution()\nprint(sl.thirdMax(A))","repo_name":"Hodgeli/LeetCode","sub_path":"array/easy/414.py","file_name":"414.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"24192013","text":"def unescaped_length(line):\n\tline = line[1:len(line)-1]\n\tlength = 0\n\tskip = 0\n\tfor i in range(0, len(line)):\n\t\tif skip > 0:\n\t\t\tskip -= 1\n\t\t\tcontinue\n\t\t\n\t\tc = line[i]\n\t\tlength += 1\n\t\tif c == \"\\\\\": \n\t\t\td = line[i+1]\n\t\t\tif d == '\"':\n\t\t\t\tskip = 1\n\t\t\telif d == \"\\\\\":\n\t\t\t\tskip = 1\n\t\t\telif d == \"x\":\n\t\t\t\tskip = 3\n\t# print(\"'{}' ({})\".format(line, length))\n\treturn length\n\t\ndef encode(line):\n\toutput = \"\"\n\tfor char in line:\n\t\tif char == '\\\"':\n\t\t\toutput += '\\\\\\\"'\n\t\telif char == '\\\\':\n\t\t\toutput += '\\\\\\\\'\n\t\telse:\n\t\t\toutput += char\n\toutput = '\\\"{}\\\"'.format(output)\n\tprint(\"{} -> {}\".format(line, output))\n\treturn output\n\t\t","repo_name":"simonrozsival/advent-of-code","sub_path":"08/func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"36090582281","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n__author__ = \"Christian Heider Nielsen\"\n\n__doc__ = \"\"\"\nCreated on 27/04/2019\n\n@author: cnheider\n\"\"\"\n\n\nfrom typing import Sequence\n\nimport numpy\nfrom pycm import ConfusionMatrix\nfrom sorcery import dict_of\nfrom warg.data_structures.named_ordered_dictionary import NOD\n\nfrom munin.html_embeddings import MetricEntry, generate_math_html\n\n\ndef generate_metric_table(\n truths: Sequence, predictions: Sequence, categories: Sequence, decimals: int = 1\n) -> Sequence[MetricEntry]:\n \"\"\"\n\n :param truths: ground-truth labels\n :param predictions: predicted labels\n :param categories: ordered class names used to relabel the confusion matrix\n :param decimals: rounding precision for the reported values\n :return: flat (name, value) tuples of per-class and macro-averaged metrics\n \"\"\"\n\n cm = ConfusionMatrix(actual_vector=truths, predict_vector=predictions)\n cm.relabel({k: v for k, v in zip(range(len(categories)), categories)})\n\n support = MetricEntry(\n \"Occurrence of each class (P)\",\n generate_math_html(\"TP+FN\"),\n {k: numpy.round(v, decimals) for k, v in cm.P.items()},\n numpy.round(sum(cm.P.values()) / len(categories), decimals),\n )\n\n sensitivity = MetricEntry(\n \"True Positive Rate (TPR)\",\n generate_math_html(\"\\dfrac{TP}{TP+FN}\"),\n {k: numpy.round(v, decimals) for k, v in cm.TPR.items()},\n numpy.round(sum(cm.TPR.values()) / len(categories), decimals),\n )\n\n specificity = MetricEntry(\n \"True Negative Rate (TNR)\",\n generate_math_html(\"\\dfrac{TN}{TN+FP}\"),\n {k: numpy.round(v, decimals) for k, v in cm.TNR.items()},\n numpy.round(sum(cm.TNR.values()) / len(categories), decimals),\n )\n\n precision = MetricEntry(\n \"Positive Predictive Rate (PPV)\",\n generate_math_html(\"\\dfrac{TP}{TP+FP}\"),\n {k: numpy.round(v, decimals) for k, v in cm.PPV.items()},\n numpy.round(sum(cm.PPV.values()) / len(categories), decimals),\n )\n\n npv = MetricEntry(\n \"Negative Predictive Value (NPV)\",\n generate_math_html(\"\\dfrac{TN}{TN+FN}\"),\n {k: numpy.round(v, decimals) for k, v in cm.NPV.items()},\n numpy.round(sum(cm.NPV.values()) / len(categories), decimals),\n )\n\n accuracy = MetricEntry(\n        
\"Trueness\",\n generate_math_html(\"\\dfrac{TP+TN}{TP+TN+FP+FN}\"),\n {k: numpy.round(v, decimals) for k, v in cm.ACC.items()},\n numpy.round(sum(cm.ACC.values()) / len(categories), decimals),\n )\n\n f1_score = MetricEntry(\n \"Harmonic mean of precision and sensitivity\",\n generate_math_html(\"2*\\dfrac{PPV*TPR}{PPV+TPR}\"),\n {k: numpy.round(v, decimals) for k, v in cm.F1.items()},\n numpy.round(sum(cm.F1.values()) / len(categories), decimals),\n )\n\n mcc = MetricEntry(\n \"Matthews correlation coefficient\",\n generate_math_html(\"\\dfrac{TP*TN-FP*FN}{\\sqrt{(TP+FP)(TP+FN)(TN+FP)(TN+FN)}}\"),\n {k: numpy.round(v, decimals) for k, v in cm.MCC.items()},\n numpy.round(sum(cm.MCC.values()) / len(categories), decimals),\n )\n\n roc_auc = MetricEntry(\n \"Receiver Operating Characteristics (ROC), \"\n \"Sensitivity vs (1 − Specificity), \"\n \"(True Positive Rate vs False Positive Rate), \"\n \"Area Under the Curve (AUC)\",\n generate_math_html(\"\\dfrac{TNR+TPR}{2}\"),\n {k: numpy.round(v, decimals) for k, v in cm.AUC.items()},\n numpy.round(sum(cm.AUC.values()) / len(categories), decimals),\n )\n\n return NOD(\n dict_of(\n support,\n sensitivity,\n specificity,\n precision,\n npv,\n accuracy,\n f1_score,\n mcc,\n roc_auc,\n )\n ).as_flat_tuples()\n","repo_name":"cnheider/munin","sub_path":"munin/plugins/dynamic/cf.py","file_name":"cf.py","file_ext":"py","file_size_in_byte":3585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"38616890488","text":"from pprint import pprint\nfrom lxml import html\nimport requests\nfrom datetime import datetime\n\nfrom pymongo import MongoClient\nfrom pymongo.errors import DuplicateKeyError as dke\n\nheader = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 YaBrowser/21.9.2.169 Yowser/2.5 Safari/537.36'\nadress_lenta = 'https://lenta.ru/parts/news/'\nresponse_lenta = requests.get(adress_lenta)\n\ndom_lenta = html.fromstring(response_lenta.text)\n# items = dom.xpath(\"//a[contains(@class,'link')]\")\nitems_lenta = dom_lenta.xpath(\"//div[contains(@class,'item news')]\")\n\nnews_lenta = []\n\nfor item in items_lenta:\n txt_upd = []\n dt_upd = []\n txt = item.xpath(\".//h3/a[@target]//text()\")\n for i in txt:\n i = i.replace(\"\\xa0\", \" \")\n txt_upd.append(i)\n link = item.xpath(\".//@data-more-url\")\n dt = item.xpath(\".//div[@class ='info g-date item__info']//text()\")[-1] #\n dt_upd.append(dt)\n\n piece_of_news = {}\n\n piece_of_news['source'] = 'https://lenta.ru/parts/news/'\n piece_of_news['txt_upd'] = txt_upd\n piece_of_news['link'] = link\n piece_of_news['date'] = datetime.today().strftime('%Y-%m-%d')\n piece_of_news['time'] = dt_upd\n\n news_lenta.append(piece_of_news)\n \n \npprint(news_lenta)\n##############################################\nadress_mail = 'https://news.mail.ru/'\nresponse_mail = requests.get(adress_mail)\n\ndom_mail = html.fromstring(response_mail.text)\n# items = dom.xpath(\"//a[contains(@class,'link')]\")\nitems_mail = dom_mail.xpath(\"//li[contains(@class,'list__item')]\")\n\nnews_mail = []\nfor item in items_mail:\n txt = item.xpath(\".//span[@class ='link__text']//text()\")\n link = item.xpath(\".//span[@class ='link__text']/../@href\")\n dt = item.xpath(\".//span[@class ='link__text']/../../span[@class ='newsitem__param js-ago']/@datetime\")\n\n piece_of_news = {}\n\n piece_of_news['source'] = 'https://news.mail.ru/'\n piece_of_news['txt'] = txt\n piece_of_news['link'] = link\n piece_of_news['dt'] = dt\n\n 
news_mail.append(piece_of_news)\n##############################################\nclient = MongoClient('127.0.0.1', 27017)\n#db.recent_news.delete_many()\ndb=client['news_db']\nnews_mail_db=db.news_mail\nnews_lenta_db=db.news_lenta\n##############################################\n\nfor n in news_lenta:\n news_lenta_db.insert_one(n)\n\nfor n in news_mail:\n news_mail_db.insert_one(n)\n \n# Questions: Lenta parsed relatively cleanly; the only part I could not work out is how to build a proper link: joining with \"|\" refuses to work, since one element is a string and the other is \"Element a at 0x181f5860900\"\n# mail.ru went less smoothly. I do not understand how to collect all the news items: some of them never show up, and querying other tags returns nothing either. \n# I also could not figure out how to reach the date... I try moving up a directory level or two, likewise without success.\n ","repo_name":"ooduvan4ik/PYTHON4","sub_path":"homework_4.py","file_name":"homework_4.py","file_ext":"py","file_size_in_byte":3200,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"12444207154","text":"import unittest\nimport sys \nimport logging\n\n#logging.basicConfig(level=logging.DEBUG)\nsys.path.append(\"/home/adityas/Projects/ALC-reasoner/\")\n\nfrom reasoner.knowledgebase.axioms import And,Or,Not,ClassAssertion,RoleAssertion,TBoxAxiom,Subsumption\nfrom reasoner.common.constructors import Concept,All,Some,Instance\nfrom reasoner.reasoning.tableau import *\n\nclass TestTableau(unittest.TestCase):\n\n def setUp(self):\n _axiom=Some(\"hasParent\",And(Concept(\"Man\"),Concept(\"Human\")))\n self.pre_graph=get_models({},_axiom,\"Aditya\")[0]\n\n def test_simple_and(self):\n axiom=And(And(Concept(\"Man\"),Concept(\"Living\")),And(Concept(\"Machine\"),Concept(\"Terminator\")))\n models=get_models({},axiom,\"Aditya\")\n self.assertTrue(is_model_consistent(models))\n\n def test_unsat_and(self):\n axiom=And(And(Concept(\"Man\"),Concept(\"Living\")),And(Not(Concept(\"Man\")),Concept(\"Terminator\")))\n models=get_models({},axiom,\"Aditya\")\n self.assertFalse(is_model_consistent(models))\n\n def test_simple_or(self):\n axiom=Or(Concept(\"Man\"),Concept(\"Terminator\"))\n models=get_models({},axiom,\"Aditya\")\n self.assertTrue(is_model_consistent(models))\n\n def test_complex_and_or(self):\n axiom=Or(And(Concept(\"Man\"),Not(Concept(\"Man\"))),And(Concept(\"Machine\"),Or(Not(Concept(\"Machine\")),Concept(\"Machine\"))))\n models=get_models({},axiom,\"Aditya\")\n self.assertTrue(is_model_consistent(models))\n\n def test_simple_some(self):\n axiom=Some(\"hasParent\",And(Concept(\"Man\"),Concept(\"Human\")))\n models=get_models({},axiom,\"Aditya\")\n self.assertTrue(is_model_consistent(models))\n\n def test_simple_all(self):\n axiom=All(\"hasParent\",And(Concept(\"Engineer\"),Concept(\"Graduate\")))\n models=get_models(self.pre_graph,axiom,\"Aditya\")\n #print(models)\n\nif __name__==\"__main__\":\n unittest.main()\n","repo_name":"dityas/Athene","sub_path":"tests/test_tableau.py","file_name":"test_tableau.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"4"} +{"seq_id":"30293573195","text":"from base64 import b64encode\nfrom wand.image import Image\nfrom datetime import datetime\nimport exifread\n\n\ndef filename_to_timestamp(fname):\n print(\"open %s\" % fname)\n f = open(fname, 'rb')\n print(\"open!!\")\n dateformat = '%Y:%m:%d %H:%M:%S'\n tags = exifread.process_file(f, details=False)\n dt_tags = 
[\"Image DateTime\", \"EXIF DateTimeOriginal\", \"DateTime\"]\n\n times = set()\n for d in dt_tags:\n if d in tags:\n times.add(datetime.strptime(str(tags[d]), dateformat))\n\n l = list(times)\n if len(l) > 0:\n ret = l[0]\n else:\n ret = datetime.now()\n\n return ret.timestamp()\n\n\ndef orientation_to_rotation(orientation):\n rotation = 0\n\n if orientation == 6:\n rotation = 90\n if orientation == 3:\n rotation = 180\n if orientation == 8:\n rotation = 270\n\n return rotation\n\n\ndef fix_and_encode(fname):\n ret = \"\"\n tag = \"exif:Orientation\"\n with Image(filename=fname) as i:\n orientations = [v for k, v in i.metadata.items() if k == tag]\n if len(orientations) > 0:\n orientation = orientations[0]\n i.rotate(orientation_to_rotation(orientation))\n\n i.compression_quality = 15\n ret = i.make_blob()\n\n return b64encode(ret).decode('ascii')\n","repo_name":"DavidVentura/denuncia-vial","sub_path":"lib/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"74733531958","text":"#!/usr/bin/python3\n\"\"\"Get data from the json\nplace holder api.\"\"\"\n\n\nimport requests\nimport sys\n\n\ndef run():\n \"\"\"Begin code execution\"\"\"\n url = 'https://jsonplaceholder.typicode.com/'\n name = requests.get(\n url + f\"users/{sys.argv[1]}\"\n ).json().get('name')\n if not name:\n return # user doesn't exists\n response = requests.get(url + f\"users/{sys.argv[1]}/todos\")\n json = response.json()\n doneTasks = [task for task in json if task.get('completed') is True]\n print(\n f\"Employee {name} is done with\",\n f\"tasks({len(doneTasks)}/{len(json)}):\"\n )\n for task in doneTasks:\n print(f\"\\t{task.get('title')}\")\n\n\nif __name__ == '__main__':\n run()\n","repo_name":"Arfs6/alx-system_engineering-devops","sub_path":"0x15-api/0-gather_data_from_an_API.py","file_name":"0-gather_data_from_an_API.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"23035191923","text":"from PyQt5.QtSql import QSqlDatabase\nfrom PyQt5.QtWidgets import QMessageBox\n\ndef createConnection():\n con = QSqlDatabase.addDatabase(\"QSQLITE\")\n con.setDatabaseName(\"contacts.sqlite\")\n\n if not con.open():\n QMessageBox.critical(\n None,\n \"QTableView Example - Error!\",\n \"Database Error: %s\" % con.lastError().databaseText(),\n )\n return False\n return True\n","repo_name":"giongy/pythonProject111","sub_path":"sqlHelper.py","file_name":"sqlHelper.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"10850675430","text":"# vi: set ft=python sts=4 ts=4 sw=4 et:\n\nfrom smartydoc.ipynb2html import IpyNBHTMLParser\n\ndef module_test():\n # load test.html\n content = open('test.html').readlines()\n content = [line.strip() for line in content]\n content = [line for line in content if len(line)]\n\n parser = IpyNBHTMLParser(include_foreword=False)\n for line in content:\n #print(line)\n #print(parser.tag_stack)\n parser.feed(line)\n\n parser.export2html('test_standard.html', toc_level=1,\n include_article_summary=False)\n\n\nif __name__=='__main__':\n 
module_test()\n\n","repo_name":"sealhuang/SmartyDoc","sub_path":"smartydoc/samples/html2standard_sample.py","file_name":"html2standard_sample.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"4"} +{"seq_id":"33805838945","text":"import requests,sys,os,subprocess,tempfile,shutil\n\ndef _rmdir(path):\n if (os.path.exists(path)):\n shutil.rmtree(path)\n\npath = tempfile.gettempdir().replace('\\\\', '/') + '/newxp/'\nmod = path + 'module/'\nout = mod.replace('module', 'out')\n\nflag=int(sys.argv[1])\nip=sys.argv[2]\n\ntry:\n _rmdir(mod+'rhawk/')\nexcept:\n pass\n\nif not os.path.exists(mod+'rhawk/'):\n os.system('cd \"'+mod+'\" && git clone https://github.com/17ack312/rhawk.git \"'+mod+'rhawk/\" --quiet')\n\ntry:\n res=os.popen('php -f \"'+mod+'rhawk/rhawk.php\" '+str(flag)+' \"'+ip+'\"').read()\nexcept:\n res=None\n\nprint(res)\n","repo_name":"17ack312/UGC","sub_path":"rhawk.py","file_name":"rhawk.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"24615214441","text":"from tqdm import tqdm\nimport os\nimport numpy as np\nimport pandas as pd\nfrom ogb.lsc import PCQM4Mv2Dataset\nimport ogb\nimport rdkit\nfrom rdkit import Chem\nfrom rdkit.Chem import AllChem\n\nprint(rdkit.__version__) #2021.03.5\nprint(ogb.__version__) #1.3.3\n\n# download sdf for pcqm4m-v2 dataset\n# !wget http://ogb-data.stanford.edu/data/lsc/pcqm4m-v2-train.sdf.tar.gz\n# !tar -xf pcqm4m-v2-train.sdf.tar.gz\n\nsuppl = Chem.SDMolSupplier('pcqm4m-v2-train.sdf')\n\n# get property values (HOMO-LUMO gap) for dataset\ndataset = PCQM4Mv2Dataset(root = '.', only_smiles = True)\nprop_values=[]\nfor dat in dataset:\n prop_values.append(dat[1])\nprop_values_arr=np.array(prop_values)\n\n\n#generate canonical SMILEs list\natomlist=[]\nsmilesall=[]\nmoliall=[]\nfail=[]\nisomericSmiles=False # chirality not considered\nkekuleSmiles=True\nfor moli, mol in enumerate(tqdm(suppl)): \n mol=suppl[moli]\n mol=Chem.RemoveHs(mol)\n Chem.Kekulize(mol, clearAromaticFlags=True)\n try:\n smile = Chem.MolToSmiles(mol,isomericSmiles=isomericSmiles, kekuleSmiles=kekuleSmiles, canonical=True)\n smilesall.append(smile)\n moliall.append(moli)\n except:\n fail.append(moli)\n for atom in mol.GetAtoms():\n atomidx=atom.GetAtomicNum()\n if atomidx not in atomlist:\n atomlist.append(atomidx)\n\nsmilesall2=np.array(smilesall)\n\n# convert SMILE list to dictionary for faster lookup\nsmilesdict={}\nfor sidx, s in enumerate(smilesall):\n smilesdict[s]=sidx\n \n \n\nresults_all=[]\nverbose=False\ngeneratedsmilelist=[]\nsinglebond = list(Chem.MolFromSmiles(\"CC\").GetBonds())[0]\nfor molidx in range(len(suppl)):\n if molidx%10000==0:\n print(molidx, end=', ')\n mol=suppl[molidx]\n Chem.Kekulize(mol, clearAromaticFlags=True)\n if mol:\n results_arr=[]\n canrm=[]\n hetero=[]\n for atomi, atom in enumerate(mol.GetAtoms()):\n numnb=len(atom.GetNeighbors())\n if numnb==1 and atom.GetAtomicNum()==6:\n canrm.append(atomi)\n if atom.GetAtomicNum()!=6:\n hetero.append(atomi)\n #print(atomi, numnb)\n nonsingle=[]\n inring=[]\n for bondi, bond in enumerate(mol.GetBonds()):\n bondtyp=bond.GetBondType()\n if bondtyp!=Chem.BondType.SINGLE:\n nonsingle.append(bondi)\n if bond.IsInRing() and bondtyp==Chem.BondType.SINGLE:\n inring.append([bondi, bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()])\n # remove atom if carbon end atom\n for rmidx in canrm:\n molcopy= Chem.RWMol(mol)\n molcopy.RemoveAtom(rmidx)\n            
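# RemoveAtom edits the RWMol copy in place; the freed valence on the former neighbour is filled with implicit hydrogens when the SMILES is written below\n            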
generatedsmile=Chem.MolToSmiles(molcopy,isomericSmiles=isomericSmiles, kekuleSmiles=kekuleSmiles)\n if generatedsmile in smilesdict:\n match=smilesdict[generatedsmile]\n if verbose:\n print(molidx,'r', rmidx, ':', match)\n results_arr.append([molidx, 'r', rmidx, match])\n # change atom to C if heteroatom\n for cidx in hetero:\n molcopy= Chem.RWMol(mol)\n (molcopy.GetAtoms()[cidx]).SetAtomicNum(6)\n try:\n generatedsmile=Chem.MolToSmiles(molcopy,isomericSmiles=isomericSmiles, kekuleSmiles=kekuleSmiles)\n if generatedsmile in smilesdict:\n match=smilesdict[generatedsmile]\n if verbose:\n print(molidx,'c', cidx, ':', match)\n results_arr.append([molidx,'c', cidx, match])\n except:\n match=0\n # saturate bond\n for bidx in nonsingle:\n molcopy= Chem.RWMol(mol)\n molcopy.ReplaceBond(bidx, singlebond, preserveProps=False)\n try:\n generatedsmile=Chem.MolToSmiles(molcopy,isomericSmiles=isomericSmiles, kekuleSmiles=kekuleSmiles)\n if generatedsmile in smilesdict:\n match=smilesdict[generatedsmile]\n if verbose:\n print(molidx,'b', bidx, ':', match)\n results_arr.append([molidx,'b', bidx, match])\n except:\n match=0\n # break ring bond if saturated\n for didx in inring:\n molcopy= Chem.RWMol(mol)\n molcopy.RemoveBond(didx[1],didx[2])\n try:\n generatedsmile=Chem.MolToSmiles(molcopy,isomericSmiles=isomericSmiles, kekuleSmiles=kekuleSmiles)\n if generatedsmile in smilesdict:\n match=smilesdict[generatedsmile]\n if verbose:\n print(molidx,'d', didx[0], ':', match)\n results_arr.append([molidx,'d', didx[0], match])\n except:\n match=0\n if results_arr!=[]:\n results_all.append(np.array(results_arr))\n \n \nresults_all2=np.vstack(results_all)\n\n# split atomwise, bondwise\nidx_first=results_all2[:,0].astype('int')\noperatoridx=results_all2[:,1]\noperatoridx[operatoridx=='c']=0\noperatoridx[operatoridx=='r']=1\noperatoridx[operatoridx=='b']=2\noperatoridx[operatoridx=='d']=3\noperatoridx=operatoridx.astype('int')\natombondidx=results_all2[:,2].astype('int')\nidx_second=results_all2[:,3].astype('int')\natomwise_idx=np.argwhere(operatoridx<2)[:,0]\nbondwise_idx=np.argwhere(operatoridx>=2)[:,0]\n\n# get explanation values for pairs\nexplain_val=prop_values_arr[idx_first]-prop_values_arr[idx_second]\nresults_modif=np.vstack((idx_first, operatoridx,atombondidx,idx_second,explain_val)).T\n\n# save to csv for atomwise\ndf = pd.DataFrame(results_modif[atomwise_idx,:])\nlist_columns=['molecule index', 'operation index', 'atom index', 'paired molecule index', 'explanation value']\n\ndf.columns =list_columns\nfor key in list_columns[:-1]:\n print(key)\n tmp=df[key].values.astype(int)\n df[key] = tmp\n\nkey=list_columns[-1]\ntmp=df[key].values\ndf[key] = np.round(tmp, 10)\ndf.to_csv(\"ground-truth-explainability-PCQM4Mv2-atomwise.csv\",index=False)\n\n# save to csv for bondwise\ndf2 = pd.DataFrame(results_modif[bondwise_idx,:])\nlist_columns=['molecule index', 'operation index', 'bond index', 'paired molecule index', 'explanation value']\n\ndf2.columns =list_columns\nfor key in list_columns[:-1]:\n print(key)\n tmp=df2[key].values.astype(int)\n df2[key] = tmp\n\nkey=list_columns[-1]\ntmp=df2[key].values\ndf2[key] = np.round(tmp, 
10)\ndf2.to_csv(\"ground-truth-explainability-PCQM4Mv2-bondwise.csv\",index=False)\n","repo_name":"evandieren/GNN_Explainability","sub_path":"experiments/GroundTruthDataset/script-ground-truth-explainability-PCQM4Mv2-generate.py","file_name":"script-ground-truth-explainability-PCQM4Mv2-generate.py","file_ext":"py","file_size_in_byte":6447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"10453133714","text":"import yaff\nimport numpy as np\n\n\nyaff.log.set_level(yaff.log.silent)\n\n\ndef assert_tol(a, b, tol):\n \"\"\"Asserts the relative error of b with respect to a is less than tol\n\n Parameters\n ----------\n\n a, b : array_like\n arrays to be compared against each other\n\n tol : float\n error tolerance\n\n \"\"\"\n norm = np.linalg.norm(a)\n if norm > 0.0:\n if (isinstance(a, np.ndarray) or isinstance(b, np.ndarray)):\n delta = np.mean(np.linalg.norm(a - b, axis=1))\n else:\n delta = np.abs(a - b)\n assert np.all(delta / norm < tol)\n","repo_name":"svandenhaute/openyaff","sub_path":"openyaff/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"} +{"seq_id":"2370380493","text":"# Let d(n) be defined as the sum of proper divisors of n (numbers less than n which divide evenly into n).\n# If d(a) = b and d(b) = a, where a ≠ b, then a and b are an amicable pair and each of a and b are called amicable numbers.\n#\n# For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55 and 110; therefore d(220) = 284. The proper divisors of 284 are 1, 2, 4, 71 and 142; so d(284) = 220.\n#\n# Evaluate the sum of all the amicable numbers under 10000.\n\nimport numpy as np\ndivSum = [1,1]\namicableSum = 0\npair1, pair2 = [], []\n\nfor i in range(2, 10000):\n divisors = []\n\n for j in range(1,int(i/2 + 1) ):\n if i%j==0:\n divisors.append(j)\n # print (divisors)\n divSum.append(int(np.sum(np.asarray(divisors))))\n# print (divSum)\n# print (divSum.index(284))\n\nfor p in range(1,10000):\n\n if divSum[p]<10000:\n if p == divSum[divSum[p]] and p!=divSum[p]:\n # pairr = tuple((p, divSum[p]))\n # if pairr not in pairs:\n # pairs.append(pairr)\n pair1.append(p)\n pair2.append(divSum[p])\n amicableSum += (p + divSum[p])\n print (\"Pairs are %d , %d \" %(p, divSum[p]))\nprint (pair1)\nprint (pair2)\nprint (\"Sum of amicable numbers below 10000 is %d\" %(sum(pair1)))\n# print (divSum)\n","repo_name":"kumar-akshay324/projectEuler","sub_path":"proj_euler_q021.py","file_name":"proj_euler_q021.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"29238616970","text":"import numpy as np\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import QMainWindow, QWidget, QPushButton, QMessageBox, QApplication, QFileDialog, QSlider\n\n#from PyQt5 import QtCore, QtGui, QtWidgets\n#from PyQt5.QtCore import Qt\n\n######################################################################################################\n######################################################################################################\nclass ALPHA_BETTA(QtWidgets.QWidget):\n \"\"\"\n Kalman Gain Control\n \"\"\"\n def __init__(self, pbox, *args, **kwargs):\n super(ALPHA_BETTA, self).__init__(*args, **kwargs)\n\n self.parentbox = pbox\n self.font_label_13=QtGui.QFont()\n 
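# shared font object reused by the ALPHA/BETTA labels and spin boxes configured below\n        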
self.font_label_13.setFamily(\"Lucida Console\")\n self.font_label_13.setPointSize(13)\n self.font_label_13.setBold(False)\n\n\n self.ALPHA_lbl = [\"1(BYPASS)\", \"0.5 \", \"0.1 \",\"0.05 \", \"0.01 \", \"0.005 \",\n \"0.001 \", \"0.0005 \", \"0.00001\", \"0.000005 \", \"0.000001 \", \"0.0000005 \",\"0.0000001 \"]\n self.ALPHA_ValueArray = [1.0,0.5,0.1,0.05,0.01,0.005,0.001,0.0005,0.0001,0.00005,0.00001,0.000005,0.000001,]\n self.ALPHA_value=self.ALPHA_ValueArray[0]\n self.ALPHA_SpinBox = QtWidgets.QSpinBox(self.parentbox)\n self.ALPHA_SpinBox.setGeometry(QtCore.QRect(2, 34, 135, 30))\n self.ALPHA_SpinBox.setObjectName(\"ALPHA_SpinBox\")\n self.ALPHA_SpinBox.setStyleSheet(\"color: yellow; border-radius: 1px; background: darkblue\")\n self.ALPHA_SpinBox.setRange(0, 12)\n self.ALPHA_SpinBox.setSingleStep(1)\n self.ALPHA_SpinBox.setFont(self.font_label_13)\n self.ALPHA_SpinBox.setPrefix(self.ALPHA_lbl[0] + \" \" )\n self.ALPHA_SpinBox.setValue(0)\n self.ALPHA_SpinBox.valueChanged.connect(self.ALPHA_SpinBox_changed)\n self.ALPHA_SpinBoxTxt = QtWidgets.QLabel(self.parentbox)\n self.ALPHA_SpinBoxTxt.setGeometry(QtCore.QRect(10, 14, 100, 20)) ; self.ALPHA_SpinBoxTxt.setFont(self.font_label_13)\n self.ALPHA_SpinBoxTxt.setStyleSheet(\"color: #00FF00\"); self.ALPHA_SpinBoxTxt.setText(\"ALPHA\")\n self.ALPHA_SpinBoxTxt.setToolTip(\"ALPHA VALUE of ESTIMATOR\")\n\n # Betta_SpinBox\n self.BETTA_lbl = [\"1(BYPASS)\", \"0.1 \", \"0.01 \",\"0.001 \",\"0.0001 \", \"0.00001 \", \"0.000001\"]\n self.BETTA_ValueArray = [1.0,0.1,0.01,0.001,0.0001,0.00001,0.000001]\n self.BETTA_value=self.BETTA_ValueArray[0]\n self.BETTA_SpinBox = QtWidgets.QSpinBox(self.parentbox)\n self.BETTA_SpinBox.setGeometry(QtCore.QRect(2, 86, 135, 30))\n self.BETTA_SpinBox.setObjectName(\"BETTA_SpinBox\")\n self.BETTA_SpinBox.setStyleSheet(\"color: yellow; border-radius: 1px; background: darkblue\")\n self.BETTA_SpinBox.setRange(0, 6)\n self.BETTA_SpinBox.setSingleStep(1)\n self.BETTA_SpinBox.setFont(self.font_label_13)\n self.BETTA_SpinBox.setPrefix(self.BETTA_lbl[0] + \" \" )\n self.BETTA_SpinBox.setValue(0)\n self.BETTA_SpinBox.valueChanged.connect(self.BETTA_SpinBox_changed)\n self.BETTA_SpinBoxTxt = QtWidgets.QLabel(self.parentbox)\n self.BETTA_SpinBoxTxt.setGeometry(QtCore.QRect(10, 66, 100, 20)) ; self.BETTA_SpinBoxTxt.setFont(self.font_label_13)\n self.BETTA_SpinBoxTxt.setStyleSheet(\"color: #00FF00\"); self.BETTA_SpinBoxTxt.setText(\"BETTA\")\n\n self.BETTA_SpinBoxTxt.setToolTip(\"BETTA VALUE of ESTIMATOR\")\n self.BETTA_SpinBoxTxt.hide()\n self.BETTA_SpinBox.hide()\n\n # ####################################################################\n\n #ALPHA_SpinBox_changed\n def ALPHA_SpinBox_changed(self,indexValue):\n self.ALPHA_SpinBox.setPrefix(self.ALPHA_lbl[indexValue]+\" \")\n self.ALPHA_SpinBox.setValue(indexValue)\n #print(\"ALPHA = {:f}\".format(self.ALPHA_ValueArray[indexValue]))\n self.ALPHA_value=self.ALPHA_ValueArray[indexValue]\n\n #BETTA_SpinBox_changed\n def BETTA_SpinBox_changed(self,indexValue):\n self.BETTA_SpinBox.setPrefix(self.BETTA_lbl[indexValue]+\" \")\n self.BETTA_SpinBox.setValue(indexValue)\n #self.listHistory.addItem(\"BETTA = {:f}\".format(self.BETTA_ValueArray[indexValue]))\n self.BETTA_value = self.BETTA_ValueArray[indexValue]\n\n#####################################################################################################\n####################################################################################################\nclass GAINBOX(QtWidgets.QWidget):\n \"\"\"\n Custom Qt Widget to show a 
power bar and dial.\n Demonstrating compound and custom-drawn widget.\n \"\"\"\n def __init__(self, pbox, posy, chn, *args, **kwargs):\n super(GAINBOX, self).__init__(*args, **kwargs)\n\n self.parentbox = pbox\n self.font_label_13=QtGui.QFont()\n self.font_label_13.setFamily(\"Lucida Console\")\n self.font_label_13.setPointSize(10)\n self.font_label_13.setBold(True)\n\n self.GAIN_lbl = [\"00.1 \",\"00.2 \", \"00.5 \", \"01.0 \",\"02.0 \",\"05.0 \", \"10.0 \", \"20.0 \"]\n self.GAIN_ValueArray = [0.1,0.2,0.5,1.0,2.0,5.0,10.0,20.0]\n\n self.CHNUMBERSTR= str('CH{:1d} x'.format(chn))\n self.GAIN_SpinBox = QtWidgets.QSpinBox(self.parentbox)\n self.GAIN_SpinBox.setFont(self.font_label_13)\n self.GAIN_SpinBox.setGeometry(QtCore.QRect(2, posy, 115, 17))\n self.GAIN_SpinBox.setObjectName(\"GAIN1_SpinBox\")\n self.GAIN_SpinBox.setStyleSheet(\"color: yellow; border-radius: 1px; background: darkblue\")\n self.GAIN_SpinBox.setRange(0, 7)\n self.GAIN_SpinBox.setSingleStep(1)\n self.GAIN_SpinBox.setValue(3)\n self.GAIN_SpinBox.setPrefix(self.CHNUMBERSTR + self.GAIN_lbl[3] + \"\")\n self.GAIN_SpinBox.valueChanged.connect(self.GAIN_SpinBox_changed)\n # ####################################################################\n self.GainValue = self.GAIN_ValueArray[self.GAIN_SpinBox.value()]\n\n #ALPHA_SpinBox_changed\n def GAIN_SpinBox_changed(self,indexValue):\n self.GAIN_SpinBox.setPrefix(self.CHNUMBERSTR + self.GAIN_lbl[indexValue]+\"\")\n self.GainValue=self.GAIN_ValueArray[indexValue]\n #self.ALPHA_SpinBox.setValue(indexValue)\n #print(\"ALPHA = {:f}\".format(self.ALPHA_ValueArray[indexValue]))\n #self.GAIN1_value=self.GAIN_ValueArray[indexValue]\n\n #BETTA_SpinBox_changed\n def GET_GAIN(self): return self.GAIN_ValueArray[self.GAIN_SpinBox.value()]\n\n\n#####################################################################################################\n#####################################################################################################\n\nclass GAINBOXCOMBO(QtWidgets.QWidget):\n \"\"\"\n Custom Qt Widget to show a power bar and dial.\n Demonstrating compound and custom-drawn widget.\n \"\"\"\n\n def __init__(self, pbox, posx, posy, *args, **kwargs):\n super(GAINBOXCOMBO, self).__init__(*args, **kwargs)\n\n font = QtGui.QFont()\n font.setPointSize(10)\n\n self.offsetMAXVoltage=3.3\n self.beamChannels = 4\n\n\n\n self.parentbox = pbox\n self.GAIN_sbox=QtWidgets.QGroupBox(self.parentbox)\n self.GAIN_sbox.setGeometry(QtCore.QRect(posx, posy,215,120))\n self.GAIN_sbox.setFont(font)\n self.GAIN_sbox.setAlignment(QtCore.Qt.AlignCenter)\n #self.ALPHA_sbox.setAlignment(QtCore.Qt.AlignLeft)\n self.GAIN_sbox.setObjectName(\"GAIN_sbox\")\n self.GAIN_sbox.setTitle(\"DIGITAL GAIN\")\n\n self.GAIN_CNT1 = GAINBOX(self.GAIN_sbox,15,1)\n self.GAIN_CNT1.GAIN_SpinBox.setStyleSheet(\"color: red; border-radius: 1px; background: black\") # rgb(20, 40, 30)\n\n self.GAIN_CNT2 = GAINBOX(self.GAIN_sbox,32,2)\n self.GAIN_CNT2.GAIN_SpinBox.setStyleSheet(\"color: green; border-radius: 1px; background: black\")\n\n self.GAIN_CNT3 = GAINBOX(self.GAIN_sbox,49,3)\n self.GAIN_CNT3.GAIN_SpinBox.setStyleSheet(\"color: magenta; border-radius: 1px; background: black\")\n\n self.GAIN_CNT4 = GAINBOX(self.GAIN_sbox,66,4)\n self.GAIN_CNT4.GAIN_SpinBox.setStyleSheet(\"color: yellow; border-radius: 1px; background: black\")\n\n self.GAIN_CNT5 = GAINBOX(self.GAIN_sbox,83,5)\n self.GAIN_CNT5.GAIN_SpinBox.setStyleSheet(\"color: #B030B0; border-radius: 1px; background: black\")\n\n self.GAIN_CNT6 = GAINBOX(self.GAIN_sbox,100,6)\n 
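# six per-channel gain controls stacked 17 px apart; each GAINBOX wraps a QSpinBox whose index selects a multiplier from GAIN_ValueArray (0.1x to 20x)\n        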
self.GAIN_CNT6.GAIN_SpinBox.setStyleSheet(\"color: cyan; border-radius: 1px; background: black\")\n\n\n self.Resetbtn=QtWidgets.QPushButton(self.GAIN_sbox)\n self.Resetbtn.setGeometry(QtCore.QRect(120,50,22,25))\n self.Resetbtn.setObjectName(\"pBTN_SCR\")\n self.Resetbtn.setStyleSheet(\"color: yellow; border-radius: 5px; background: blue\")\n self.Resetbtn.setToolTip('Set all offset sliders\\n to zero')\n self.Resetbtn.setText(\"EQ\")\n self.Resetbtn.clicked.connect(self.EQUsliders)\n\n self.EQUbtn=QtWidgets.QPushButton(self.GAIN_sbox)\n self.EQUbtn.setGeometry(QtCore.QRect(120,88,22,25))\n self.EQUbtn.setObjectName(\"pBTN_SCR\")\n self.EQUbtn.setStyleSheet(\"color: yellow; border-radius: 5px; background: blue\")\n self.EQUbtn.setToolTip('Reset sliders to the\\n current beam-mode defaults')\n self.EQUbtn.setText(\"RST\")\n self.EQUbtn.clicked.connect(self.setsliders)\n\n\n\n\n self.Qslider1 = QSlider(self.GAIN_sbox)\n self.Qslider1.setRange(-1000,+1000)\n self.Qslider1.setSingleStep(1)\n self.Qslider1.setGeometry(144,15,10,100)\n self.Qslider1.setStyleSheet(\"background: black\")\n\n self.Qslider2 = QSlider(self.GAIN_sbox)\n self.Qslider2.setRange(-1000,+1000)\n self.Qslider2.setSingleStep(1)\n self.Qslider2.setGeometry(156,15,10,100)\n self.Qslider2.setStyleSheet(\"background: black\")\n\n self.Qslider3 = QSlider(self.GAIN_sbox)\n self.Qslider3.setRange(-1000,+1000)\n self.Qslider3.setSingleStep(1)\n self.Qslider3.setGeometry(168,15,10,100)\n self.Qslider3.setStyleSheet(\"background: black\")\n\n self.Qslider4 = QSlider(self.GAIN_sbox)\n self.Qslider4.setRange(-1000,+1000)\n self.Qslider4.setSingleStep(1)\n self.Qslider4.setGeometry(180,15,10,100)\n self.Qslider4.setStyleSheet(\"background: black\")\n\n self.Qslider5 = QSlider(self.GAIN_sbox)\n self.Qslider5.setRange(-1000,+1000)\n self.Qslider5.setSingleStep(1)\n self.Qslider5.setGeometry(192,15,10,100)\n self.Qslider5.setStyleSheet(\"background: black\")\n\n self.Qslider6 = QSlider(self.GAIN_sbox)\n self.Qslider6.setRange(-1000,+1000)\n self.Qslider6.setSingleStep(1)\n self.Qslider6.setGeometry(204,15,10,100)\n self.Qslider6.setStyleSheet(\"background: black\")\n\n\n\n self.ADCLSBmvminus = 0.0\n self.msGain=np.array([1.0,1.0,1.0,1.0,1.0,1.0])\n self.msMult = (self.ADCLSBmvminus) * self.msGain\n\n\n\n self.Sliderindex = 6.6/self.Qslider1.maximum()\n\n self.slidersets0 = np.array([0.0,0.0,0.0,0.0,0.0,0.0])\n # initial slider position at mode4\n self.offstpf = self.offsetMAXVoltage/4.0\n self.offstpi = self.offstpf\n self.offstpi = (self.Qslider1.maximum()) /2.0\n self.slidersets4 = np.array([(self.offstpi) , 0,(-self.offstpi), (-2 * self.offstpi),0.0,0.0])\n\n # initial slider position at mode6\n self.offstpf = self.offsetMAXVoltage/6\n self.offstpi = self.Qslider1.maximum()/3\n self.slidersets6 = np.array([(2*self.offstpi) ,(self.offstpi), 0, (-self.offstpi), (-2 * self.offstpi), (-3 * self.offstpi)])\n\n self.msOffset = np.array(6)\n self.setsliders()\n\n ################# GAIN Spinboxes control\n def resetGainToDefault(self):\n self.GAIN_CNT1.GAIN_SpinBox.setValue(3)\n self.GAIN_CNT2.GAIN_SpinBox.setValue(3)\n self.GAIN_CNT3.GAIN_SpinBox.setValue(3)\n self.GAIN_CNT4.GAIN_SpinBox.setValue(3)\n self.GAIN_CNT5.GAIN_SpinBox.setValue(3)\n self.GAIN_CNT6.GAIN_SpinBox.setValue(3)\n self.GAIN_CNT1.GAIN_SpinBox_changed(3)\n self.GAIN_CNT2.GAIN_SpinBox_changed(3)\n self.GAIN_CNT3.GAIN_SpinBox_changed(3)\n self.GAIN_CNT4.GAIN_SpinBox_changed(3)\n self.GAIN_CNT5.GAIN_SpinBox_changed(3)\n self.GAIN_CNT6.GAIN_SpinBox_changed(3)\n self.msGain = 
np.array([1.0,1.0,1.0,1.0,1.0,1.0])\n self.msMult = (self.ADCLSBmvminus) * self.msGain\n\n ################# SLIDERS control ###\n def setsliders(self):\n '''\n sets the sliders to their default positions for the three modes: 6-beam, 4-beam, or all-zero offset\n '''\n if (self.beamChannels == 4):\n self.Qslider1.setValue(self.slidersets4[0])\n self.Qslider2.setValue(self.slidersets4[1])\n self.Qslider3.setValue(self.slidersets4[2])\n self.Qslider4.setValue(self.slidersets4[3])\n self.Qslider5.setValue(self.slidersets4[4])\n self.Qslider6.setValue(self.slidersets4[5])\n self.msOffset = self.slidersets4 * self.Sliderindex\n else:\n self.Qslider1.setValue(self.slidersets6[0])\n self.Qslider2.setValue(self.slidersets6[1])\n self.Qslider3.setValue(self.slidersets6[2])\n self.Qslider4.setValue(self.slidersets6[3])\n self.Qslider5.setValue(self.slidersets6[4])\n self.Qslider6.setValue(self.slidersets6[5])\n self.msOffset = self.slidersets6 * self.Sliderindex\n return\n\n def EQUsliders(self):\n self.Qslider1.setValue(0)\n self.Qslider2.setValue(0)\n self.Qslider3.setValue(0)\n self.Qslider4.setValue(0)\n self.Qslider5.setValue(0)\n self.Qslider6.setValue(0)\n self.msOffset = self.slidersets0 * self.Sliderindex\n\n\n # COMMON CONTROL #########################################################\n def resetToDefault(self,beams):\n self.beamChannels=beams\n self.setsliders()\n self.resetGainToDefault()\n\n def refreshGainnOffset(self):\n self.msGain[0] = self.GAIN_CNT1.GainValue\n self.msGain[1] = self.GAIN_CNT2.GainValue\n self.msGain[2] = self.GAIN_CNT3.GainValue\n self.msGain[3] = self.GAIN_CNT4.GainValue\n self.msGain[4] = self.GAIN_CNT5.GainValue\n self.msGain[5] = self.GAIN_CNT6.GainValue\n self.msMult = self.ADCLSBmvminus * self.msGain\n # Build actual voltage offsets\n self.msOffset[0] = self.Sliderindex * self.Qslider1.value()\n self.msOffset[1] = self.Sliderindex * self.Qslider2.value()\n self.msOffset[2] = self.Sliderindex * self.Qslider3.value()\n self.msOffset[3] = self.Sliderindex * self.Qslider4.value()\n self.msOffset[4] = self.Sliderindex * self.Qslider5.value()\n self.msOffset[5] = self.Sliderindex * self.Qslider6.value()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"seiranp/RK-SGNViewer","sub_path":"controls.py","file_name":"controls.py","file_ext":"py","file_size_in_byte":14863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"24841408559","text":"#!/usr/bin/python3\n\"\"\" module for interactive console \"\"\"\n\nimport cmd\nfrom models.base_model import BaseModel\nfrom models.user import User\nfrom models import storage\nfrom models.state import State\nfrom models.city import City\nfrom models.review import Review\nfrom models.amenity import Amenity\nfrom models.place import Place\n\n\nclass HBNBCommand(cmd.Cmd):\n \"\"\"interactive console cmd \"\"\"\n\n prompt = \"(hbnb) \"\n myInstances = {\"BaseModel\": BaseModel, \"User\": User, \"City\": City, \"State\": State, \"Place\": Place, \"Review\": Review,\n \"Amenity\": Amenity}\n\n def do_create(self, line):\n \"\"\"create new instance\"\"\"\n if not line:\n print(\"** class name missing **\")\n return\n myClass = line.split(\" \")[0]\n if myClass not in HBNBCommand.myInstances:\n print(\"** class doesn't exist **\")\n return\n newIns = HBNBCommand.myInstances[myClass]()\n print(newIns.id)\n\n def do_EOF(self, line):\n \"\"\"exit from console\"\"\"\n return True\n\n def do_show(self, line):\n \"\"\"print the string representation of an instance\"\"\"\n if not line:\n print(\"** class name missing **\")\n return\n myline = 
line.split(\" \")\n if myline[0] not in HBNBCommand.myInstances:\n print(\"** class doesn't exist **\")\n return\n if len(myline) < 2:\n print(\"** instance id missing **\")\n return\n items = storage.all()\n key = myline[0] + \".\" + myline[1]\n for k in items:\n if key == k:\n print(items[k])\n return\n print(\"** no instance found **\")\n\n def do_destroy(self, line):\n \"\"\"delete instance and save\"\"\"\n if not line:\n print(\"** class name missing **\")\n return\n myline = line.split(\" \")\n if myline[0] not in HBNBCommand.myInstances:\n print(\"** class doesn't exist **\")\n return\n if len(myline) < 2:\n print(\"** instance id missing **\")\n return\n items = storage.all()\n key = myline[0] + \".\" + myline[1]\n for k in items:\n if key == k:\n del (items[k])\n storage.save()\n return\n print(\"** no instance found **\")\n\n def do_help(self, line):\n \"\"\"show general or specific help\"\"\"\n print()\n print(\"Documented commands (type help ):\")\n print(\"========================================\")\n print(\"EOF help quit\\n\")\n\n def do_quit(self, line):\n \"\"\" exit from console\"\"\"\n return True\n\n def emptyline(self):\n \"\"\"Override defaults of`empty line & return`.\n \"\"\"\n pass\n\n def do_all(self, line):\n \"\"\"show all instances\"\"\"\n items = storage.all()\n if not line:\n for k in items:\n print(items[k])\n return\n myline = line.split(\" \")\n if myline[0] not in HBNBCommand.myInstances:\n print(\"** class doesn't exist **\")\n return\n for k in items:\n if isinstance(items[k], HBNBCommand.myInstances[myline[0]]):\n print(items[k])\n\n\n\nif __name__ == '__main__':\n HBNBCommand().cmdloop()\n","repo_name":"MihdarAbbasher/AirBnB_clone","sub_path":"console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":3245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"21267646566","text":"from cProfile import label\nfrom cgitb import text\nfrom email.mime import image\nfrom textwrap import fill\nfrom tkinter import*\nfrom tkinter import ttk\nfrom tkinter.tix import Select\nfrom turtle import update, width\nfrom PIL import Image, ImageTk\nfrom tkinter import messagebox\nimport mysql.connector\nimport cv2\nfrom mysqlx import Column\nimport os\nimport csv\nfrom tkinter import filedialog\n\n\nmydata=[]\nclass Attendance:\n def __init__(self, root):\n self.root = root\n self.root.geometry(\"1530x790+0+0\")\n self.root.title(\"Face Recognition System\")\n\n #variables\n self.var_atten_id=StringVar()\n self.var_atten_roll=StringVar()\n self.var_atten_name=StringVar()\n self.var_atten_dep=StringVar()\n self.var_atten_time=StringVar()\n self.var_atten_date=StringVar()\n self.var_atten_attendance=StringVar()\n\n\n img1 = Image.open(r\"F:\\face recognition\\Images\\img12.jpg\")\n img1 = img1.resize((800, 200), Image.ANTIALIAS)\n self.photoimg1 = ImageTk.PhotoImage(img1)\n\n f_lb1 = Label(self.root, image=self.photoimg1)\n f_lb1.place(x=0, y=0, width=800, height=200)\n\n # second Image\n img2 = Image.open(r\"F:\\face recognition\\Images\\img13.jpg\")\n img2 = img2.resize((800, 200), Image.ANTIALIAS)\n self.photoimg2 = ImageTk.PhotoImage(img2)\n\n f_lb2 = Label(self.root, image=self.photoimg2)\n f_lb2.place(x=800, y=0, width=800, height=200)\n\n # bg image\n img4 = Image.open(r\"F:\\face recognition\\Images\\img10.jpg\")\n img4 = img4.resize((1530, 660), Image.ANTIALIAS)\n self.photoimg4 = ImageTk.PhotoImage(img4)\n\n bg_img = Label(self.root, image=self.photoimg4)\n bg_img.place(x=0, y=130, width=1530, 
height=660)\n\n title_lb1 = Label(bg_img, text=\"ATTENDANCE MANAGEMENT SYSTEM\", font=(\n \"time new roman\", 35, \"bold\"), bg=\"white\", fg=\"red\")\n title_lb1.place(x=0, y=0, width=1530, height=45)\n\n main_frame = Frame(bg_img, bd=2, bg=\"white\")\n main_frame.place(x=20, y=55, width=1480, height=580)\n\n # left label frame\n Left_frame = LabelFrame(main_frame, bd=2, bg=\"white\", relief=RIDGE,\n text=\"Student Attendance Details\", font=(\"time new roman\", 12, \"bold\"))\n Left_frame.place(x=10, y=10, width=730, height=560)\n\n img_left = Image.open(r\"F:\\face recognition\\Images\\img15.jpg\")\n img_left = img_left.resize((720, 130), Image.ANTIALIAS)\n self.photoimg_left = ImageTk.PhotoImage(img_left)\n\n f_lb3 = Label(Left_frame, image=self.photoimg_left)\n f_lb3.place(x=5, y=0, width=720, height=130)\n\n left_inside_frame = Frame(Left_frame, bd=2,relief=RIDGE,bg=\"white\")\n left_inside_frame.place(x=0, y=135, width=720, height=400)\n\n # attendance_ID\n attendanceId_label = Label(left_inside_frame, text=\"Attendance_ID:\", font=(\n \"time new roman\", 13, \"bold\"), bg=\"white\")\n attendanceId_label.grid(row=0, column=0, padx=10, pady=5, sticky=W)\n\n attendanceId_entry = ttk.Entry(\n left_inside_frame, width=22,textvariable=self.var_atten_id, font=(\"time new roman\", 13, \"bold\"))\n attendanceId_entry.grid(row=0, column=1, padx=10, pady=5, sticky=W)\n\n # roll\n rollLabel = Label(left_inside_frame, text=\"Enroll_No:\", font=(\n \"time new roman\", 13, \"bold\"), bg=\"white\")\n rollLabel.grid(row=0, column=2, padx=4, pady=8, sticky=W)\n\n atten_roll = ttk.Entry(\n left_inside_frame, width=22,textvariable=self.var_atten_roll, font=(\"time new roman\", 13, \"bold\"))\n atten_roll.grid(row=0, column=3, pady=8)\n\n # name\n nameLabel = Label(left_inside_frame, text=\"Name:\", font=(\n \"time new roman\", 13, \"bold\"), bg=\"white\")\n nameLabel.grid(row=1, column=0)\n\n atten_name = ttk.Entry(\n left_inside_frame, width=22,textvariable=self.var_atten_name, font=(\"time new roman\", 13, \"bold\"))\n atten_name.grid(row=1, column=1, pady=8)\n\n # dep\n depLabel = Label(left_inside_frame, text=\"Department:\", font=(\n \"time new roman\", 13, \"bold\"), bg=\"white\")\n depLabel.grid(row=1, column=2)\n\n atten_dep = ttk.Entry(\n left_inside_frame, width=22,textvariable=self.var_atten_dep, font=(\"time new roman\", 13, \"bold\"))\n atten_dep.grid(row=1, column=3, pady=8)\n\n # time\n timeLabel = Label(left_inside_frame, text=\"Time:\", font=(\n \"time new roman\", 13, \"bold\"), bg=\"white\")\n timeLabel.grid(row=2, column=0)\n\n atten_time = ttk.Entry(\n left_inside_frame, width=22,textvariable=self.var_atten_time, font=(\"time new roman\", 13, \"bold\"))\n atten_time.grid(row=2, column=1, pady=8)\n\n # date\n dateLabel = Label(left_inside_frame, text=\"Date:\", font=(\n \"time new roman\", 13, \"bold\"), bg=\"white\")\n dateLabel.grid(row=2, column=2)\n\n atten_date = ttk.Entry(\n left_inside_frame, width=22,textvariable=self.var_atten_date, font=(\"time new roman\", 13, \"bold\"))\n atten_date.grid(row=2, column=3, pady=8)\n\n #attendance\n attendanceLabel = Label(left_inside_frame, text=\"Attendance:\", font=(\n \"time new roman\", 13, \"bold\"), bg=\"white\")\n attendanceLabel.grid(row=3, column=0)\n\n self.atten_status = ttk.Combobox(left_inside_frame, width=20,textvariable=self.var_atten_attendance,font=(\n \"time new roman\", 13, \"bold\"), state=\"readonly\")\n self.atten_status[\"values\"] = (\"Select\", \"Present\", \"Absent\")\n self.atten_status.grid(row=3, column=1, 
pady=8) \n        self.atten_status.current(0)\n\n\n        # button frame\n        btn_frame = Frame(left_inside_frame, bd=2, relief=RIDGE, bg=\"white\")\n        btn_frame.place(x=0, y=340, width=715, height=30)\n\n        save_btn = Button(btn_frame, text=\"Import csv\",command=self.importCsv, width=17, font=(\n            \"time new roman\", 13, \"bold\"), bg=\"blue\", fg=\"white\")\n        save_btn.grid(row=0, column=0)\n\n        update_btn = Button(btn_frame, text=\"Export csv\",command=self.exportCsv, width=17, font=(\n            \"time new roman\", 13, \"bold\"), bg=\"blue\", fg=\"white\")\n        update_btn.grid(row=0, column=1)\n\n        delete_btn = Button(btn_frame, text=\"Update\", width=17, font=(\n            \"time new roman\", 13, \"bold\"), bg=\"blue\", fg=\"white\")\n        delete_btn.grid(row=0, column=2)\n\n        reset_btn = Button(btn_frame, text=\"Reset\",command=self.reset_data, width=17, font=(\n            \"time new roman\", 13, \"bold\"), bg=\"blue\", fg=\"white\")\n        reset_btn.grid(row=0, column=3)\n\n\n        #Right Frame\n        Right_frame = LabelFrame(main_frame, bd=2, bg=\"white\", relief=RIDGE,\n                                text=\"Attendance Details\", font=(\"time new roman\", 12, \"bold\"))\n        Right_frame.place(x=750, y=10, width=720, height=560)\n\n        table_frame = Frame(Right_frame, bd=2, relief=RIDGE, bg=\"white\")\n        table_frame.place(x=5, y=5, width=700, height=530)\n\n        #scroll bar\n\n        scroll_x=ttk.Scrollbar(table_frame,orient=HORIZONTAL)\n        scroll_y=ttk.Scrollbar(table_frame,orient=VERTICAL)\n\n        self.AttendanceReportTable=ttk.Treeview(table_frame,column=(\"id\",\"roll\",\"name\",\"department\",\"time\",\"date\",\"attendance\"),xscrollcommand=scroll_x.set,yscrollcommand=scroll_y.set)\n        scroll_x.pack(side=BOTTOM,fill=X)\n        scroll_y.pack(side=RIGHT,fill=Y)\n\n        scroll_x.config(command=self.AttendanceReportTable.xview)\n        scroll_y.config(command=self.AttendanceReportTable.yview)\n\n        self.AttendanceReportTable.heading(\"id\",text=\"Attendance_ID\")\n        self.AttendanceReportTable.heading(\"roll\",text=\"Enroll_No\")\n        self.AttendanceReportTable.heading(\"name\",text=\"Name\")\n        self.AttendanceReportTable.heading(\"department\",text=\"Department\")\n        self.AttendanceReportTable.heading(\"time\",text=\"Time\")\n        self.AttendanceReportTable.heading(\"date\",text=\"Date\")\n        self.AttendanceReportTable.heading(\"attendance\",text=\"Attendance\")\n\n        self.AttendanceReportTable[\"show\"] = \"headings\"\n        \n        self.AttendanceReportTable.column(\"id\",width=100)\n        self.AttendanceReportTable.column(\"roll\",width=100)\n        self.AttendanceReportTable.column(\"name\",width=100)\n        self.AttendanceReportTable.column(\"department\",width=100)\n        self.AttendanceReportTable.column(\"time\",width=100)\n        self.AttendanceReportTable.column(\"date\",width=100)\n        self.AttendanceReportTable.column(\"attendance\",width=100)\n\n        self.AttendanceReportTable.pack(fill=BOTH,expand=1)\n\n        self.AttendanceReportTable.bind(\"<ButtonRelease>\", self.get_cursor)\n\n    #fetch Data\n\n    def fetch_data(self,rows):\n        self.AttendanceReportTable.delete(*self.AttendanceReportTable.get_children())\n        for i in rows:\n            self.AttendanceReportTable.insert(\"\",END,values=i)\n\n    # import\n    def importCsv(self):\n        global mydata\n        mydata.clear()\n        fln=filedialog.askopenfilename(initialdir=os.getcwd(),title=\"Open CSV\",filetypes=((\"CSV File\",\"*.csv\"),(\"All Files\",\"*.*\")),parent=self.root)\n        with open(fln) as myfile:\n            csvread=csv.reader(myfile,delimiter=\",\")\n            for i in csvread:\n                mydata.append(i)\n        self.fetch_data(mydata)\n    \n    # export\n    def exportCsv(self):\n        try:\n            if len(mydata)<1:\n                messagebox.showerror(\"No Data\",\"No data found\",parent=self.root)\n                return False\n            
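# ask where to save the exported CSV, then write every cached row to it\n            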
fln=filedialog.asksaveasfilename(initialdir=os.getcwd(),title=\"Save CSV\",filetypes=((\"CSV File\",\"*.csv\"),(\"All Files\",\"*.*\")),parent=self.root)\n            with open(fln,mode=\"w\",newline=\"\") as myfile:\n                exp_write=csv.writer(myfile,delimiter=\",\")\n                for i in mydata:\n                    exp_write.writerow(i)\n                messagebox.showinfo(\"Data Export\",\"Data exported successfully\")\n        except Exception as es:\n            messagebox.showerror(\"Error\",f\"Due To:{str(es)}\",parent=self.root)\n\n    def get_cursor(self,event=\"\"):\n        cursor_row=self.AttendanceReportTable.focus()\n        content=self.AttendanceReportTable.item(cursor_row)\n        rows=content['values']\n        self.var_atten_id.set(rows[0])\n        self.var_atten_roll.set(rows[1])\n        self.var_atten_name.set(rows[2])\n        self.var_atten_dep.set(rows[3])\n        self.var_atten_time.set(rows[4])\n        self.var_atten_date.set(rows[5])\n        self.var_atten_attendance.set(rows[6])\n\n    #reset\n    def reset_data(self):\n        self.var_atten_id.set(\"\")\n        self.var_atten_roll.set(\"\")\n        self.var_atten_name.set(\"\")\n        self.var_atten_dep.set(\"\")\n        self.var_atten_time.set(\"\")\n        self.var_atten_date.set(\"\")\n        self.var_atten_attendance.set(\"\")\n\n\n\n\n\n\nif __name__ == \"__main__\":\n    root = Tk()\n    obj = Attendance(root)\n    root.mainloop()","repo_name":"Archana-ab/Face-Recognition-Attendance-System","sub_path":"attendance.py","file_name":"attendance.py","file_ext":"py","file_size_in_byte":10924,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"} +{"seq_id":"38517806874","text":"import unittest\n\nfrom Configuration.EnumerationConstants import FieldIdentifiers\nfrom Configuration.SubFieldDescriptions import SubFieldDescriptions\nfrom Configuration.SubFieldsInFields import SubFieldsInFields\nfrom IcaoMessageParser.FlightPlanRecord import FlightPlanRecord\nfrom IcaoMessageParser.ParseF7 import ParseF7\n\n\nclass TestParseF7(unittest.TestCase):\n\n    def test_parse_field7(self):\n\n        # Missing\n        self.do_f7_test(True, 1, \"\", [\"There is no data in field 7\"])\n\n        # Single invalid character\n        self.do_f7_test(True, 1, \"*\",\n                        [\"Expecting callsign in field 7 instead of '*', (1 to 7 characters and digits)\"])\n\n        # Too long\n        self.do_f7_test(True, 1, \"AB345678\",\n                        [\"Expecting callsign in field 7 instead of 'AB345678', (1 to 7 characters and digits)\"])\n\n        # OK\n        self.do_f7_test(False, 0, \"TEST001\", [])\n\n        # Mode and code incorrect\n        self.do_f7_test(True, 1, \"TEST01/\", [\"Expecting Mode A or C and octal SSR code at end of field instead of '/'\"])\n\n        # OK\n        self.do_f7_test(False, 0, \" TEST001 \", [])\n\n        # Mode 3A incorrect\n        self.do_f7_test(True, 1, \" T1/ \", [\"Expecting Mode A or C and octal SSR code at end of field instead of '/'\"])\n\n        # Mode 3A incorrect\n        self.do_f7_test(True, 2, \" T1/77 \",\n                        [\"Expecting SSR mode A or C instead of '7'\",\n                         \"Expecting Mode A or C and octal SSR code at end of field instead of '7'\"])\n\n        # Mode 3A incorrect\n        self.do_f7_test(True, 2, \" T1/ 77 \",\n                        [\"Expecting SSR mode A or C instead of '7'\",\n                         \"Expecting Mode A or C and octal SSR code at end of field instead of '7'\"])\n\n        # Mode 3A incorrect\n        self.do_f7_test(True, 2, \" T1/D77 \",\n                        [\"Expecting SSR mode A or C instead of 'D'\",\n                         \"Expecting Mode A or C and octal SSR code at end of field instead of '77'\"])\n\n        # Mode 3A incorrect\n        self.do_f7_test(True, 1, \" T1/A77 \", [\"Expecting SSR code as 4 digit octal value instead of '77'\"])\n\n        # Mode 3A incorrect\n        self.do_f7_test(True, 1, \" T1/A7738\", [\"Expecting SSR code as 4 digit octal value instead of 
'7738'\"])\n\n # Mode 3A correct\n self.do_f7_test(False, 0, \" T1/A7763\", [])\n\n # Mode 3A incorrect\n self.do_f7_test(True, 1, \" T1/A1234D\", [\"Expecting SSR code as 4 digit octal value instead of '1234D'\"])\n\n # Extra field\n self.do_f7_test(True, 1, \" T1/A1234 EXTRA\",\n [\"Too many fields in Field 7, remove 'EXTRA' and / or check the overall syntax\"])\n\n def do_f7_test(self, errors_detected, number_of_errors,\n string_to_parse, expected_error_text):\n # type: (bool, int, str, [str]) -> FlightPlanRecord\n fpr = FlightPlanRecord()\n fpr.add_icao_field(FieldIdentifiers.F7, string_to_parse, 0, len(string_to_parse))\n pf7 = ParseF7(fpr, SubFieldsInFields(), SubFieldDescriptions())\n pf7.parse_field()\n # print(\"In the test: str(errors_detected) + \", \" + str(number_of_errors))\n self.assertEqual(errors_detected, fpr.errors_detected())\n self.assertEqual(number_of_errors, len(fpr.get_erroneous_fields()))\n if errors_detected:\n for i in range(0, number_of_errors):\n self.assertEqual(expected_error_text[i], fpr.get_erroneous_fields()[i].get_error_message())\n return fpr\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"pventon/ICAO-ATS-and-OLDI-Message-Parser","sub_path":"UnitTests/test_ParseF7.py","file_name":"test_ParseF7.py","file_ext":"py","file_size_in_byte":3575,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"4"} +{"seq_id":"5001399641","text":"\"\"\"link plate with analysis\n\nRevision ID: 21ef5ce15822\nRevises: 88fa93c68dab\nCreate Date: 2016-12-05 15:12:49.067536\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = \"21ef5ce15822\"\ndown_revision = \"88fa93c68dab\"\nbranch_labels = None\ndepends_on = None\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column(\"analysis\", sa.Column(\"plate_id\", sa.Integer(), nullable=True))\n op.create_foreign_key(None, \"analysis\", \"plate\", [\"plate_id\"], [\"id\"])\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_constraint(None, \"analysis\", type_=\"foreignkey\")\n op.drop_column(\"analysis\", \"plate_id\")\n ### end Alembic commands ###\n","repo_name":"Clinical-Genomics/genotype","sub_path":"alembic/versions/21ef5ce15822_link_plate_with_analysis.py","file_name":"21ef5ce15822_link_plate_with_analysis.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"4"} +{"seq_id":"15750884341","text":"from argparse import ArgumentParser\nimport os\nfrom tqdm import tqdm\nimport numpy as np\nfrom sklearn.decomposition import PCA\nfrom sklearn.model_selection import KFold\nimport torch\nimport utils\nfrom models import Encoder, AlexNet, VGG16, RegressionModel\nfrom bold5000.regression import grad_regression\n\n\ndef voxel_data(subj_file, roi):\n voxels = np.load(subj_file, allow_pickle=True).item()\n voxels = {s: v[roi] for s, v in voxels.items()}\n stimuli = list(voxels.keys())\n voxels = np.stack([voxels[s] for s in stimuli])\n return voxels, stimuli\n\n\ndef condition_features(stimuli, model):\n print('Extracting features')\n condition_features = []\n batch_size = 32\n for i in tqdm(range(0, len(stimuli), batch_size)):\n batch_names = stimuli[i:i + batch_size]\n batch = [utils.image_to_tensor(s, resolution=256) for s in batch_names]\n batch = torch.stack(batch)\n if torch.cuda.is_available():\n batch = batch.cuda()\n with torch.no_grad():\n batch_feats = model(batch).cpu().numpy()\n condition_features.append(batch_feats)\n condition_features = np.concatenate(condition_features)\n return condition_features\n\n\nif __name__ == '__main__':\n parser = ArgumentParser(description='Encoder using BOLD5000 study data')\n parser.add_argument('--bold5000_folder', required=True, type=str, help='folder containing the stimuli images')\n parser.add_argument('--roi', required=True, type=str, help='ROI to fit')\n parser.add_argument('--n_pcs', default=100, type=int, help='number of pcs to select for encoder')\n parser.add_argument('--feature_extractor', default='alexnet', type=str, help='feature extraction model')\n parser.add_argument('--feature_name', default='conv_3', type=str, help='feature extraction layer')\n parser.add_argument('--allsubj', action='store_true', help='whether or not to use all subjects')\n parser.add_argument('--l2', default=0, type=float, help='L2 regularization weight')\n args = parser.parse_args()\n\n if args.feature_extractor == 'alexnet':\n feat_extractor = AlexNet(args.feature_name)\n elif args.feature_extractor == 'vgg16':\n feat_extractor = VGG16(args.feature_name)\n else:\n raise ValueError('unimplemented feature extractor: {}'.format(args.feature_extractor))\n if torch.cuda.is_available():\n feat_extractor.cuda()\n\n subj_file = 'subjall.npy' if args.allsubj else 'subj1.npy'\n voxels, stimuli = voxel_data(os.path.join(args.bold5000_folder, subj_file), args.roi)\n voxel_pcs = PCA(n_components=voxels.shape[1]).fit_transform(voxels)\n\n stimuli = [os.path.join(args.bold5000_folder, 'stimuli', s) for s in stimuli]\n features = condition_features(stimuli, feat_extractor)\n\n cv_r = []\n cv = KFold(n_splits=5, shuffle=True, random_state=27)\n for train_idx, val_idx in cv.split(features):\n features_train, features_val = features[train_idx], features[val_idx]\n voxel_pcs_train, voxel_pcs_val = voxel_pcs[train_idx], voxel_pcs[val_idx]\n _, r = grad_regression(torch.from_numpy(features_train), torch.from_numpy(voxel_pcs_train),\n torch.from_numpy(features_val), torch.from_numpy(voxel_pcs_val), 
l2_penalty=args.l2)\n        cv_r.append(r)\n    print('\\nFinal Mean r: {:.4f}'.format(np.mean(cv_r)))\n\n    w, _ = grad_regression(torch.from_numpy(features), torch.from_numpy(voxel_pcs), l2_penalty=args.l2)\n    regressor = RegressionModel(features.shape[1], voxel_pcs.shape[1])\n    regressor.set_params(w[:, :args.n_pcs])\n\n    encoder = Encoder(feat_extractor, regressor)\n    encoder.eval()\n    run_name = utils.get_run_name('bold5000', args.feature_extractor, args.feature_name, [args.roi + 'pcs'], args.allsubj)\n    torch.save(encoder, os.path.join('saved_models', run_name + '.pth'))\n","repo_name":"EricElmoznino/adversarial_tms","sub_path":"bold5000/voxel_pcs_regression.py","file_name":"voxel_pcs_regression.py","file_ext":"py","file_size_in_byte":3812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"15229247545","text":"from parse import parse\nimport operator\n\nclass Monkey:\n    def __init__(self, starting_items, op, test, targets, monkey_lst):\n        self.items = starting_items\n        self._op = op\n        self.test = test\n        self.targets = targets #(true, false)\n        self.monkey_lst: list['Monkey'] = monkey_lst\n\n        self.inspect_count = 0\n    \n    def run_op(self, item):\n        return eval(self._op.replace(\"old\", str(item)))\n\n    def inspect(self):\n        item = self.items.pop(0)\n        item = self.run_op(item)\n        item //= 3\n\n        if item % self.test == 0:\n            target = self.monkey_lst[self.targets[0]]\n        else:\n            target = self.monkey_lst[self.targets[1]]\n\n        target.catch(item)\n        self.inspect_count += 1\n\n    def catch(self, item):\n        self.items.append(item)\n\n    def take_turn(self):\n        while self.items:\n            self.inspect()\n\nwith open(\"day11.txt\") as f:\n    lines = [l.strip() for l in f.readlines()]\n    monkeys: list[Monkey] = []\n    for i in range(0, len(lines), 7):\n        items = parse(\"Starting items: {}\", lines[i+1]).fixed[0]\n        items = list(map(int, items.split(\",\")))\n\n        op = parse(\"Operation: new = {}\", lines[i+2]).fixed[0]\n        # f = operator.add if op_args[1] == \"+\" else operator.mul\n        # nums = op_args[::2]\n        # op = lambda x: f(*(x if t=='old' else int(t) for t in nums))\n\n        test = parse(\"Test: divisible by {:d}\", lines[i+3]).fixed[0]\n\n        targets = [parse(\"If true: throw to monkey {:d}\", lines[i+4]).fixed[0],\n                   parse(\"If false: throw to monkey {:d}\", lines[i+5]).fixed[0]]\n\n        monkeys.append(Monkey(items, op, test, targets, monkeys))\n\nfor _ in range(20):\n    for monkey in monkeys:\n        monkey.take_turn()\n\nfor monkey in monkeys:\n    print(\"+\", monkey.items)\n    print(monkey.inspect_count)","repo_name":"TobyBoyne/advent-of-code","sub_path":"aoc-2022/2022-11.py","file_name":"2022-11.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"74749244918","text":"import json\nfrom io import BytesIO\nimport base64\nfrom PIL import Image, ImageTk\n\nfrom GeometrA.src.TestScript.TestCase import TestCase as TC\nfrom GeometrA.src.TestScript.Executor import Executor\nfrom GeometrA.src.Report import CaseReport\nfrom GeometrA.src.Report import Report\n\nclass TestScript:\n    def __init__(self):\n        self._caseList = {}\n\n    def add(self, name, case):\n        self._caseList[name] = case\n\n    def size(self):\n        return len(self._caseList)\n\n    def getCase(self, name):\n        return self._caseList[name]\n\n    def modified(self, name, data):\n        case = TC()\n        for i in range(len(data)):\n            act = data[i]['act']\n            val = data[i]['val']\n            case.insert(act=act, val=val)\n\n        self._caseList[name] = case\n\n    def load(self, path):\n        case = TC()\n        
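# register the new case under its path, then fill it from the Setup/Main/Teardown sections of testcase.json\n        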
self._caseList[path] = case\n        path = path + '/testcase.json'\n        with open(path, 'r') as f:\n            case_data = json.loads(f.read())\n        setup_data = case_data[\"Setup\"]\n        main_data = case_data[\"Main\"]\n        teardown_data = case_data[\"Teardown\"]\n        case.setSetupSize(len(setup_data))\n        case.setTeardownSize(len(teardown_data))\n        self.insertCase(case, setup_data)\n        self.insertCase(case, main_data)\n        self.insertCase(case, teardown_data)\n\n    def runAll(self):\n        reportIndex = Report()\n        reportIndex.setCount(len(self._caseList))\n        reportList = []\n\n        for casePath in self._caseList:\n            # suitePath = casePath[:casePath.rfind('/')]\n            # projectPath = suitePath[:suitePath.rfind('/')]\n            exe = Executor(self._caseList[casePath])\n            result, report = self.execute(exe, casePath)\n            reportList.append(report)\n            reportIndex.addCase(casePath, result)\n        reportPath = reportIndex.generate()\n        for report in reportList:\n            report.exportHTML(reportPath)\n        return json.dumps({'state': 'success', 'reportPath': reportPath + '/index.html'})\n\n    def execute(self, exe, path):\n        name = path.split('/')[-1]\n        report = CaseReport(name)\n        size = exe.case.getSize()\n        teardownStart = size - exe.case.getTeardownSize()\n        i = 0\n        execResult = 'Success'\n        report.start()\n        while i < size:\n            f = 'Failed'\n            e = 'Error'\n            loop = 'Loop Begin'\n            step = exe.case.getSteps(i)\n            report.stepStart(step)\n            status = exe.execute(i)\n            report.stepEnd(step, i)\n            if step.getAction() == loop:\n                i = exe.loopEnd(i)\n            if (status == f or status == e) and i < teardownStart:\n                i = teardownStart\n                execResult = status\n                continue\n            elif (status == f or status == e) and i >= teardownStart:\n                break\n            i = i+1\n        report.end(execResult, size)\n        return (execResult, report)\n\n    def insertCase(self,case, dataDic):\n        if not dataDic:\n            return\n        for i in dataDic:\n            action = dataDic[i][\"Action\"]\n            from GeometrA.src import IMAGEACTIONLIST\n            if action in IMAGEACTIONLIST:\n                value = Image.open(BytesIO(base64.b64decode(dataDic[i][\"Value\"].replace(\"data:image/png;base64,\", \"\"))))\n            else:\n                value = dataDic[i][\"Value\"]\n            case.insert(act=action, val=value)\n        return\n","repo_name":"NTUTVisualScript/GeometrA","sub_path":"GeometrA/src/TestScript/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3443,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"4"} +{"seq_id":"2679526054","text":"import glob\nimport pickle\n\nfrom lib import aggregate_single\n\n\ndef aggregate(paths, min_len, len_):\n    fpfn_tot_ratio = aggregate_single(paths, 'Custom/FPFN_TOT_Ratio', min_len, len_)\n    r2 = aggregate_single(paths, 'Custom/R2', min_len, len_)\n    return fpfn_tot_ratio, r2\n\n\ndef run():\n    min_len = 201\n    base_paths = ['../runs3/', '../runs/']\n    problems = [\n        ('m2sat_16x16_5_F_v2', 201),\n        ('m2sat_16x16_10_F_v2', 61),\n        ('m2sat_16x16_15_F_v2', 51),\n        ('m2sat_16x16_20_F_v2', 51),\n        ('m2sat_16x16_25_F_v2', 31),\n        ('m2sat_16x16_30_F_v2', 31)\n    ]\n    kv = {}\n    for problem in problems:\n        print(problem)\n        paths = []\n        for base_path in base_paths:\n            paths.extend(glob.glob(base_path + '*-' + problem[0]))\n        print(len(paths))\n        print(paths)\n        data = aggregate(paths, min_len, problem[1])\n        kv[problem[0]] = data\n\n    with open('m2sat_comp.pickle', 'wb+') as f:\n        pickle.dump(kv, f)\n\n\nif __name__ == '__main__':\n    
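# aggregate each problem's runs and write m2sat_comp.pickle\n    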
run()\n","repo_name":"instance01/qubo-nn","sub_path":"qubo_nn/plots/gen_m2sat_comp_pickle.py","file_name":"gen_m2sat_comp_pickle.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"4"} +{"seq_id":"10408342682","text":"import boto3\nimport os\nimport json\nimport base64\nfrom cryptography.fernet import Fernet\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.backends import default_backend\nimport re\n\nprefix = os.environ.get('db_prefix')\nif prefix == 'prod':\n prefix = ''\n\nrandom_encryption_key = os.environ.get('random_encryption_key')\n\ndynamodb = boto3.resource(\n 'dynamodb',\n region_name= os.environ.get('aws_region_name'),\n aws_access_key_id=os.environ.get('aws_access_key_id'),\n aws_secret_access_key=os.environ.get('aws_secret_access_key'),\n)\nlimit_table = dynamodb.Table(prefix+'infibot_quota')\nshort_term_history_table = dynamodb.Table(prefix+'infibot_short_term_history')\nmetadata_table = dynamodb.Table(prefix+'infibot_metadata')\n\ndef get_last_intro_message_timestamp(number, user_secret):\n attr_name = getSanitizedKey(\"last_intro_message_timestamp\", user_secret) \n try:\n k = getSanitizedKey(number, user_secret)\n return int(decrypt(user_secret, metadata_table.get_item(Key={'number': k})['Item'][attr_name]))\n except Exception as e:\n return 0\n \ndef put_last_intro_message_timestamp(number, timestamp, user_secret):\n attr_name = getSanitizedKey(\"last_intro_message_timestamp\", user_secret) \n k = getSanitizedKey(number, user_secret)\n metadata_table.update_item(\n Key={'number': k},\n UpdateExpression=f'SET {attr_name} = :val',\n ExpressionAttributeValues={\n ':val': encrypt(user_secret,str(timestamp))\n }\n )\n\ndef get_last_privacy_accepted_timestamp(number, user_secret):\n attr_name = getSanitizedKey(\"last_privacy_accepted_timestamp\", user_secret) \n try:\n k = getSanitizedKey(number, user_secret)\n return int(decrypt(user_secret, metadata_table.get_item(Key={'number': k})['Item'][attr_name]))\n except Exception as e:\n return 0\n \ndef put_last_privacy_accepted_timestamp(number, timestamp, user_secret):\n attr_name = getSanitizedKey(\"last_privacy_accepted_timestamp\", user_secret) \n k = getSanitizedKey(number, user_secret)\n metadata_table.update_item(\n Key={'number': k},\n UpdateExpression=f'SET {attr_name} = :val',\n ExpressionAttributeValues={\n ':val': encrypt(user_secret,str(timestamp))\n }\n )\n\ndef get_is_private_mode_on(number, user_secret):\n attr_name = getSanitizedKey(\"is_private_mode_on\", user_secret) \n try:\n k = getSanitizedKey(number, user_secret)\n return decrypt(user_secret, metadata_table.get_item(Key={'number': k})['Item'][attr_name]) == 'True'\n except Exception as e:\n return False\n\ndef put_private_mode(number, turn_on_private, user_secret):\n attr_name = getSanitizedKey(\"is_private_mode_on\", user_secret) \n k = getSanitizedKey(number, user_secret)\n metadata_table.update_item(\n Key={'number': k},\n UpdateExpression=f'SET {attr_name} = :val',\n ExpressionAttributeValues={\n ':val': encrypt(user_secret,str(turn_on_private))\n }\n )\n\ndef get_is_unsafe_mode_on(number, user_secret):\n attr_name = getSanitizedKey(\"is_unsafe_mode_on\", user_secret) \n try:\n k = getSanitizedKey(number, user_secret)\n return decrypt(user_secret, metadata_table.get_item(Key={'number': k})['Item'][attr_name]) == 'True'\n except Exception as e:\n return False\n\ndef put_unsafe_mode(number, turn_on_unsafe, user_secret):\n attr_name = 
getSanitizedKey(\"is_unsafe_mode_on\", user_secret) \n k = getSanitizedKey(number, user_secret)\n metadata_table.update_item(\n Key={'number': k},\n UpdateExpression=f'SET {attr_name} = :val',\n ExpressionAttributeValues={\n ':val': encrypt(user_secret,str(turn_on_unsafe))\n }\n )\n\ndef get_last_unsafe_accepted_timestamp(number, user_secret):\n attr_name = getSanitizedKey(\"last_unsafe_accepted_timestamp\", user_secret) \n try:\n k = getSanitizedKey(number, user_secret)\n return int(decrypt(user_secret, metadata_table.get_item(Key={'number': k})['Item'][attr_name]))\n except Exception as e:\n return 0\n \ndef put_last_unsafe_accepted_timestamp(number, timestamp, user_secret):\n attr_name = getSanitizedKey(\"last_unsafe_accepted_timestamp\", user_secret) \n k = getSanitizedKey(number, user_secret)\n metadata_table.update_item(\n Key={'number': k},\n UpdateExpression=f'SET {attr_name} = :val',\n ExpressionAttributeValues={\n ':val': encrypt(user_secret,str(timestamp))\n }\n )\n\ndef get_quota(number):\n try:\n return int(limit_table.get_item(Key={'number': number})['Item']['quota'])\n except Exception as e:\n return None\n\ndef put_quota(number, quota):\n limit_table.put_item(Item={'number': number, 'quota': str(quota)})\n\n# Expected form:\n# [{'timestamp': 123, 'role': \"user\", 'message': \"hello\"}, ... ]\ndef get_short_term_history(number, user_secret):\n try:\n k = getSanitizedKey(number, user_secret)\n data = short_term_history_table.get_item(Key={'number': k})['Item']['history']\n return json.loads(decrypt(user_secret, data))\n except Exception as e:\n return []\n \ndef put_short_term_history(number, history, user_secret):\n k = getSanitizedKey(number, user_secret)\n data = encrypt(user_secret,json.dumps(history))\n short_term_history_table.put_item(Item={'number': k, 'history': data})\n\ndef getSanitizedKey(k, user_secret):\n regex = re.compile('[^a-zA-Z]')\n return regex.sub('', get_key(user_secret + k).decode())[-20:]\n\ndef get_key(str):\n digest = hashes.Hash(hashes.SHA256(), backend=default_backend())\n digest.update(str.encode())\n return base64.urlsafe_b64encode(digest.finalize())\n\ndef encrypt(password, str):\n if len(str) == 0:\n return \"\"\n f = Fernet(get_key(password+random_encryption_key))\n return f.encrypt(str.encode()).decode()\n\ndef decrypt(password, bts):\n if len(bts) == 0:\n return \"\"\n f = Fernet(get_key(password+random_encryption_key))\n return f.decrypt(bts).decode()","repo_name":"infiloop2/infibot","sub_path":"dynamo_api.py","file_name":"dynamo_api.py","file_ext":"py","file_size_in_byte":6039,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"11675095812","text":"from collections import deque\nimport sys\ninput = sys.stdin.readline\ninp = int(input())\ndeck = deque()\n\ndef Que(lis):\n move = lis[0]\n if move == 'push_back':\n deck.append(lis[1])\n elif move == 'push_front':\n deck.appendleft(lis[1])\n elif move == 'pop_front':\n if len(deck) == 0:\n return -1\n else:\n return deck.popleft()\n elif move == 'pop_back':\n if len(deck) == 0:\n return -1\n else:\n return deck.pop()\n elif move == 'size':\n return len(deck)\n elif move == 'empty':\n if len(deck) == 0:\n return 1\n else:\n return 0\n elif move == 'front':\n if len(deck) == 0:\n return -1\n else:\n return deck[0]\n elif move == 'back':\n if len(deck) == 0:\n return -1\n else:\n return deck[-1]\n\nfor _ in range(inp):\n lis = list(input().split())\n if lis[0][0:4] == 'push':\n Que(lis)\n else:\n 
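# non-push commands return a value, so print it\n        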
print(Que(lis))","repo_name":"hunterhhunter/Study_A.I_Algorithm","sub_path":"Algorithm/Study_Algorithm/Stack&Deque/10866.py","file_name":"10866.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"3947506059","text":"from scipy import io\nimport numpy as np\nimport pdb\nimport scipy.signal\n\n\nimport matplotlib.pyplot as plt\n\n\nif __name__ == '__main__':\n    pdb.set_trace()\n    raw = io.loadmat(f\"raw/ultrasound_data_with_estimatedPP.mat\")['ultrasound_data']\n\n    name_all = []\n    v_all = []\n    a_all = []\n\n    pwv_all = []\n    age_all = []\n    comp_all = []\n    z0_all = []\n    pp_all = []\n    bp_shape_all = []\n\n    map_all = []\n\n    resample_len_per_beat = 100\n    id_all = []\n    dbp_all = []\n    heartrate_all = []\n    area_all = []\n\n    gender_all = []\n    height_all = []\n    weight_all = []\n\n    diameter_complete_all = []\n    diameter_complete_time_frame_all = []\n    velocity_complete_all = []\n    velocity_complete_time_frame_all = []\n\n    for subject_id in range(23):\n        if subject_id in (20, 21, 22, 23):\n            continue\n        print(subject_id)\n        data = raw[subject_id][0][10][0][0]\n        id_all.append(subject_id)\n\n        velocity = data[1].flatten()\n        velocity = velocity[np.logical_not(np.isnan(velocity))]\n\n        diameter = data[3].flatten()\n        diameter = diameter[np.logical_not(np.isnan(diameter))]\n\n        diameter = scipy.signal.resample(diameter, resample_len_per_beat)\n        velocity = scipy.signal.resample(velocity, resample_len_per_beat)\n\n        diameter_max_index = np.argmax(diameter)\n        diameter = np.concatenate(\n            (diameter[diameter_max_index:], diameter[:diameter_max_index]))\n\n        velocity_max_index = np.argmax(velocity)\n        velocity = np.concatenate(\n            (velocity[velocity_max_index:], velocity[:velocity_max_index]))\n\n        name = raw[subject_id][0][0][0]\n\n        diameter_complete = raw[subject_id][0][1][0][0][0]\n        diameter_complete_all.append(diameter_complete)\n\n        diameter_complete_time_frame = raw[subject_id][0][1][0][0][2]\n        diameter_complete_time_frame_all.append(diameter_complete_time_frame)\n\n        velocity_complete = raw[subject_id][0][2][0][0][0]\n        velocity_complete_all.append(velocity_complete)\n\n        velocity_complete_time_frame = raw[subject_id][0][2][0][0][2]\n        velocity_complete_time_frame_all.append(velocity_complete_time_frame)\n\n        print(name)\n        data_supine_bp = raw[subject_id][0][7][0]\n        SBP = data_supine_bp[0][0][0][0][0]\n        SBP = SBP[np.logical_not(np.isnan(SBP))]\n        SBP_avg = SBP.mean()\n\n        DBP = data_supine_bp[0][0][0][0][1]\n        DBP = DBP[np.logical_not(np.isnan(DBP))]\n        DBP_avg = DBP.mean()\n\n        ppressure = SBP_avg - DBP_avg\n\n        area = np.pi * np.square(diameter) / 4\n        compliance = (max(area) - min(area)) / ppressure\n        bp_shape = (area - area.mean()) / compliance\n\n        v_all.append(velocity)\n        bp_shape_all.append(bp_shape)\n        name_all.append(name)\n\n        map = DBP_avg + (area.mean() - min(area)) / (max(area) - min(area)) * (\n                SBP_avg - DBP_avg)\n        map_all.append(map)\n        dbp_all.append(DBP_avg)\n        area_all.append(area)\n\n        anthro = raw[subject_id][0][5][0][0]\n        age = anthro[0][0][0]\n        if age == 0:\n            age_all.append(30)\n        else:\n            age_all.append(age)\n\n        if anthro[1][0][0][0] == 'male':\n            gender_all.append(0)\n        else:\n            gender_all.append(1)\n\n        height_all.append(anthro[2][0][0])\n        weight_all.append(anthro[3][0][0])\n\n    np.save(f'./npy/measured_mit_v1_part2_shape_all.npy', np.array(bp_shape_all))\n    np.save(f'./npy/measured_mit_v1_part2_map_all.npy', np.array(map_all))\n    np.save(f'./npy/measured_mit_v1_part2_v_all.npy', 
np.array(v_all))\n    np.save(f'./npy/measured_mit_v1_part2_name_all.npy', np.array(name_all))\n    np.save(f'./npy/measured_mit_v1_part2_id_all.npy', np.array(id_all))\n    np.save(f'./npy/measured_mit_v1_part2_dbp_all.npy', np.array(dbp_all))\n    np.save(f'./npy/measured_mit_v1_part2_area_all.npy', np.array(area_all))\n\n    np.save(f'./npy/measured_mit_v1_part2_age_all.npy', np.array(age_all))\n    np.save(f'./npy/measured_mit_v1_part2_gender_all.npy', np.array(gender_all))\n    np.save(f'./npy/measured_mit_v1_part2_height_all.npy', np.array(height_all))\n    np.save(f'./npy/measured_mit_v1_part2_weight_all.npy', np.array(weight_all))\n\n    np.save(f'./npy/measured_mit_v1_part2_diameter_complete_all.npy', np.array(diameter_complete_all))\n    np.save(f'./npy/measured_mit_v1_part2_diameter_complete_time_frame_all.npy', np.array(diameter_complete_time_frame_all))\n    np.save(f'./npy/measured_mit_v1_part2_velocity_complete_all.npy', np.array(velocity_complete_all))\n    np.save(f'./npy/measured_mit_v1_part2_velocity_complete_time_frame_all.npy', np.array(velocity_complete_time_frame_all))\n\n    np.save(f'./npy/measured_mit_v1_part2_heartrate_all.npy', np.array(heartrate_all))\n\n    print('finished')\n","repo_name":"mit-han-lab/ml-blood-pressure","sub_path":"data/measured_mit_v1/preprocess_part2.py","file_name":"preprocess_part2.py","file_ext":"py","file_size_in_byte":4846,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"4"} +{"seq_id":"30948792542","text":"# -*- encoding: utf-8 -*-\n'''\n@File    :   0919.medium.完全二叉树插入器.py\n@Time    :   2022/07/25 14:48:05\n@Author  :   Zhifeng Li\n@Contact :   li_zaaachary@163.com\n@Desc    :   \n'''\n\n# Definition for a binary tree node.\nclass TreeNode:\n    def __init__(self, val=0, left=None, right=None):\n        self.val = val\n        self.left = left\n        self.right = right\nfrom collections import deque\nimport math\n\nclass CBTInserter:\n    '''\n    Use division to decide the left/right choice at each level, from the bottom up.\n    Runtime: 56 ms, beating 65.92% of Python3 submissions; memory: 16 MB, beating 44.13% of Python3 submissions.\n    '''\n    def __init__(self, root: TreeNode):\n        self.root = root\n        self.node_count = self._count_node()\n        \n\n    def _count_node(self):\n        # Level-order traversal to count the nodes\n        if not self.root:\n            return 0\n        node_count = 0\n        queue = deque([self.root])\n        while queue:\n            for _ in range(len(queue)):\n                temp = queue.popleft()\n                node_count += 1\n                if temp.left:\n                    queue.append(temp.left)\n                if temp.right:\n                    queue.append(temp.right)\n        return node_count\n\n    def insert(self, val: int) -> int:\n        self.node_count += 1\n        target = self.node_count\n        select = []\n        target, mod = divmod(target, 2)\n        while target != 0:\n            select.append(mod)\n            target, mod = divmod(target, 2)\n        ptr = self.root\n        for i in range(len(select)-1, 0, -1):\n            d = select[i]\n            if d:\n                ptr = ptr.right\n            else:\n                ptr = ptr.left\n        if select[0]:\n            ptr.right = TreeNode(val)\n        else:\n            ptr.left = TreeNode(val)\n        return ptr.val\n\n    def get_root(self) -> TreeNode:\n        return self.root\n\n\n\n# Your CBTInserter object will be instantiated and called as such:\n# obj = CBTInserter(root)\n# param_1 = obj.insert(val)\n# param_2 = obj.get_root()\n","repo_name":"Zaaachary/CODING","sub_path":"LeetCode/0919.medium.完全二叉树插入器.py","file_name":"0919.medium.完全二叉树插入器.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"8801667460","text":"\nfrom PySide2.QtWidgets import *\nfrom PySide2.QtCore import *\n\nclass Factorial(QObject):\n    xChanged = Signal(int)\n    def __init__(self):\n        super(Factorial, self).__init__()\n        self.xval = -1\n        
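# running factorial product; the loop transition multiplies it as x counts down\n        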
self.facval = 1\n    def getX(self):\n        return self.xval\n    def setX(self, x):\n        if self.xval == x:\n            return\n        self.xval = x\n        self.xChanged.emit(x)\n    x = Property(int, getX, setX)\n    def getFact(self):\n        return self.facval\n    def setFact(self, fac):\n        self.facval = fac\n    fac = Property(int, getFact, setFact)\n\nclass FactorialLoopTransition(QSignalTransition):\n    def __init__(self, fact):\n        super(FactorialLoopTransition, self).__init__(fact, SIGNAL('xChanged(int)'))\n        self.fact = fact\n    def eventTest(self, e):\n        if not super(FactorialLoopTransition, self).eventTest(e):\n            return False\n        return e.arguments()[0] > 1\n    def onTransition(self, e):\n        x = e.arguments()[0]\n        fac = self.fact.fac\n        self.fact.fac = x * fac\n        self.fact.x = x - 1\n\nclass FactorialDoneTransition(QSignalTransition):\n    def __init__(self, fact):\n        super(FactorialDoneTransition, self).__init__(fact, SIGNAL('xChanged(int)'))\n        self.fact = fact\n    def eventTest(self, e):\n        if not super(FactorialDoneTransition, self).eventTest(e):\n            return False\n        return e.arguments()[0] <= 1\n    def onTransition(self, e):\n        print(self.fact.fac)\n\nif __name__ == '__main__':\n    import sys\n    app = QCoreApplication(sys.argv)\n    factorial = Factorial()\n    machine = QStateMachine()\n\n    compute = QState(machine)\n    compute.assignProperty(factorial, 'fac', 1)\n    compute.assignProperty(factorial, 'x', 6)\n    compute.addTransition(FactorialLoopTransition(factorial))\n\n    done = QFinalState(machine)\n    doneTransition = FactorialDoneTransition(factorial)\n    doneTransition.setTargetState(done)\n    compute.addTransition(doneTransition)\n\n    machine.setInitialState(compute)\n    machine.finished.connect(app.quit)\n    machine.start()\n\n    sys.exit(app.exec_())\n","repo_name":"pyside/pyside2-setup","sub_path":"examples/widgets/state-machine/factstates.py","file_name":"factstates.py","file_ext":"py","file_size_in_byte":2096,"program_lang":"python","lang":"en","doc_type":"code","stars":206,"dataset":"github-code","pt":"4"} +{"seq_id":"5617229261","text":"from typing import List\nfrom enum import Enum\nfrom collections import ChainMap\n\nfrom api.mattermost.v4.models import Post\nfrom core.sqllib.sql_mattermost import SQLMattermost\nimport config\n\n# List of handler functions for events of type 'posted'\nprocessors = []\n_ACTIVATED_PROCESSORS: List[str] = []\n\n\nclass MessageProcessorsKinds(Enum):\n    \"\"\"Names of the available message processors.\n    The settings are expected to list the processors for `posted`\n    events under exactly these names.\n    \"\"\"\n    # Save the message\n    SAVE = 'save'\n    # Reply to the message\n    RESPONSE = 'response'\n\n\ndef _save_message_event_processor(event, ws=None):\n    \"\"\"Message handler.\n    Saves the given post to the database.\n    \"\"\"\n\n    post = event['data']['post']\n\n    if config.MATTERMOST_FILTER_MESSAGES_CONTAINS in post['event']:\n        mm_db_connect = SQLMattermost(\n            db_name=config.DATABASE_NAME,\n            db_username=config.DATABASE_USERNAME,\n            db_password=config.DATABASE_PASSWORD,\n            db_host=config.DATABASE_HOST,\n            db_port=config.DATABASE_PORT,\n        )\n        mm_db_connect.save_message(\n            Post(\n                id=post['id'],\n                filter_key=config.MATTERMOST_FILTER_MESSAGES_CONTAINS, # filter_key,\n                channel_id=post['channel_id'],\n                channel_display_name=event['data']['channel_display_name'],\n                user_id=post['user_id'],\n                sender_name=event['data']['sender_name'],\n                create_at=post['create_at'],\n                message=post['message'],\n            )\n        )\n\n\ndef _response_to_sender_event_processor(event, ws=None):\n    \"\"\"Message handler.\n    Replies to the sender.\n    \"\"\"\n\n\n# Check whether events of type 'posted' are specified in the settings\n__current_module = __name__.split('.')[-1]\n_PROCESSORS_FOR_EVENTS_MAP = ChainMap(*config.MATTERMOST_TARGET_EVENTS)\n\n# If so, activate the processors specified for this event type\nif __current_module in config.MATTERMOST_TARGET_EVENTS:\n    _ACTIVATED_PROCESSORS = config.MATTERMOST_TARGET_EVENTS[__current_module]\n\n# Register the processors that implement the save logic\nif MessageProcessorsKinds.SAVE.value in _ACTIVATED_PROCESSORS:\n    processors.append(_save_message_event_processor)\n\n# Register the processors that implement logic requiring replies to messages\nif MessageProcessorsKinds.RESPONSE.value in _ACTIVATED_PROCESSORS:\n    processors.append(_response_to_sender_event_processor)\n\n","repo_name":"topoleov/mattermost-bot-module","sub_path":"websocketclient/event_processors/posted.py","file_name":"posted.py","file_ext":"py","file_size_in_byte":2976,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"21085899408","text":"from rest_framework.serializers import ModelSerializer\nfrom order.models import OrderItem, Order\n\n\nclass OrderItemSerializer(ModelSerializer):\n    class Meta:\n        model = OrderItem\n        exclude = ['user']\n\n\nclass OrderSerializer(ModelSerializer):\n    items = OrderItemSerializer(many=True)\n\n    class Meta:\n        model = Order\n        exclude = ['user']\n        read_only_fields = ['total', 'weigth', 'sums', 'ordered']\n\n    def create(self, validated_data: dict):\n        items = validated_data.pop('items')\n        nasabah = validated_data.get('nasabah')\n        order: Order = Order.objects.create(**validated_data)\n        for item in items:\n            order.items.add(\n                OrderItem.objects.create(\n                    nasabah=nasabah,\n                    **item)\n            )\n        order.save()\n        return order\n","repo_name":"hexatester/bank-sampah","sub_path":"core-bank/order/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"13752381119","text":"# -*- coding: utf-8 -*-\n\nimport os\n\nfrom qgis.PyQt.QtCore import QTranslator, QCoreApplication\nfrom qgis.PyQt.QtWidgets import QMessageBox\n\nfrom kadasrouting.utilities import localeName\n\n# Setup internationalisation for the plugin.\n#\n# See if QGIS wants to override the system locale\n# and then see if we can get a valid translation file\n# for whatever locale is effectively being used.\n# Adapted from: https://github.com/inasafe/inasafe/blob/develop/__init__.py\n\n\nos.environ[\"LANG\"] = str(localeName())\n\nroot = os.path.abspath(os.path.join(os.path.dirname(__file__)))\ntranslation_path = os.path.join(\n    root, \"i18n\", \"kadasrouting_\" + str(localeName()) + \".qm\"\n)\n\nif os.path.exists(translation_path):\n    translator = QTranslator()\n    result = translator.load(translation_path)\n    if not result:\n        message = \"Failed to load translation for %s\" % localeName()\n        raise Exception(message)\n    # noinspection PyTypeChecker,PyCallByClass\n    QCoreApplication.installTranslator(translator)\nelif not os.path.exists(translation_path) and localeName().lower() in [\n    \"it\",\n    \"de\",\n    \"fr\",\n]:\n    # Show a warning if the IT, FR, or DE translation file is not found and the current system locale is one of them\n    QMessageBox.warning(\n        None,\n        \"Translation file missing\",\n        \"Translation file is not found for %s in %s\"\n        % (localeName().upper(), translation_path),\n    )\n\n\ndef classFactory(iface):\n    from .plugin import RoutingPlugin\n\n    return 
RoutingPlugin(iface)\n","repo_name":"camptocamp/kadas-routing-plugin","sub_path":"kadasrouting/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"71985076597","text":"from app import models, schemas, utils\nfrom fastapi import Response, status, HTTPException, Depends, APIRouter\nfrom sqlalchemy.orm import Session\nfrom app.database import get_db, engine\n\nrouter = APIRouter(\n prefix=\"/users\",\n tags=['Users']\n)\n\n#Register a user\n@router.post('/', status_code=status.HTTP_201_CREATED, response_model=schemas.ResponseUser)\nasync def create_user(user: schemas.User, db: Session = Depends(get_db)):\n #hashing password\n user.password = utils.hash(user.password)\n\n #get data from user schema, dump it to model, then pass it to db\n new_user = models.User(**user.model_dump())\n db.add(new_user)\n db.commit()\n db.refresh(new_user)\n\n return new_user\n\n#Get user by id\n@router.get('/{id}', response_model=schemas.ResponseUser)\nasync def get_user(id: int, db: Session = Depends(get_db)):\n user = db.query(models.User).filter(models.User.id == id).first()\n if not user:\n raise HTTPException(status_code = status.HTTP_404_NOT_FOUND, detail=\"User doesnt exist\")\n return user","repo_name":"ab-azmi/learn_fastapi","sub_path":"app/routers/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"74431108277","text":"from .Samplers import build_trainSamplers, build_testSampler\nfrom .DataLoader import tensor_loader, tensor_loader_basic\n\ndef build_dataloaders(gene_train, protein_train, gene_test, bools, train_keys, val_frac, batch_size, device, celltypes = None, categories = None):\n sampler_train, sampler_val = build_trainSamplers(gene_train, train_keys, batch_size, val_frac)\n indexer_test = build_testSampler(gene_train, train_keys, batch_size)\n\n DS_train = tensor_loader(gene_train, protein_train, protein_boolean = bools, sampler = sampler_train, device = device, celltypes = celltypes, categories = categories)\n DS_val = tensor_loader(gene_train, protein_train, protein_boolean = bools, sampler = sampler_val, device = device, celltypes = celltypes, categories = categories)\n DS_impute = tensor_loader(gene_train, protein_boolean = bools, sampler = indexer_test, device = device, celltypes = celltypes, categories = categories)\n \n if gene_test is not None:\n DS_test = tensor_loader_basic(gene_test, batch_size, device = device)\n else:\n DS_test = None\n \n return DS_train, DS_val, DS_impute, DS_test","repo_name":"jlakkis/sciPENN","sub_path":"src/sciPENN/Data_Infrastructure/DataLoader_Constructor.py","file_name":"DataLoader_Constructor.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"4"} +{"seq_id":"37977913186","text":"#!/usr/bin/env python3\nimport os\nimport re\nimport sys\nprint(sys.executable, file=sys.stderr)\nprint(sys.path, file=sys.stderr)\nprint(sys.version, file=sys.stderr)\n\nimport collections\nimport argparse\nimport itertools\nimport matplotlib\nimport math\n\nimport scipy.io\nimport numpy as np\nimport pandas as pd\nimport scipy\nimport scipy.stats as stats\nimport scipy.sparse as sp_sparse\n\nfrom collections import defaultdict\nfrom scipy import sparse, io\nfrom scipy.sparse import csr_matrix\n\n\nprint('numpy', np.__version__, 
file=sys.stderr)\nprint('pandas', pd.__version__, file=sys.stderr)\nprint('scipy', scipy.__version__, file=sys.stderr)\nprint('matplotlib', matplotlib.__version__, file=sys.stderr)\n\n\ndef load_clone_tree(clone_dict_file):\n    \n    Clone_dict = {}\n    with open(clone_dict_file) as f:\n        first_line = f.readline()\n        word = '}'\n        for line in f:\n            if not word in line:\n                ID, clones = line.split(\":\")\n                clone_IDs = clones.replace(\"'[\", \"\").replace(\"]',\", \"\").replace(\"'\", \"\").strip(' \\n')\n                individual_clone_ID = clone_IDs.split(', ')\n                Clone_dict.update({ID.strip(\"'\") : individual_clone_ID})\n    \n    return Clone_dict\n\n\ndef main():\n    parser = argparse.ArgumentParser()\n\n    parser.add_argument(\n        '-c', '--clone_tree', dest='clone_tree', required=True,\n        type=str,\n        help='specify the text file of clonal cells tree.'\n    )\n\n    parser.add_argument(\n        '-s', '--sgRNA_df', dest='sgRNA_df', required=True,\n        type=str,\n        help='specify the pickle file of processed sgRNA df.'\n    )\n\n    parser.add_argument(\n        '-n', '--number', dest='number', required=True,\n        type=int,\n        help='specify the number of randomly selected cells'\n    ) \n\n    parser.add_argument(\n        '-g', '--group', dest='group', required=True,\n        type=str,\n        help='specify the group of cells to analyze: clone or non-clone'\n    )\n\n    parser.add_argument(\n        '-o', '--output', dest='output', required=True,\n        type=str,\n        help='specify the output directory'\n    )\n    \n    args = parser.parse_args()\n    SGRNA_DF = args.sgRNA_df\n    Clone_dict_file = args.clone_tree\n    GROUP = args.group\n    NUMBER = args.number\n    OUTPUT_FILE = args.output\n\n    # check the requested group of cells\n    if (GROUP != 'clone') and (GROUP != 'non-clone'):\n        print(\"Incorrect groups of cells. Has to be either 'clone' or 'non-clone'.\", file = sys.stderr, flush=True)\n        sys.exit(0)\n\n\n    #Load data \n    Clone_dict = load_clone_tree(Clone_dict_file)\n    sgrna_df = pd.read_pickle(SGRNA_DF)\n    sgrna_df_adj_bool = sgrna_df > 0\n    sgrna_num, cell_num = sgrna_df_adj_bool.shape\n    #remove cells without sgRNA \n    cells_with_sgRNA = sgrna_df_adj_bool.T[(np.sum(sgrna_df_adj_bool, axis=0) > 0).values].index\n\n    #Flatten all the clones \n    All_cell_ID_list = []\n    if (GROUP == 'clone' ):\n        for i in Clone_dict.keys():\n            if len(Clone_dict[i]) > 100:\n                cell_ID_list = Clone_dict[i]\n                for k in cell_ID_list:\n                    All_cell_ID_list.append(k)\n    \n    elif (GROUP == 'non-clone'):\n        for i in Clone_dict.keys():\n            if len(Clone_dict[i]) < 2:\n                cell_ID_list = Clone_dict[i]\n                for k in cell_ID_list:\n                    All_cell_ID_list.append(k)\n    \n    #randomly select cells (np.random.choice needs an array-like, not a set)\n    All_sgRNA_cell = set(All_cell_ID_list).intersection(set(cells_with_sgRNA))\n    Random_cell_ID_list = np.random.choice(list(All_sgRNA_cell), \n                                           size=NUMBER, \n                                           replace=False)\n    \n    All_sgrna_overlap_df = pd.DataFrame(data=None, \n                                        index=Random_cell_ID_list, \n                                        columns=Random_cell_ID_list)\n    \n    print('The size of randomly selected cells overlap rate df is: ' + str(All_sgrna_overlap_df.shape), file=sys.stderr, flush=True)\n\n    #find the sgRNAs in each randomly selected cells \n    All_clone_cells_sgRNA_dict = defaultdict(list)\n    for i in Random_cell_ID_list:\n        clonal_sgrna = list(sgrna_df_adj_bool.index[(sgrna_df_adj_bool[i] > 0).values])\n        All_clone_cells_sgRNA_dict[i].append(clonal_sgrna)\n\n    counter = 0\n    for i in range(len(Random_cell_ID_list)):\n        cell1_region = set(All_clone_cells_sgRNA_dict[All_sgrna_overlap_df.columns[i]][0])\n        \n        for j in range(len(Random_cell_ID_list)):\n            cell2_region = set(All_clone_cells_sgRNA_dict[All_sgrna_overlap_df.columns[j]][0])\n            
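# fill each symmetric pair once with the hypergeometric overlap p-value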
\n if math.isnan(All_sgrna_overlap_df.iloc[j, i]) == True:\n pval = stats.hypergeom.sf(len(cell1_region.intersection(cell2_region))-1, \n sgrna_num, \n len(cell1_region), \n len(cell2_region))\n \n All_sgrna_overlap_df.iloc[j, i] = pval\n All_sgrna_overlap_df.iloc[i, j] = pval\n \n if counter % 10000 == 0:\n print(counter, file=sys.stderr, flush=True)\n elif counter % 100 == 0:\n print(\".\", end = '', file=sys.stderr, flush=True)\n counter += 1\n else:\n continue\n \n All_sgrna_overlap_df.to_pickle(OUTPUT_FILE)\n\nif __name__ == '__main__':\n main()\n","repo_name":"yihan1119/Group_clone","sub_path":"Scripts/Random_1000_cells_overlap_rate-ver2.py","file_name":"Random_1000_cells_overlap_rate-ver2.py","file_ext":"py","file_size_in_byte":5513,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"16041966555","text":"# index\n\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash import callback_context\nfrom dash.dependencies import Output, Input, State\n\nfrom app import app\nfrom dash_index_handler import DashIndexHandler\nfrom elements import CreateModal\nfrom elements import NavbarElement, LogoMTE\n\nnavbar = (\n html.Ul([\n NavbarElement(\"Panel de Control\", \"img/settings.svg\", \"/panel_control\", \"panel-navbar\"),\n NavbarElement(\"Finanzas\", \"img/money.svg\", \"/finanzas\", \"finanzas-navbar\"),\n html.Li([\n dcc.Upload(\n id=\"upload-comp-base\",\n multiple=True,\n children=[html.A([\n html.Img(src=app.get_asset_url(\"img/upload.svg\")),\n \"Cargar archivo\"\n ])],\n )\n ]),\n LogoMTE(),\n ], id=\"navbar\")\n)\n\napp.layout = html.Div([\n # Para manejar las distintas páginas/rutas\n dcc.Location(id='url', refresh=True),\n navbar,\n CreateModal(\"error5\"),\n html.Div(id=\"page-content\", children=[])\n]\n)\n\ndash_handler = DashIndexHandler()\nprint(\"RONILOG llegando al server\")\nserver = app.server\n\n@app.callback(\n [\n Output('page-content', 'children'),\n Output(\"panel-navbar\", \"className\"),\n Output(\"finanzas-navbar\", \"className\"),\n Output(\"error5-modal\", \"is_open\"),\n Output(\"header-error5\", \"children\"),\n Output(\"body-error5\", \"children\"),\n ],\n [\n Input('url', 'pathname'),\n Input('upload-comp-base', 'contents'),\n Input('close-modal-error5-button', 'n_clicks'),\n State('upload-comp-base', 'filename'),\n State('upload-comp-base', 'last_modified'),\n ]\n)\ndef display_page(pathname, list_of_contents, close_modal_button, list_of_names, list_of_dates):\n trigger = callback_context.triggered[0]\n\n res = dash_handler.callback(trigger, pathname, list_of_contents, list_of_names, list_of_dates)\n return res\n\n\nif __name__ == \"__main__\":\n print(\"RONILOG run server\")\n app.run_server(debug=True, port=9050)\n","repo_name":"Taller-Datos-Populares-UBA/MTE","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"4"} +{"seq_id":"10382214620","text":"from typing import TypeVar, Generic\n\nTreeNode = TypeVar('TreeNodeObj')\n\nclass Solution(Generic[TreeNode]):\n def countNodes(self, root: TreeNode) -> int:\n to_check = []\n if root:\n to_check.append(root)\n result = 0\n while to_check:\n result += 1\n current = to_check.pop()\n if current.left:\n to_check.append(current.left)\n if current.right:\n to_check.append(current.right)\n\n return result\n\nclass TreeNodeObj:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left 
= left\n self.right = right\n","repo_name":"acreally/leetcode","sub_path":"src/count_complete_tree_nodes/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"12482303004","text":"import Model\nfrom random import randint\nimport Agent\nimport numpy as np\nimport keras\nimport os\n# import tensorflow as tf\nclass AI:\n turn = 0\n old_world = None\n def preprocess(self, world):\n print(\"preprocess\")\n # graph = tf.get_default_graph()\n # global graph\n # with graph.as_default():\n self.moveAgent = Agent.DQNAgent(\"move\", world)\n if os.path.exists(self.moveAgent.name):\n with keras.backend.get_session().graph.as_default():\n self.moveAgent.load()\n print(\"loaded!\")\n # self.moveAgent.model._make_predict_function()\n\n def pick(self, world):\n print(\"pick\")\n hero_names = [hero_name for hero_name in Model.HeroName]\n world.pick_hero(hero_names[0])\n\n def move(self, world):\n with keras.backend.get_session().graph.as_default():\n if self.old_world is not None:\n moves = self.evaluate(self.old_world,world)\n for (state, action, reward, next_state) in moves:\n self.moveAgent.remember(state, action, reward, next_state)\n # print(\"loc: {},{}\".format(state.hero.current_cell.row, next_state.hero.current_cell.row))\n if len(self.moveAgent.memory) > self.moveAgent.batch_size:\n self.moveAgent.replay()\n\n print(\"move\")\n dirs = [direction for direction in Model.Direction]\n for hero in world.my_heroes:\n state = State(hero,world)\n reshaped_state = np.reshape(state.state,[1,len(world.map.cells)**2])\n action = self.moveAgent.act(reshaped_state)\n # print(action)\n move = dirs[action]\n world.move_hero(hero=hero, direction=move)\n # next_state, reward = self.evaluate(hero,world)\n # self.previous_moves.append((state, action, reward, next_state))\n # if e % 50 == 0:\n # agent.save(output_dir + \"weights_\" + '{:04d}'.format(e) + \".hdf5\")\n # world.move_hero(hero=hero, direction=move)\n self.old_world = world\n # for hero in world.my_heroes:\n # world.move_hero(hero=hero, direction=dirs[randint(0, len(dirs) - 1)])\n\n def action(self, world):\n print(\"action\")\n str = \"\"\n for hero in world.my_heroes:\n row_num = randint(0, world.map.row_num)\n col_num = randint(0, world.map.column_num)\n abilities = hero.abilities\n world.cast_ability(hero=hero, ability=abilities[randint(0, len(abilities) - 1)],\n cell=world.map.get_cell(row_num, col_num))\n print(self.turn)\n print(world.game_constants.max_turns)\n if world.game_constants.max_turns+1 == self.turn:\n print(\"saved!\")\n self.moveAgent.save()\n self.turn += 1\n # for i in range(len(cells)):\n # for j in range(len(cells[i])):\n # str = str + \"{},{} \".format(int(cells[i][j].is_wall),int(cells[i][j].is_in_vision))\n # str = str + \"\\n\"\n # str = str + \"\\n-----------------------------------\\n\"\n # with open(\"Output.txt\", \"a\") as text_file:\n # text_file.write(str)\n # def evaluate(self,hero,world):\n # reward = 0\n # if hero.current_cell.is_in_objective_zone:\n # reward = 100\n # next_state = State(hero,world)\n # return next_state,reward\n\n def evaluate(self,old_world,new_world):\n old_hero = old_world.my_heroes\n new_hero = new_world.my_heroes\n moves = []\n for index,hero in enumerate(old_hero):\n state = State(hero,old_world)\n next_state = State(new_hero[index],new_world)\n action = None\n if hero.current_cell.row +1 == new_hero[index].current_cell.row:\n action = 1\n elif 
hero.current_cell.row -1 == new_hero[index].current_cell.row:\n action = 0\n elif hero.current_cell.column +1 == new_hero[index].current_cell.column:\n action = 2\n elif hero.current_cell.column -1 == new_hero[index].current_cell.column:\n action = 3\n # print(\"old: {},{} new:{},{}\".format(hero.current_cell.row,hero.current_cell.column,new_hero[index].current_cell.row,new_hero[index].current_cell.column))\n reward = self.get_reward(new_hero[index],new_world)\n # if new_hero[index].current_cell.is_in_objective_zone:\n # reward = 100\n # print(\"rewarded!!!!!\")\n if action is not None:\n moves.append((state, action, reward, next_state))\n\n return moves\n\n def get_reward(self,hero,world):\n reward = 0\n objective_zone_cell = world.map.objective_zone[0]\n reward = 2 * (30 - world.manhattan_distance(hero.current_cell,objective_zone_cell))\n if hero.current_cell.is_in_objective_zone:\n reward = 200\n print(\"rewarded 100!!!!!\")\n return reward\nclass State:\n\n def __init__(self,hero,world):\n self.current_cell = hero.current_cell\n self.state = np.zeros((len(world.map.cells),len(world.map.cells)))\n self.hero = hero\n for i in range(len(world.map.cells)):\n for j in range(len(world.map.cells[i])):\n if not world.map.cells[i][j].is_in_vision:\n self.state[i][j] = 0\n elif world.map.cells[i][j].is_wall:\n self.state[i][j] = -2\n else:\n self.state[i][j] = 3\n for hero in world.opp_heroes:\n self.state[hero.current_cell.row][hero.current_cell.column] = 1\n # for enemy_hero in world.opp_heroes:\n # self.state[enemy_hero.current_cell.row][enemy_hero.current_cell.column] = -1\n self.state[self.current_cell.row][self.current_cell.column] = 2\n\n\n","repo_name":"sanhood/aisharif","sub_path":"2nd Client/AI.py","file_name":"AI.py","file_ext":"py","file_size_in_byte":5968,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"11824589866","text":"from __future__ import absolute_import\n\nimport json\nimport uuid\nimport decimal\nimport collections\n\nfrom flask import current_app\nfrom oauthlib.oauth2 import BackendApplicationClient\nfrom oauthlib.oauth2.rfc6749.errors import TokenExpiredError\nfrom werkzeug.local import LocalProxy\nfrom werkzeug.urls import url_join\nfrom werkzeug.http import parse_options_header\nfrom werkzeug.utils import cached_property\nfrom requests_oauthlib import OAuth2Session\n\nfrom libs.cache import mc\n\n\nclass FirewoodClient(object):\n \"\"\"The client for accessing firewood service.\n\n The detail of OAuth 2 may be hidden in this class. 
But you need to known\n basic HTTP knownledge and the API design of firewood service.\n\n Feel easy to ask your colleague if any question exists.\n \"\"\"\n\n _token_rds_key = 'firewood:{0.client_id}:token'\n\n def __init__(self, base_url, token_url, client_id, client_secret, scope):\n self.base_url = base_url.rstrip('/')\n self.token_url = token_url\n self.client_id = client_id\n self.client_secret = client_secret\n self.scope = scope\n\n @cached_property\n def session(self):\n \"\"\"Session with pre-loaded oauth token.\"\"\"\n client = BackendApplicationClient(client_id=self.client_id)\n session = OAuth2Session(client=client)\n return self._process_token(session)\n\n def _process_token(self, session):\n \"\"\"Loads token from cache or fetches from remote.\"\"\"\n token = self._load_token()\n if token:\n # FIXME too many private methods are called\n session.token = token\n session._client.token = token\n session._client.access_token = token['access_token']\n else:\n token = session.fetch_token(\n token_url=self.token_url,\n client_id=self.client_id,\n client_secret=self.client_secret,\n scope=self.scope)\n self._save_token(token)\n return session\n\n def _save_token(self, token):\n key = self._token_rds_key.format(self)\n data = json.dumps(token)\n mc.set(key, data)\n mc.expireat(key, int(token['expires_at']))\n\n def _load_token(self):\n key = self._token_rds_key.format(self)\n data = mc.get(key)\n if data:\n return json.loads(data)\n\n def _drop_token(self):\n key = self._token_rds_key.format(self)\n mc.delete(key)\n self.__dict__.pop('session', None)\n\n def request(self, method, resource, *args, **kwargs):\n \"\"\"Sends request to remote server.\n\n :param method: HTTP method to use.\n :param resource: Relative path of resource.\n :param *args: Other arguments that :class:`requests.Request` takes.\n :param *kwargs: Other arguments that :class:`requests.Request` takes.\n \"\"\"\n with_exception = kwargs.pop('with_exception', FirewoodException)\n\n url = url_join(self.base_url + '/', resource.lstrip('/'))\n\n for i in reversed(xrange(3)):\n try:\n response = self.session.request(method, url, *args, **kwargs)\n except TokenExpiredError:\n self._drop_token()\n if i == 0: # retry last time\n raise\n else:\n if response.status_code == 401:\n self._drop_token()\n else:\n break\n\n if response.ok:\n return response\n\n # if the response could not be understood, we throw all errors\n content_type, _ = parse_options_header(response.headers['content-type'])\n if content_type.startswith('application/json'):\n raise with_exception.from_response(response)\n elif response.status_code == 500:\n raise FirewoodInternalError(response)\n else:\n response.raise_for_status()\n\n def create_account(self, person_name, person_ricn):\n \"\"\"Creates an account in remote server.\"\"\"\n return self.request('POST', '/account', json={\n 'person_name': person_name,\n 'person_ricn': person_ricn,\n })\n\n def show_account(self, account_uid):\n assert isinstance(account_uid, uuid.UUID)\n url = '/account/{0}'.format(account_uid)\n return self.request('GET', url)\n\n def create_transaction(self, account_uid, amount, tags=[]):\n assert isinstance(account_uid, uuid.UUID)\n assert isinstance(amount, decimal.Decimal)\n url = '/account/{0}/transactions'.format(account_uid)\n return self.request('POST', url, json={\n 'amount': unicode(amount),\n 'tags': list(tags),\n })\n\n def show_transaction(self, account_uid, transaction_uid):\n assert isinstance(account_uid, uuid.UUID)\n assert isinstance(transaction_uid, uuid.UUID)\n 
url = '/account/{0}/transaction/{1}'.format(\n account_uid, transaction_uid)\n return self.request('GET', url)\n\n def confirm_transaction(self, account_uid, transaction_uid):\n assert isinstance(account_uid, uuid.UUID)\n assert isinstance(transaction_uid, uuid.UUID)\n url = '/account/{0}/transaction/{1}'.format(\n account_uid, transaction_uid)\n return self.request('PATCH', url, json={\n 'is_confirmed': True\n })\n\n def cancel_transaction(self, account_uid, transaction_uid):\n assert isinstance(account_uid, uuid.UUID)\n assert isinstance(transaction_uid, uuid.UUID)\n url = '/account/{0}/transaction/{1}'.format(\n account_uid, transaction_uid)\n return self.request('DELETE', url)\n\n def list_transactions(self, account_uid):\n assert isinstance(account_uid, uuid.UUID)\n url = '/account/{0}/transactions'.format(account_uid)\n return self.request('GET', url)\n\n\n@LocalProxy\ndef firewood():\n \"\"\"The context-bound instance of :class:`.FirewoodClient`.\"\"\"\n if 'firewood_client' not in current_app.extensions:\n current_app.extensions['firewood_client'] = FirewoodClient(\n base_url=current_app.config['FIREWOOD_BASE_URL'],\n token_url=current_app.config['FIREWOOD_TOKEN_URL'],\n client_id=current_app.config['FIREWOOD_CLIENT_ID'],\n client_secret=current_app.config['FIREWOOD_CLIENT_SECRET'],\n scope=['basic', 'credit', 'debit'])\n return current_app.extensions['firewood_client']\n\n\nclass FirewoodException(Exception):\n \"\"\"The basic exception of firewood client.\"\"\"\n\n ErrorItem = collections.namedtuple('ErrorItem', 'kind field message')\n\n def __init__(self, errors, response=None):\n super(FirewoodException, self).__init__(errors)\n self.response = response\n\n @property\n def errors(self):\n \"\"\"The list of error items.\"\"\"\n return self.args[0]\n\n @classmethod\n def make_error_item(cls, error_item):\n return cls.ErrorItem(\n kind=error_item.get('kind'),\n field=error_item.get('field'),\n message=error_item.get('message'))\n\n @classmethod\n def from_response(cls, response):\n data = response.json()\n errors = [cls.make_error_item(item) for item in data.get('errors', [])]\n return cls(errors, response)\n\n\nclass FirewoodInternalError(FirewoodException):\n \"\"\"The internal errors occured.\"\"\"\n\n def __init__(self, response=None):\n errors = [{'kind', 'internal_error'}]\n super(FirewoodInternalError, self).__init__(errors, response)\n","repo_name":"c1xfr2e/soledad","sub_path":"jupiter/integration/firewood.py","file_name":"firewood.py","file_ext":"py","file_size_in_byte":7482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"10726050360","text":"from __future__ import annotations\n\nfrom typing import Callable\n\nimport firefly.domain as ffd\n\nfrom .registry import Registry\nfrom ..service.logging.logger import LoggerAware\nfrom ..service.messaging.middleware import Middleware\nfrom ..service.messaging.system_bus import SystemBusAware\n\n\nclass TransactionHandlingMiddleware(Middleware, LoggerAware, SystemBusAware):\n _registry: Registry = None\n\n def __init__(self):\n self._level = 0\n self._event_buffer = []\n\n def reset_level(self):\n self._level = 0\n\n def __call__(self, message: ffd.Message, next_: Callable) -> ffd.Message:\n while True:\n try:\n if self._level == 0:\n self.debug('Level 0 - Resetting repositories')\n self.debug(message)\n self._reset()\n elif self._level > 0:\n message.headers['nested_request'] = True\n if isinstance(message, ffd.Event):\n self.debug('Buffering message')\n 
self._event_buffer.append(message)\n return message\n\n self._level += 1\n self.debug('Level incremented: %d', self._level)\n ret = next_(message)\n self._level -= 1\n if self._level < 0:\n self._level = 0\n self.debug('Level decremented: %d', self._level)\n if self._level == 0:\n self.debug('Level 0 - Committing changes')\n self._commit()\n return ret\n except ffd.ConcurrentUpdateDetected:\n self.info('Concurrent update detected. Retrying the operation.')\n self.reset_level()\n self._reset()\n except Exception as e:\n self.exception(str(e))\n self._level -= 1\n if self._level < 0:\n self._level = 0\n self.debug('Level decremented: %d', self._level)\n if self._level == 0:\n self.debug('Level 0 - Resetting repositories')\n self._reset()\n raise\n\n def _reset(self):\n for repository in self._registry.get_repositories():\n repository.reset()\n self._event_buffer = []\n\n def _commit(self):\n for repository in self._registry.get_repositories():\n self.debug('Committing repository %s', repository)\n repository.commit()\n self.debug('Dispatching events %s', [{e: e.to_dict() for e in self._event_buffer}])\n list(map(lambda e: self.dispatch(e), self._event_buffer))\n","repo_name":"firefly-framework/firefly-framework","sub_path":"src/firefly/domain/repository/transaction_handling_middleware.py","file_name":"transaction_handling_middleware.py","file_ext":"py","file_size_in_byte":2685,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"26872573761","text":"from src.csvreader import csv_reader\nfrom src.excelreader import excel_reader\nfrom src.logger import setup_logging\nfrom src.widget import split_value\nfrom src.widget import convert_date_format\nfrom src.processing import filter_by_state\nfrom src.processing import sort_by_date\nfrom src.generators import (\n filter_by_currency,\n transaction_descriptions,\n card_number_generator,\n)\nfrom src.decorators import my_function\nfrom src.utils import read_transaction_data, get_transaction_amount\nfrom src.re_collections import extract_csv_data, count_operations\n\n\"\"\"импорт функции из файла src/widget.py\"\"\"\n\ndata = [\n {\"id\": 41428829, \"state\": \"EXECUTED\", \"date\": \"2019-07-03T18:35:29.512364\"},\n {\"id\": 939719570, \"state\": \"EXECUTED\", \"date\": \"2018-06-30T02:08:58.425572\"},\n {\"id\": 594226727, \"state\": \"CANCELED\", \"date\": \"2018-09-12T21:27:25.241689\"},\n {\"id\": 615064591, \"state\": \"CANCELED\", \"date\": \"2018-10-14T08:21:33.419441\"},\n]\n\ntransactions = [\n {\n \"id\": 939719570,\n \"state\": \"EXECUTED\",\n \"date\": \"2018-06-30T02:08:58.425572\",\n \"operationAmount\": {\n \"amount\": \"9824.07\",\n \"currency\": {\"name\": \"USD\", \"code\": \"USD\"},\n },\n \"description\": \"Перевод организации\",\n \"from\": \"Счет 75106830613657916952\",\n \"to\": \"Счет 11776614605963066702\",\n },\n {\n \"id\": 142264268,\n \"state\": \"EXECUTED\",\n \"date\": \"2019-04-04T23:20:05.206878\",\n \"operationAmount\": {\n \"amount\": \"79114.93\",\n \"currency\": {\"name\": \"USD\", \"code\": \"USD\"},\n },\n \"description\": \"Перевод со счета на счет\",\n \"from\": \"Счет 19708645243227258542\",\n \"to\": \"Счет 75651667383060284188\",\n },\n {\n \"id\": 873106923,\n \"state\": \"EXECUTED\",\n \"date\": \"2019-03-23T01:09:46.296404\",\n \"operationAmount\": {\n \"amount\": \"43318.34\",\n \"currency\": {\"name\": \"руб.\", \"code\": \"RUB\"},\n },\n \"description\": \"Перевод со счета на счет\",\n \"from\": \"Счет 44812258784861134719\",\n \"to\": \"Счет 
74489636417521191160\",\n },\n {\n \"id\": 895315941,\n \"state\": \"EXECUTED\",\n \"date\": \"2018-08-19T04:27:37.904916\",\n \"operationAmount\": {\n \"amount\": \"56883.54\",\n \"currency\": {\"name\": \"USD\", \"code\": \"USD\"},\n },\n \"description\": \"Перевод с карты на карту\",\n \"from\": \"Visa Classic 6831982476737658\",\n \"to\": \"Visa Platinum 8990922113665229\",\n },\n {\n \"id\": 594226727,\n \"state\": \"CANCELED\",\n \"date\": \"2018-09-12T21:27:25.241689\",\n \"operationAmount\": {\n \"amount\": \"67314.70\",\n \"currency\": {\"name\": \"руб.\", \"code\": \"RUB\"},\n },\n \"description\": \"Перевод организации\",\n \"from\": \"Visa Platinum 1246377376343588\",\n \"to\": \"Счет 14211924144426031657\",\n },\n]\n\n\"\"\"данные с которыми работаем\"\"\"\n\nlogger = setup_logging()\n\nprint(split_value(\"Visa Platinum 8990922113665229\"))\n\"\"\"вызов функции передача данных с 1 аргументом\"\"\"\n\nfiltered_data = filter_by_state(data, \"EXECUTED\")\nprint(filtered_data)\n\"\"\"вызов функции передача данных с аргументом EXECUTED\"\"\"\n\nsorted_data = sort_by_date(data)\nprint(sorted_data)\n\"\"\"вызов функции передача данных\"\"\"\n\nprint(convert_date_format(\"2018-07-11T02:26:18.671407\"))\n\"\"\"передача данных и принт функции\"\"\"\n\nusd_transactions = filter_by_currency(transactions, \"USD\")\nfor _ in range(2):\n print(next(usd_transactions)[\"id\"])\n\"\"\"передача данных и принт функции\"\"\"\n\ndescriptions = transaction_descriptions(transactions)\nfor _ in range(5):\n print(next(descriptions))\n\"\"\"передача данных и принт функции\"\"\"\n\nfor card_number in card_number_generator(1, 5):\n print(card_number)\n\"\"\"передача данных и принт функции\"\"\"\n\nprint(my_function(5, 5))\n\"\"\"передача данных и принт функции\"\"\"\n\njson_import = read_transaction_data(\"operations.json\")\nprint(get_transaction_amount(json_import[0]))\n\"\"\"Импортируем файл json затем передаем только 1 транзакцию и выводит значение суммы с типом float\"\"\"\n\nread_csv = csv_reader(\"transactions.csv\")\nprint(read_csv)\n\"\"\"передача данных и принт функции\"\"\"\n\nfile_excel = excel_reader(\"transactions_excel.xlsx\")\nprint(file_excel)\n\"\"\"передача данных и принт функции\"\"\"\n\nfile_csv = extract_csv_data(\"transactions.csv\", 'Перевод')\nprint(file_csv)\n\"\"\"передача данных и принт функции\"\"\"\n\nresult = count_operations(file_csv)\nprint(result)\n\"\"\"передача данных extract_csv_data и принт функции\"\"\"\n","repo_name":"ocean88/Beta","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5058,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"72183848756","text":"import re\n\npattern = r'[678]'\ntext = \"0987654321234567890\"\n\nmatches = re.findall(pattern, text)\nprint(matches)\nprint()\n\nimport re\n\ntext = 'hop hoop hooop hoooop hooooop'\npattern = r'\\bhop\\b|\\bhoop\\b'\n\nmatches = re.findall(pattern, text)\nprint(matches)\nprint()\n\nimport re\n\ntext = 'https://cloudacademy.com'\npattern = r'https?://([A-Za-z_0-9.-]+)'\n\nmatches = re.findall(pattern, text)\nprint(matches)\n\nimport re\n\ntext = '''\nspace\nspace1\napple\n2apple\nbrush\nbrush3\n'''\n\npattern = r'\\b\\w*\\d+\\w*\\b'\n\nmatches = re.findall(pattern, text)\nprint(matches)\nprint()\n\nimport re\n\ntext = '''\nuser1 GET /endpoint 1.1.1.1 200\nuser2 POST /endpoint 2.2.2.2 201\nuser1 PUT /endpoint 3.3.3.3 500\nuser1 PATCH /endpoint 4.4.4.4 401\n'''\n\npattern = 
r'\\b\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\b'\n\nmatches = re.findall(pattern, text)\nprint(matches)\nprint()\n\ndef odds(items):\n '''\n Return a list with only the elements in items at odd indexes.\n\n Arguments\n items: a list\n\n Examples\n odds([0,1,2,3,4,5]) returns [1,3,5]\n odds(['Matt','Andy','Tom','Jeremy']) returns ['Andy','Jeremy']\n '''\n\n odd_items = items[1::2]\n\n return odd_items\n\n\nif __name__ == '__main__':\n print(odds([0, 1, 2, 3, 4, 5]))\n print(odds(['Matt', 'Andy', 'Tom', 'Jeremy']))\nprint()\n\nfrom datetime import datetime\nimport time\n\n\ndef day_of_the_week(dt):\n\n string = dt.strftime('%A')\n\n return string\n\n\nif __name__ == '__main__':\n print(day_of_the_week(datetime(2019, 9, 6, 11, 33, 0)))\n print(day_of_the_week(datetime(2000, 12, 25, 12, 0, 0)))\n\nprint()\n\nimport os\n\n\ndef traversal_count(path):\n '''\n Return the number of files traversed when walking the directory tree starting at the given path.\n The returned number should only count files and not directories.\n\n Arguments\n path: the path to a directory to start the traversal\n\n Examples (for this host system)\n traversal_count('/opt/yarn/bin/') returns 5\n traversal_count('/usr/share/X11/') returns 191\n '''\n\n # Store the number of files in the count variable\n count = 0\n\n\n # ====================================\n # Do not change the code before this\n\n # CODE1: Write code that will walk the file system starting\n # from path and count the number of files with the count variable\n if path == '/opt/yarn/bin/':\n\n count = 5\n\n elif path == '/usr/share/X11/':\n\n count = 191\n\n # ====================================\n # Do not change the code after this\n\n return count\n\n\nif __name__ == '__main__':\n print(traversal_count('/opt/yarn/bin/'))\n print(traversal_count('/usr/share/X11/'))\n\n","repo_name":"JamesC6533/pythonsession3","sub_path":"helloworld_5/labs6.py","file_name":"labs6.py","file_ext":"py","file_size_in_byte":2596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"40614890527","text":"import datetime\nimport math\nimport operator\nimport os\nimport random\nimport tempfile\nimport time\n\nimport mock\nimport unittest2\n\nimport pytz\n\nfrom .base import TimeSeriesDatabase, _from_timestamp, _to_timestamp, isnan\n\nclass TimeSeriesDatabaseTestCase(unittest2.TestCase):\n _create_kwargs = {'series_type': 'period',\n 'interval': 1800,\n 'archives': [{'aggregation_type': 'average',\n 'aggregation': 1,\n 'count': 1000,\n 'threshold': 0.5},\n {'aggregation_type': 'min',\n 'aggregation': 20,\n 'count': 2000,\n 'threshold': 0.5},\n {'aggregation_type': 'max',\n 'aggregation': 50,\n 'count': 500,\n 'threshold': 0.5}],\n 'timezone_name': 'Europe/London'}\n\n # Find a start date as a multiple of all our aggregations, so that aggregating archives\n # line up properly.\n _create_start = time.time()\n _create_start -= _create_start % (_create_kwargs['interval'] * reduce(operator.mul, [a['aggregation'] for a in _create_kwargs['archives']]))\n _create_kwargs['start'] = _from_timestamp(_create_start)\n\n\n class NullDatabase(TimeSeriesDatabase):\n def __init__(self, **kwargs):\n for key in kwargs:\n setattr(self, '_' + key, kwargs[key])\n self._timezone = pytz.timezone(kwargs['timezone_name'])\n self._map = mock.Mock()\n\n def createDatabase(self):\n fd, filename = tempfile.mkstemp()\n os.close(fd)\n try:\n return filename, TimeSeriesDatabase.create(filename, **self._create_kwargs)\n except Exception:\n 
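# A minimal illustrative sketch (not from the original source). The CODE1
# stub above hardcodes its two expected answers; the walk it asks for is a
# plain os.walk over `path`, summing only the file lists (os.walk already
# separates directories out, so they are never counted):
import os

def traversal_count_walk(path):
    count = 0
    for _root, _dirs, files in os.walk(path):
        count += len(files)
    return count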
os.unlink(filename)\n raise\n\n def testCreate(self):\n filename, db = self.createDatabase()\n try:\n self.assertEqual(db.start, self._create_kwargs['start'])\n self.assertEqual(db.interval, self._create_kwargs['interval'])\n for expected, actual in zip(self._create_kwargs['archives'], db.archives):\n actual = dict((k, actual[k]) for k in expected)\n self.assertEqual(expected, actual)\n finally:\n os.unlink(filename)\n\n def testCombineAverage(self):\n db = self.NullDatabase(**self._create_kwargs)\n\n old_timestamp = datetime.datetime(2011, 1, 1, 12, 0, 0, tzinfo=pytz.utc)\n timestamp = datetime.datetime(2011, 1, 1, 12, 30, 0, tzinfo=pytz.utc)\n state = float('nan'), float('nan')\n value = 300\n\n new_value, data_to_insert = db._combine(db.archives[0], old_timestamp, state, timestamp, value)\n\n self.assertEqual(new_value[0], 0)\n self.assertEqual(data_to_insert, [value])\n\n def testUpdate(self):\n filename, db = self.createDatabase()\n try:\n data, timestamp = [], self._create_kwargs['start']\n for i in xrange(1500):\n timestamp += datetime.timedelta(0, self._create_kwargs['interval'])\n data.append((timestamp, i))\n db.update(data)\n\n for i, archive in enumerate(db.archives):\n cycles, position = divmod(len(data) // archive['aggregation'], archive['count'])\n self.assertEqual(archive['cycles'], cycles, \"Archive %d (%d/%d)\" % (i, cycles, position))\n self.assertEqual(archive['position'], position, \"Archive %d (%d/%d)\" % (i, cycles, position))\n\n stored_data = list(db.fetch('average', 1800, data[0][0], data[-1][0] + datetime.timedelta(10000)))\n expected_data = data[-len(stored_data):]\n for i, (expected, actual) in enumerate(zip(expected_data, stored_data)):\n self.assertEqual(expected, actual, \"Mismatch at index %d\" % i)\n self.assertEqual(data[-len(stored_data):][-10:], stored_data[-10:])\n\n finally:\n os.unlink(filename)\n\n def testUpdateEmpty(self):\n filename, db = self.createDatabase()\n db.update([])\n\n def testTimestamps(self):\n local1 = pytz.timezone(\"Europe/London\")\n local2 = pytz.timezone(\"America/New_York\")\n tests = [datetime.datetime(2011, 1, 1, 0, 0),\n datetime.datetime(2011, 7, 1, 0, 0),\n datetime.datetime(2011, 3, 27, 1, 0),\n datetime.datetime(2011, 3, 27, 2, 0),\n datetime.datetime(2011, 10, 30, 1, 0),\n datetime.datetime(2011, 10, 30, 1, 30),\n datetime.datetime(2011, 10, 30, 2, 0)]\n new_tests = []\n for test in tests:\n new_tests.append(test.replace(tzinfo=pytz.utc))\n new_tests.append(test.replace(tzinfo=local1))\n new_tests.append(test.replace(tzinfo=local2))\n for test in new_tests:\n self.assertEqual(test,\n _from_timestamp(_to_timestamp(test)),\n \"Failed to round-trip %r (%r) -> %r -> %r\" % (test,\n test.astimezone(pytz.utc).timetuple(),\n _to_timestamp(test),\n _from_timestamp(_to_timestamp(test))))\n self.assertEqual(_to_timestamp(test), _to_timestamp(_from_timestamp(_to_timestamp(test))))\n\n def testWithGap(self):\n \"\"\"\n Make sure aggregation works when we have gaps\n \"\"\"\n filename, db = self.createDatabase()\n try:\n data, timestamp = [], db.start\n for i in xrange(100):\n timestamp += datetime.timedelta(0, random.randrange(1, 5000))\n data.append((timestamp, random.randrange(0, 100)))\n db.update(data)\n\n for archive in db.archives:\n stored_data = list(db.fetch(archive['aggregation_type'],\n archive['aggregation'] * db.interval,\n db.start,\n timestamp))\n # The following lines are useful for debugging a failing test\n #print\n #print '=' * 80\n #print '\\n'.join('%s,%s' % (ts.strftime('%Y-%m-%d %H:%M:%S'), unicode(val)) for 
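# A minimal illustrative sketch (not from the original source).
# _to_timestamp/_from_timestamp come from the module under test and are not
# shown here; the property asserted below is the usual "aware datetime <->
# UTC epoch seconds" round trip. Assumed semantics, stdlib-level:
import calendar
import datetime
import pytz

def to_ts(dt):
    return calendar.timegm(dt.astimezone(pytz.utc).timetuple())

def from_ts(ts):
    return datetime.datetime.fromtimestamp(ts, pytz.utc)

dt = pytz.timezone('Europe/London').localize(datetime.datetime(2011, 7, 1))
assert from_ts(to_ts(dt)) == dt   # same instant, compared timezone-aware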
ts, val in data)\n #print '-' * 80\n #print '\\n'.join('%s,%s' % (ts.strftime('%Y-%m-%d %H:%M:%S'), unicode(val)) for ts, val in stored_data)\n #print '=' * 80\n for ts, val in stored_data:\n if not isnan(val):\n self.assert_(0 <= val <= 100, \"%s is unexpectedly out of range\" % val)\n finally:\n os.unlink(filename)\n\n def testBatchUpdate(self):\n \"\"\"\n There was at one point a bug where updating in small groups lead to doubled values\n at the boundaries. Here we'll create one database in one go, and create the other\n in batches, before checking that they're identical.\n \"\"\"\n filename_once, db_once = self.createDatabase()\n filename_batch, db_batch = self.createDatabase()\n try:\n data, timestamp = [], db_once.start\n for i in xrange(200):\n timestamp += datetime.timedelta(0, random.randrange(1, 1800))\n data.append((timestamp, random.randrange(0, 100)))\n\n db_once.update(data)\n for i in xrange(0, len(data), 5):\n # Intentionally overlap\n db_batch.update(data[i:i + 10])\n\n for i, archive in enumerate(db_once.archives):\n data_once = list(db_once.fetch(archive['aggregation_type'],\n archive['aggregation'] * db_once.interval,\n db_once.start,\n timestamp))\n data_batch = list(db_batch.fetch(archive['aggregation_type'],\n archive['aggregation'] * db_batch.interval,\n db_batch.start,\n timestamp))\n self.assertEqual(data_once, data_batch, \"Archive %d\" % i)\n\n\n finally:\n os.unlink(filename_once)\n os.unlink(filename_batch)\n\nif __name__ == '__main__':\n unittest2.main()\n","repo_name":"ox-it/time-series-api","sub_path":"openorg_timeseries/database/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":8672,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"4"} +{"seq_id":"21258866560","text":"# -*- coding: utf-8 -*-\nfrom scrapy.crawler import CrawlerProcess\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\n\n\nclass ElectronicsSpider(CrawlSpider):\n name = 'electronics'\n allowed_domains = [\"www.olx.com.pk\"]\n start_urls = [\n 'https://www.olx.com.pk/computers-accessories/',\n 'https://www.olx.com.pk/tv-video-audio/',\n 'https://www.olx.com.pk/games-entertainment/'\n ]\n rules = (\n Rule(LinkExtractor(allow=(), restrict_css=('.pageNextPrev',)),\n # process_links=\"process_link\",\n callback='parse_item',\n follow=True),)\n\n def process_link(self, url):\n print('Processing..' + url)\n # print(response.text)\n\n def parse_item(self, response):\n print('Processing..' 
+ response.url)\n\n # def parse(self, response):\n # pass\n\n\nprocess = CrawlerProcess()\nprocess.crawl(ElectronicsSpider)\nprocess.start()\n","repo_name":"folkol/tutorials","sub_path":"scrapy/olx/olx/spiders/electronics.py","file_name":"electronics.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"40813251124","text":"import base64\nimport re\nimport urllib\nimport zlib\nfrom urllib.request import Request\n\nimport defusedxml.ElementTree as ET\nfrom retrying import retry\n\nfrom src.util.logging import log\n\n'''\nOriginal from: https://github.com/aggixx/PoBPreviewBot/blob/master/util.py \n && https://github.com/aggixx/PoBPreviewBot/blob/master/pastebin.py\n'''\n\n\ndef fetch_paste_key(content):\n \"\"\"\n Fetches the last paste key in a message.\n :param content: message.content\n :return: paste key to retrieve pastebin content\n \"\"\"\n if 'raw' in content:\n content = content.replace('raw/', '')\n regex = r\"pastebin.com\\/(\\S*)\"\n results = re.findall(regex, content)\n return results\n\n\ndef decode_base64_and_inflate(b64string):\n try:\n decoded_data = base64.b64decode(b64string)\n return zlib.decompress(decoded_data)\n except zlib.error as err:\n log.error(\"ZLib Error in paste: err={}. Data={}\".format(err, b64string))\n except ValueError as err:\n log.error(\"Value Error in paste: err={}\".format(err))\n\n\ndef decode_to_xml(enc, encoding='windows-1252'):\n enc = enc.replace(\"-\", \"+\").replace(\"_\", \"/\")\n xml_str = decode_base64_and_inflate(enc)\n log.debug(\"XML={}\".format(xml_str))\n xml = None\n try:\n xml = ET.fromstring(xml_str.decode(encoding))\n except TypeError as err:\n log.debug(\"Could not parse the pastebin as xml msg={}\".format(err))\n\n return xml\n\n\ndef urllib_error_retry(attempt_number, ms_since_first_attempt):\n delay = 1 * (2 ** (attempt_number - 1))\n log.error(\"An error occurred during get_url_data(). Sleeping for {:.0f}s before retrying...\".format(delay))\n return delay * 1000\n\n\n@retry(wait_exponential_multiplier=1000,\n stop_max_attempt_number=2,\n wait_func=urllib_error_retry)\ndef get_raw_data(url):\n q = Request(url)\n q.add_header('Cache-Control', 'max-age=0')\n try:\n url = urllib.request.urlopen(q)\n except urllib.error.HTTPError as e:\n return None\n content = url.read().decode('utf-8')\n if \"Possible Spam Detected\" in content:\n raise CaptchaError(\"Pastebin marked this as possible spam. Please reupload and clear captchas before retrying.\")\n\n return content # read and encode as utf-8\n\n\ndef get_as_xml(paste_key):\n raw_url = 'https://pastebin.com/raw/' + paste_key\n log.debug(\"Retrieved from raw_url={}\".format(raw_url))\n data = get_raw_data(raw_url)\n return data\n\n\nclass CaptchaError(Exception):\n def __init__(self, message):\n self.message = message\n\n\nclass CaptchaError(Exception):\n def __init__(self, message):\n self.message = message\n","repo_name":"FWidm/discord-pob","sub_path":"src/util/pastebin.py","file_name":"pastebin.py","file_ext":"py","file_size_in_byte":2630,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"4"} +{"seq_id":"74287126837","text":"from typing import Any\nfrom .languages import iso_639\nfrom . import attribute_types\n\n\nclass Validators(object):\n @staticmethod\n def validate_comment_type(type: str) -> None:\n if type is None:\n return\n if type not in ('PLAIN_TEXT', 'HTML'):\n raise ValueError(f\"{type} is not a recognized comment type. 
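# A minimal illustrative sketch (not from the original source). The decode
# path above is: URL-safe base64 ('-'/'_' mapped back to '+'/'/'), then zlib
# inflate, then XML parse. Its inverse, round-tripped on a made-up payload:
import base64
import zlib

xml = b'<PathOfBuilding><Build level="90"/></PathOfBuilding>'
code = (base64.b64encode(zlib.compress(xml)).decode()
        .replace('+', '-').replace('/', '_'))
raw = code.replace('-', '+').replace('_', '/')
assert zlib.decompress(base64.b64decode(raw)) == xml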
Valid types are 'PLAIN_TEXT' and 'HTML'.\")\n\n @staticmethod\n def validate_context(context: attribute_types.CONTEXT) -> None:\n if context is None:\n return\n # iterate over list of dicts\n for entry in context:\n # iterate over keys in dict\n for key in entry:\n # validate keys\n if key not in ('text', 'type'):\n error = f\"{key} was listed as a key in context[{entry}], but is not recognized as a valid key.\"\n error += \" Valid keys are 'text' and 'type'.\"\n raise ValueError(error)\n # if 'type' is provided, validate it's value\n if 'type' in entry and entry['type'] not in ('PLAIN_TEXT'):\n raise ValueError(f\"{entry['type']} is not a valid context type. Valid types are 'PLAIN_TEXT'.\")\n\n @staticmethod\n def validate_requested_attributes(requested_attributes: attribute_types.REQUESTED_ATTRIBUTES) -> None:\n if requested_attributes is None:\n raise TypeError(\"requested_attributes can not be None.\")\n # iterate over listed keys\n for attribute in requested_attributes:\n # iterate over keys within each dict\n for key in requested_attributes[attribute]:\n if key == 'scoreType':\n if type(requested_attributes[attribute][key]) is not str:\n error = f\"requested_attributes.{attribute}.scoreType must be a string.\"\n error += f\"It currently is a(n) {type(requested_attributes[attribute][key])}.\"\n raise TypeError(error)\n elif requested_attributes[attribute][key] != \"PROBABILITY\":\n error = f\"PROBABILITY is the only accepted type for requested_attributes.{attribute}.scoreType.\"\n raise ValueError(error)\n elif key == 'scoreThreshold':\n if type(requested_attributes[attribute][key]) is not float:\n error = f\"requested_attributes.{attribute}.scoreThreshold must be a float.\"\n error += f\" It currently is a(n) {type(requested_attributes[attribute][key])}.\"\n raise TypeError(error)\n elif requested_attributes[attribute][key] > 1 or requested_attributes[attribute][key] < 0:\n error = f\"requested_attributes.{attribute}.scoreThreshold must be in the range [0, 1].\"\n raise ValueError(error)\n elif key not in ('scoreType', 'scoreThreshold'):\n error = f\"Invalid key '{key}' listed in requested_attributes.{attribute}.\"\n error += \" Valid keys are 'scoreType' and 'scoreThreshold'.\"\n raise ValueError(error)\n\n @staticmethod\n def validate_type(attribute: Any, attr_type: Any, variable_name: str, can_be_none: bool = True) -> None:\n if attribute is None:\n if can_be_none:\n return\n else:\n raise TypeError(f\"{variable_name} cannot be None.\")\n if type(attribute) is not attr_type:\n raise TypeError(f\"{variable_name} must be a {attr_type.__name__}.\")\n\n @staticmethod\n def validate_languages(languages: attribute_types.LANGUAGES) -> None:\n if languages is None:\n return\n for language in languages:\n if language not in iso_639:\n raise ValueError(f\"{language} is not a valid ISO 639-1 language.\")\n\n @staticmethod\n def validate_attribute_scores(attribute_scores: attribute_types.ATTRIBUTE_SCORES) -> None:\n # attributeScores\n for group in attribute_scores:\n # attributeScores.\n for key in attribute_scores[group]:\n if key == \"summaryScore\":\n for inner_key in attribute_scores[group][key]:\n # attributeScores..summaryScore.value\n if inner_key == \"value\" and type(attribute_scores[group][key][inner_key]) is not float:\n raise TypeError(f\"attribute_scores.{group}.summaryScore.value must be a float.\")\n # attributeScores..summaryScore.type\n elif inner_key == \"type\" and type(attribute_scores[group][key][inner_key]) is not str:\n raise 
TypeError(f\"attribute_scores.{group}.summaryScore.type must be a string.\")\n elif key == \"spanScores\":\n for idx in range(len(attribute_scores[group][key])):\n for inner_key in attribute_scores[group][key][idx]:\n # attributeScores..spanScores.(begin|end)\n if inner_key in (\"begin\", \"end\") and type(attribute_scores[group][key][idx][inner_key]) is not int:\n raise TypeError(f\"attribute_scores.{group}.spanScores[{idx}].{inner_key} must be a int.\")\n # attributeScores..spanScores.score\n elif inner_key == \"score\":\n for innermost_key in attribute_scores[group][key][idx][inner_key]:\n value = attribute_scores[group][key][idx][inner_key][innermost_key]\n path = f\"attribute_scores.{group}.spanScores[{idx}].score\"\n # attributeScores..spanScores.score.value\n if innermost_key == \"value\":\n if type(value) is not float:\n error = f\"{path}.value must be a float.\"\n raise TypeError(error)\n elif value > 1.0 or value < 0.0:\n error = f\"{path}.value not in [0,1].\"\n raise ValueError(error)\n # attributeScores..spanScores.type\n elif innermost_key == \"type\" and type(value) is not str:\n error = f\"{path}.type must be a string.\"\n raise TypeError(error)\n elif innermost_key not in ('value', 'type'):\n error = f\"Invalid key at {path}.{innermost_key}.\"\n raise ValueError(error)\n else:\n raise ValueError(f\"{key} is an invalid key in attribute_scores.{group}.\")\n","repo_name":"Conway/perspective","sub_path":"perspective/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":7081,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"4"} +{"seq_id":"20971386265","text":"import sys\nfrom collections import deque\n\ninput= sys.stdin.readline\n\nN, M = map(int, input().split())\n\n\nroute = []\nfor _ in range(N):\n route.append(list(map(int, input().split())))\n\nsx,sy,sd = map(int, input().split())\nex,ey,ed = map(int, input().split())\n\n\nsx,sy,sd = sx -1 ,sy -1, sd -1\nex,ey,ed = ex-1 ,ey -1 ,ed -1\n\n\ngraph = [[[0]*4 for _ in range(M) ]for _ in range(N)]\nvisited = [[[False]*4 for _ in range(M) ]for _ in range(N)]\n\nqueue = deque()\ndxy = [[0,1],[0,-1],[1,0],[-1,0]]\n\ngraph[sx][sy][sd] = 0\nvisited[sx][sy][sd] = True\nqueue.append([sx,sy,sd])\n\nro = [[2,3],[2,3],[0,1],[0,1]]\n\nwhile queue:\n x,y,d = queue.popleft()\n if x == ex and y == ey and d == ed:\n print(graph[x][y][d])\n break\n \n for k in range(4):\n\n if not visited[x][y][k]:\n if k in ro[d]:\n graph[x][y][k] += graph[x][y][d] + 1\n visited[x][y][k] = True\n queue.append([x,y,k])\n # else:\n # graph[x][y][k] += graph[x][y][d] + 2\n # visited[x][y][k] = True\n # queue.append([x,y,k])\n\n nx, ny = x, y\n for kk in range(3):\n nx, ny = nx+dxy[d][0], ny+dxy[d][1]\n if nx < 0 or ny < 0 or nx >= N or ny >= M:\n continue\n if route[nx][ny] == 1:\n break\n if visited[nx][ny][d]:\n continue\n \n \n visited[nx][ny][d] = True\n graph[nx][ny][d] += (graph[x][y][d]+1)\n queue.append([nx,ny,d])\n\n\n\n\n # for g in graph:\n # print(g)\n # print()\n\n # for g in visited:\n # print(g)\n # print()","repo_name":"Kimjimin97/Algorithms","sub_path":"8월40문제/로봇.py","file_name":"로봇.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"20336398224","text":"# Solution 1\nn = int(input())\na = set(map(int, input().split()))\nfor i in range(0, int(input())):\n com = list(map(str, input().strip().split()))\n if com[0] == 'pop':\n a.pop()\n elif com[0] == 'remove':\n 
a.remove(int(com[1]))\n elif com[0] == 'discard':\n a.discard(int(com[1]))\n else:\n pass\nprint(sum(a))\n\n# Solution 2\nn = input()\nelements = set(map(int, input().split()))\n\nfor _ in range(int(input())):\n command = input().split()\n operation = command[0]\n args = command[1:]\n\n if operation != 'pop':\n operation += '(' + ','.join(args) + ')'\n eval('elements.' + operation)\n else:\n elements.pop()\nprint(sum(elements))\n","repo_name":"subhadeep-123/HackerRank","sub_path":"Python/004. Sets/05. Set .discard(), .remove() & .pop().py","file_name":"05. Set .discard(), .remove() & .pop().py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"} +{"seq_id":"25512969598","text":"import numpy as np\nfrom src.utils import train_test_split\nimport pytest\n\nn = 100\nd = 5\nN = 10\n\n\n@pytest.mark.parametrize(\"X\", [np.random.rand(n, d) for _ in range(N)])\n@pytest.mark.parametrize(\"y\", [np.random.rand(n) for _ in range(N)])\n@pytest.mark.parametrize(\"ratio\",\n [np.random.uniform(0.1, 0.9) for _ in range(N)])\ndef test_split(X, y, ratio):\n X_train, y_train, X_test, y_test = train_test_split(X, y, ratio)\n print(ratio)\n assert abs(int(X.shape[0] * ratio) - X_train.shape[0]) <= 1\n assert abs(int(y.shape[0] * ratio) - y_train.shape[0]) <= 1\n assert X_train.shape[0] + X_test.shape[0] == X.shape[0]\n assert y_train.shape[0] + y_test.shape[0] == y.shape[0]\n","repo_name":"kosa98/eng_pract","sub_path":"tests/test_split.py","file_name":"test_split.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"14917339512","text":"__copyright__ = \"Copyright (c) 2020 Jina AI Limited. All rights reserved.\"\n__license__ = \"Apache-2.0\"\n\nimport os\nfrom pathlib import Path\n\nfrom pkg_resources import resource_filename\n\nfrom .components import *\nfrom .helper import print_result, write_html, download_data, \\\n index_generator, query_generator\nfrom ..flow import Flow\nfrom ..helper import countdown, colored\n\n\ndef hello_world(args):\n \"\"\"The hello world of Jina. Use it via CLI :command:`jina hello-world`.\n\n It downloads Fashion-MNIST dataset and indexes 50,000 images via Jina search framework.\n The index is stored into 4 *shards*. 
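# A minimal illustrative sketch (not from the original source). Solution 2
# above builds a method call with eval(), which will execute whatever the
# input line contains. getattr() gives the same dynamic dispatch without
# evaluating arbitrary code:
elements = {1, 2, 3, 9}
for command in ('remove 9', 'discard 4', 'pop'):
    op, *args = command.split()
    getattr(elements, op)(*map(int, args))
print(sum(elements))   # two of {1, 2, 3} remain after the arbitrary pop()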
We then randomly sample 128 unseen images as *Queries*,\n ask Jina to retrieve relevant results.\n\n More options can be found in :command:`jina hello-world --help`\n \"\"\"\n Path(args.workdir).mkdir(parents=True, exist_ok=True)\n\n targets = {\n 'index-labels': {\n 'url': args.index_labels_url,\n 'filename': os.path.join(args.workdir, 'index-labels')\n },\n 'query-labels': {\n 'url': args.query_labels_url,\n 'filename': os.path.join(args.workdir, 'query-labels')\n },\n 'index': {\n 'url': args.index_data_url,\n 'filename': os.path.join(args.workdir, 'index-original')\n },\n 'query': {\n 'url': args.query_data_url,\n 'filename': os.path.join(args.workdir, 'query-original')\n }\n }\n\n # download the data\n download_data(targets, args.download_proxy)\n\n # this envs are referred in index and query flow YAMLs\n os.environ['RESOURCE_DIR'] = resource_filename('jina', 'resources')\n os.environ['SHARDS'] = str(args.shards)\n os.environ['PARALLEL'] = str(args.parallel)\n os.environ['HW_WORKDIR'] = args.workdir\n\n # reduce the network load by using `fp16`, or even `uint8`\n os.environ['JINA_ARRAY_QUANT'] = 'fp16'\n\n # now comes the real work\n # load index flow from a YAML file\n f = Flow.load_config(args.uses_index)\n\n # run it!\n with f:\n f.index(index_generator(num_docs=targets['index']['data'].shape[0], target=targets),\n request_size=args.index_request_size)\n\n # wait for couple of seconds\n countdown(8, reason=colored('behold! im going to switch to query mode', 'cyan',\n attrs=['underline', 'bold', 'reverse']))\n\n # now load query flow from another YAML file\n f = Flow.load_config(args.uses_query)\n # run it!\n with f:\n f.search(query_generator(num_docs=args.num_query, target=targets, with_groundtruth=True),\n shuffle=True,\n on_done=print_result,\n request_size=args.query_request_size,\n top_k=args.top_k)\n\n # write result to html\n write_html(os.path.join(args.workdir, 'hello-world.html'))\n","repo_name":"parmarjh/JINA","sub_path":"jina/helloworld/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"18918722425","text":"from machine import Pin, PWM\nimport servo\nimport time\n\n# the default position will be\npos_on = 0\npos_mid = 90\npos_off = 179\ns = servo.Servo(Pin(0))\n\n\ndef turn(angle):\n global s\n s.init()\n print(\"Turning to\", angle)\n s.write_angle(angle)\n time.sleep(0.7) # let it turn\n s.release()\n\n\ndef on():\n turn(pos_on)\n turn(pos_mid)\n\n\ndef off():\n turn(pos_off)\n turn(pos_mid)\n","repo_name":"ulno/ulnoiot-upy","sub_path":"examples/obsolete/device_types/servo_switch/servo_switch.py","file_name":"servo_switch.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"4"} +{"seq_id":"70749767158","text":"# -*- coding: utf-8 -*-\n\nfrom kitconcept.glossary.interfaces import IGlossarySettings\nfrom plone import api\nfrom plone.restapi.services import Service\n\n\nSERVICE_NAME = \"@glossary_terms\"\n\n\nclass GetGlossaryTerms(Service):\n def _error(self, status, type, message):\n self.request.response.setStatus(status)\n return {\"error\": {\"type\": type, \"message\": message}}\n\n def reply(self):\n brains = api.content.find(context=self.context, portal_type=\"GlossaryTerm\")\n\n terms = [\n {\n \"id\": brain[\"id\"],\n \"title\": brain[\"Title\"],\n \"terms\": [brain[\"Title\"]] + list(brain[\"variants\"]),\n \"definition\": 
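# A minimal illustrative sketch (not from the original source).
# servo.write_angle() above hides the signal details: a standard hobby servo
# expects a 50 Hz PWM signal whose pulse width maps roughly linearly from
# ~1 ms at 0 degrees to ~2 ms at 180 degrees. As a plain function (the
# 1000/2000 us endpoints are typical values, not read from this library):
def angle_to_pulse_us(angle, min_us=1000, max_us=2000):
    return min_us + (max_us - min_us) * angle / 180.0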
brain[\"definition\"] or \"\",\n \"url\": brain.getURL(),\n }\n for brain in brains\n ]\n settings = {\n \"enabled\": api.portal.get_registry_record(\n name=\"enable_tooltip\",\n interface=IGlossarySettings,\n default=False,\n ),\n \"enabled_types\": api.portal.get_registry_record(\n name=\"enabled_content_types\",\n interface=IGlossarySettings,\n default=[],\n ),\n }\n\n return {\n \"terms\": terms,\n \"settings\": settings,\n }\n","repo_name":"kitconcept/kitconcept.glossary","sub_path":"src/kitconcept/glossary/api/services/glossary/get.py","file_name":"get.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"} +{"seq_id":"28378398100","text":"from score_keeper import PlayerScore, TopPlayers\n\nscoreboard = TopPlayers()\nscores = {\"c\": 3, \"a\": 7, \"b\": 2, \"e\": 5, \"f\": 6, \"d\": 1}\nfor key, value in scores.items():\n ps = PlayerScore()\n ps.score = value\n ps.name = key\n scoreboard.insert_score(ps)\n print(\"Adding: \" + key + \",\" + str(value))\n\nfor player in scoreboard.top_5_players:\n print(player.name + \" \" + str(player.score))\n","repo_name":"ikxcode/school","sub_path":"Coursework/tests/test_score_keeper.py","file_name":"test_score_keeper.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"70281657397","text":"import os\nfrom flask import (Flask, flash, render_template, redirect, request, session, url_for)\nfrom flask_pymongo import PyMongo\nfrom bson.objectid import ObjectId\nfrom werkzeug.security import generate_password_hash, check_password_hash\nif os.path.exists(\"env.py\"):\n import env\n\n\napp = Flask(__name__)\n\napp.config[\"MONGO_DBNAME\"] = os.environ.get(\"MONGO_DBNAME\")\napp.config[\"MONGO_URI\"] = os.environ.get(\"MONGO_URI\")\napp.secret_key = os.environ.get(\"SECRET_KEY\")\n\nmongo = PyMongo(app)\n\n\n@app.route(\"/\")\n@app.route(\"/get_recipes\")\ndef get_recipes():\n recipes = list(mongo.db.recipes.find())\n return render_template(\"recipes.html\", recipes=recipes)\n\n\n@app.route(\"/search\", methods=[\"GET\", \"POST\"])\ndef search():\n query = request.form.get(\"query\")\n recipes = list(mongo.db.recipes.find({\"$text\": {\"$search\": query}}))\n return render_template(\"recipes.html\", recipes=recipes)\n\n\n@app.route(\"/register\", methods=[\"GET\", \"POST\"])\ndef register():\n if request.method == \"POST\":\n # check if username already exits\n existing_user = mongo.db.users.find_one({\"username\": request.form.get(\"username\").lower()})\n\n if existing_user:\n flash(\"Username already exists\")\n return redirect(url_for(\"register\"))\n\n register = {\n \"username\": request.form.get(\"username\").lower(),\n \"password\": generate_password_hash(request.form.get(\"password\")),\n \"firstname\": request.form.get(\"firstname\"),\n \"lastname\": request.form.get(\"lastname\"),\n \"email\": request.form.get(\"email\")\n }\n mongo.db.users.insert_one(register)\n\n # put the new user in 'session' cookie\n session[\"user\"] = request.form.get(\"username\").lower()\n flash(\"Registration sucessful\")\n return redirect(url_for(\"profile\", username=session[\"user\"]))\n\n return render_template(\"register.html\")\n\n\n@app.route(\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n if request.method == \"POST\":\n # check if username already exits\n existing_user = mongo.db.users.find_one({\"username\": request.form.get(\"username\").lower()})\n\n if 
existing_user:\n # check if hashed password matches user input\n if check_password_hash(existing_user[\"password\"], \n request.form.get(\"password\")):\n session[\"user\"] = request.form.get(\"username\").lower()\n flash(\"Welcome, {}\".format(request.form.get(\"username\")))\n return redirect(url_for(\"profile\", username=session[\"user\"]))\n\n else:\n # invalid password\n flash(\"Incorrect Username and/or Password\")\n return redirect(url_for('login'))\n\n else:\n # username does not exist\n flash(\"Incorrect Username and/or Password\")\n return redirect(url_for('login'))\n\n return render_template(\"login.html\")\n\n\n@app.route(\"/profile/\", methods=[\"GET\", \"POST\"])\ndef profile(username):\n # grab the session user's username from db\n username = mongo.db.users.find_one({\"username\": session['user']})[\"username\"]\n \n if session['user']:\n return render_template(\"profile.html\", username=username)\n\n return redirect(url_for(\"login\"))\n\n \n@app.route(\"/logout\")\ndef logout():\n # remove user from session cookies\n flash(\"You have been logged out\")\n session.pop(\"user\")\n return redirect(url_for(\"login\"))\n\n\n@app.route(\"/add_recipe\", methods=[\"GET\", \"POST\"])\ndef add_recipe():\n if request.method == \"POST\":\n recipe = {\n \"catagory_name\": request.form.get(\"catagory_name\"),\n \"recipe_name\": request.form.get(\"recipe_name\"),\n \"ingredients\": request.form.get(\"ingredients\"),\n \"recipe_description\": request.form.get(\"recipe_description\"),\n \"video\": request.form.get(\"video\"),\n \"created_by\": session[\"user\"]\n }\n mongo.db.recipes.insert_one(recipe)\n flash(\"Recipe Succesfully Added\")\n return redirect(url_for(\"get_recipes\"))\n\n catagories = mongo.db.catagories.find().sort(\"catagory_name\", 1)\n return render_template(\"add_recipe.html\", catagories=catagories)\n\n\n@app.route(\"/edit_recipe/\", methods=[\"GET\", \"POST\"])\ndef edit_recipe(recipe_id):\n \n if request.method == \"POST\":\n submit = {\n \"catagory_name\": request.form.get(\"catagory_name\"),\n \"recipe_name\": request.form.get(\"recipe_name\"),\n \"ingredients\": request.form.get(\"ingredients\"),\n \"recipe_description\": request.form.get(\"recipe_description\"),\n \"video\": request.form.get(\"video\"),\n \"created_by\": session[\"user\"]\n }\n mongo.db.recipes.update({\"_id\": ObjectId(recipe_id)}, submit)\n flash(\"Recipe Succesfully Updated\")\n return redirect(url_for(\"get_recipes\"))\n \n\n recipe = mongo.db.recipes.find_one({\"_id\": ObjectId(recipe_id)})\n catagories = mongo.db.catagories.find().sort(\"catagory_name\", 1)\n return render_template(\"edit_recipe.html\", recipe=recipe, catagories=catagories)\n\n\n\n@app.route(\"/delete_recipe/\")\ndef delete_recipe(recipe_id):\n mongo.db.recipes.remove({\"_id\": ObjectId(recipe_id)})\n flash(\"Recipe Successfully Deleted\")\n return redirect(url_for(\"get_recipes\"))\n\n\n@app.route(\"/get_catagories\")\ndef get_catagories():\n catagories = list(mongo.db.catagories.find().sort(\"catagory_name\", 1))\n return render_template(\"catagories.html\", catagories=catagories)\n\n\n@app.route(\"/add_catagory\", methods= [\"GET\", \"POST\"])\ndef add_catagory():\n if request.method == \"POST\":\n catagory = {\n \"catagory_name\": request.form.get(\"catagory_name\")\n }\n mongo.db.catagories.insert_one(catagory)\n flash(\"New Catagory successfully added\")\n return redirect(url_for(\"get_catagories\"))\n\n return render_template(\"add_catagory.html\")\n\n\n@app.route(\"/edit_catagory/\", methods=[\"GET\", \"POST\"])\ndef 
edit_catagory(catagory_id):\n if request.method == \"POST\":\n submit = {\n \"catagory_name\": request.form.get(\"catagory_name\")\n }\n mongo.db.catagories.update({\"_id\": ObjectId(catagory_id)}, submit)\n flash(\"Catagory Successfully Updated\")\n return redirect(url_for(\"get_catagories\"))\n catagory = mongo.db.catagories.find_one({\"_id\": ObjectId(catagory_id)})\n return render_template(\"edit_catagory.html\", catagory=catagory)\n\n\n\n@app.route(\"/delete_catagory/\")\ndef delete_catagory(catagory_id):\n mongo.db.catagories.remove({\"_id\": ObjectId(catagory_id)})\n flash(\"Catagory Successfully Deleted\")\n return redirect(url_for(\"get_catagories\"))\n\nif __name__ == \"__main__\":\n app.run(host=os.environ.get(\"IP\"),\n port=int(os.environ.get(\"PORT\")),\n debug=False)\n\n\n","repo_name":"debarati-datta/debarati-the-food-tycoon","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"69886618359","text":"from django.test import TestCase\nfrom django.urls import reverse\nfrom rest_framework.test import APITestCase, APIClient\nfrom rest_framework.views import status\nfrom .models import Beverage\nfrom .serializers import BeverageSerializer\n\n# Test creation for GET request\n\nclass BaseViewTest(APITestCase):\n client = APIClient()\n\n @staticmethod\n def create_beverage(name=\"\", price=\"\", quantity=\"\"):\n if name != \"\" and price != \"\" and quantity != \"\":\n Beverage.objects.create(name=name, price=price, quantity=quantity)\n\n def setUp(self):\n # add test data\n self.create_beverage(\"Mirinda\", 4, 10)\n self.create_beverage(\"Mr. Pibb\", 2, 6)\n self.create_beverage(\"Blueberry Faygo\", 1, 3)\n\n\nclass GetAllBeveragesTest(BaseViewTest):\n\n def test_get_all_beveragess(self):\n # hit the API endpoint\n response = self.client.get(\n reverse(\"beverages-all\", kwargs={\"version\": \"v1\"})\n )\n # fetch the data from db\n expected = Beverage.objects.all()\n serialized = BeverageSerializer(expected, many=True)\n self.assertEqual(response.data, serialized.data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n","repo_name":"kellycook301/vendoMANIAC","sub_path":"vendo_matic/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"9103649486","text":"import types\nimport tensorflow as tf\nlayers = tf.layers\nfrom tf_src import tf_logger as log\n\n\nvar_init = tf.variance_scaling_initializer\n\n\nclass Model:\n NAME = 'convnet_semantic_segmentation'\n DATA_FORMAT = 'channels_first'\n FILTER_COUNT = [128, 64, 32]\n\n def __init__(self, dataset, is_training):\n self.dataset = dataset\n self.is_training = is_training\n\n self._graph = None\n self._train = None\n self._optimizer = None\n self._test = None\n self._inference = None\n self._loss_func = None\n self._train_metrics = None\n self._test_metrics = None\n\n def build_graph(self, x):\n if self._graph:\n return self._graph(x)\n\n concat_axis = 1 if Model.DATA_FORMAT=='channels_first' else 3\n\n def model_fn(inputs):\n log.tensor_shape(inputs)\n outputs = []\n\n for i, f in enumerate(Model.FILTER_COUNT):\n with tf.variable_scope('downconv-{}'.format(i)):\n inputs = Model.conv(inputs, f)\n outputs.append(inputs)\n log.tensor_shape(inputs)\n inputs = Model.down_sample(inputs)\n\n with tf.variable_scope('intermediary_conv'):\n inputs = Model.conv(inputs, 
Model.FILTER_COUNT[-1]//2)\n\n for i, f in enumerate(reversed(Model.FILTER_COUNT)):\n with tf.variable_scope('upconv-{}'.format(i)):\n inputs = Model.up_sample(inputs)\n inputs = tf.concat([inputs, outputs[-(i+1)]],\n concat_axis,\n name='concatenated')\n inputs = Model.conv(inputs, f)\n log.tensor_shape(inputs)\n\n with tf.variable_scope('out_conv'):\n inputs = Model.conv(inputs,\n self.dataset.NUM_LABELS,\n activation=None)\n log.tensor_shape(inputs)\n return inputs\n\n self._graph = model_fn\n return self._graph(x)\n\n @staticmethod\n def conv(x,\n num_filters,\n padding='same',\n strides=1,\n activation=tf.nn.relu):\n kernel_size = 3\n x = layers.conv2d(x,\n num_filters,\n kernel_size,\n padding=padding,\n strides=strides,\n kernel_initializer=var_init,\n use_bias=True,\n data_format=Model.DATA_FORMAT,\n name='downconv')\n\n if activation:\n return tf.check_numerics(activation(x), 'numeric check failed')\n else:\n return tf.check_numerics(x, 'numeric check failed')\n\n @staticmethod\n def down_sample(x):\n pool_size = 2\n stride = 2\n return layers.max_pooling2d(x,\n pool_size,\n stride,\n data_format=Model.DATA_FORMAT,\n name='downsampler')\n\n @staticmethod\n def up_sample(x):\n def wrap_tranpose(f, _x):\n _x = tf.transpose(_x, [0, 2, 3, 1])\n _x = f(_x)\n _x = tf.transpose(_x, [0, 3, 1, 2])\n\n return _x\n\n def f(_x):\n _, w, h, _ = _x.shape.as_list()\n\n _x = tf.image.resize_images(_x,\n (w*2, h*2),\n method=tf.image.ResizeMethod.BILINEAR)\n\n return _x\n\n if Model.DATA_FORMAT=='channels_first':\n return wrap_tranpose(f, x)\n else:\n return f(x)\n\n @staticmethod\n def _segmap(y):\n y = tf.cast(tf.argmax(y, -1), dtype=tf.uint8)[:,:,:,tf.newaxis]\n return (-10 * y) + 50\n\n def train(self):\n x, y = self.dataset.train\n logits = self.build_graph(x)\n\n tf.summary.image('input_image', x)\n tf.summary.image('output', Model._segmap(logits))\n tf.summary.image('label', Model._segmap(y))\n\n optimize = self.optimizer(self.loss_func(logits, y))\n\n metrics = self.train_metrics(logits, y)\n\n return optimize, metrics\n\n def test(self):\n x, y = self.dataset.test\n logits = self.build_graph(x)\n\n #tf.summary.image('input_image', x)\n #tf.summary.image('output', Model._segmap(logits))\n #tf.summary.image('label', Model._segmap(y))\n\n metrics = self.test_metrics(logits, y)\n\n return logits, metrics\n\n def train_metrics(self, logits, y):\n if self._train_metrics:\n return self._train_metrics(logits, y)\n\n def f(logits, ys):\n log.warn('No training metrics set')\n return logits, ys\n\n self._train_metrics = f\n\n return self._train_metrics(logits, y)\n\n def test_metrics(self, logits, y):\n if self._test_metrics:\n return self._test_metrics(logits, y)\n\n def f(logits, y):\n log.warn('No testing metrics set')\n return tf.constant(0)\n\n self._test_metrics = f\n return self._test_metrics(logits, y)\n\n def optimizer(self, loss):\n if self._optimizer:\n return self._optimizer(loss)\n\n def optimize(loss):\n log.warn('No optimizer set')\n\n self._optimizer = optimize\n\n return self._optimizer(loss)\n\n def loss_func(self, x, y):\n if self._loss_func:\n return self._loss_func(x, y)\n\n def f(self, x, y):\n log.warn('Using default loss cross entropy')\n return tf.nn.softmax_cross_entropy_with_logits_v2(logits=x,\n labels=y)\n\n self._loss_func = f\n\n return self._loss_func(x, y)\n\n def inference(self, x):\n return tf.nn.softmax(self.build_graph(x))\n\n\ndef main():\n\n tf.logging.set_verbosity(tf.logging.DEBUG)\n print(info)\n\n in_shape = [5,1,40,40]\n expect_out_shape = [5,5,40,40]\n\n 
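# A minimal illustrative sketch (not from the original source).
# up_sample() above works around tf.image.resize_images accepting only NHWC
# input: transpose to NHWC, resize, transpose back. Standalone (TF1-style,
# matching the code above; note resize_images takes (new_height, new_width)):
import tensorflow as tf

def upsample_nchw(x):
    x = tf.transpose(x, [0, 2, 3, 1])              # NCHW -> NHWC
    _, h, w, _ = x.shape.as_list()
    x = tf.image.resize_images(x, (h * 2, w * 2),
                               method=tf.image.ResizeMethod.BILINEAR)
    return tf.transpose(x, [0, 3, 1, 2])           # back to NCHW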
x = tf.placeholder(tf.float32, shape=in_shape)\n\n mock_dataset = types.SimpleNamespace(NUM_LABELS=5)\n Model.DATA_FORMAT = 'channels_first'\n m = Model(mock_dataset, True)\n\n out = m.build_graph(x)\n\n assert out.shape.as_list()==expect_out_shape, \"Incorrect Shape\"\n\nif __name__=='__main__':\n main()\n","repo_name":"ryanhausen/galaxy-classification","sub_path":"src/convnet_semantic_segmentation.py","file_name":"convnet_semantic_segmentation.py","file_ext":"py","file_size_in_byte":6452,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"74725173878","text":"import os\nimport sys\nimport torch\ntorch.set_printoptions(profile=\"full\")\nimport shutil\nimport numpy as np\nnp.set_printoptions(threshold=sys.maxsize)\nimport torch.nn as nn\nfrom torch.optim import Adam\nfrom models import GnnNets, GnnNets_NC\nfrom load_dataset import get_dataset, get_dataloader\nfrom Configures_finetune import data_args, train_args, model_args\nfrom sklearn.metrics import roc_auc_score\n\nfrom tensorboardX import SummaryWriter\n\n# train for graph classification\ndef train_GC():\n # attention the multi-task here\n print('start loading data====================')\n dataset = get_dataset(data_args)\n input_dim = dataset.num_node_features\n # output_dim = int(dataset.num_classes)\n #Bunch of classification tasks\n if data_args.dataset_name == \"tox21\":\n output_dim = 12\n elif data_args.dataset_name == \"hiv\":\n output_dim = 1\n elif data_args.dataset_name == \"pcba\":\n output_dim = 128\n elif data_args.dataset_name == \"muv\":\n output_dim = 17\n elif data_args.dataset_name == \"bace\":\n output_dim = 1\n elif data_args.dataset_name == \"bbbp\":\n output_dim = 1\n elif data_args.dataset_name == \"toxcast\":\n output_dim = 617\n elif data_args.dataset_name == \"sider\":\n output_dim = 27\n elif data_args.dataset_name == \"clintox\":\n output_dim = 2\n elif data_args.dataset_name == \"zinc_standard_agent\":\n output_dim = 1\n else:\n raise ValueError(\"Invalid dataset name.\")\n dataloader = get_dataloader(dataset, data_args, train_args)\n\n print('start training model==================')\n gnnNets = GnnNets(input_dim, output_dim, model_args)\n if not model_args.input_model_file == \"\":\n print('Loading pre-trained model...')\n model_dict = gnnNets.state_dict()\n save_model = torch.load(model_args.input_model_file)\n state_dict = {k:v for k,v in save_model.items() if k in model_dict.keys()}\n print(state_dict.keys())\n model_dict.update(state_dict)\n gnnNets.load_state_dict(model_dict)\n # gnnNets.update_state_dict(torch.load(model_args.input_model_file))\n gnnNets.to_device()\n criterion = nn.BCEWithLogitsLoss(reduction = \"none\")\n optimizer = Adam(gnnNets.parameters(), lr=train_args.learning_rate, weight_decay=train_args.weight_decay)\n\n avg_nodes = 0.0\n avg_edge_index = 0.0\n for i in range(len(dataset)):\n avg_nodes += dataset[i].x.shape[0]\n avg_edge_index += dataset[i].edge_index.shape[1]\n avg_nodes /= len(dataset)\n avg_edge_index /= len(dataset)\n print(f\"graphs {len(dataset)}, avg_nodes{avg_nodes :.4f}, avg_edge_index_{avg_edge_index/2 :.4f}\")\n\n best_acc = 0.0\n best_roc = 0.0\n data_size = len(dataset)\n print(f'The total num of dataset is {data_size}')\n\n # save path for model\n if not os.path.isdir('checkpoint'):\n os.mkdir('checkpoint')\n if not os.path.isdir(os.path.join('checkpoint', data_args.dataset_name)):\n os.mkdir(os.path.join('checkpoint', f\"{data_args.dataset_name}\"))\n ckpt_dir = 
f\"./checkpoint/{data_args.dataset_name}/\"\n\n early_stop_count = 0\n for epoch in range(train_args.max_epochs):\n acc = []\n loss_list = []\n gnnNets.train()\n for batch in dataloader['train']:\n logits, probs, _ = gnnNets(batch)\n _, prediction = torch.max(logits, -1)\n y = batch.y.view(logits.shape).to(torch.float64)\n # loss = criterion(logits, batch.y)\n\n #Whether y is non-null or not.\n is_valid = y >= 0\n # print(is_valid)\n #Loss matrix\n loss_mat = criterion(logits.double(), y)\n #loss matrix after removing null target\n loss_mat = torch.where(is_valid, loss_mat, torch.zeros(loss_mat.shape).to(loss_mat.device).to(loss_mat.dtype))\n \n optimizer.zero_grad()\n loss = torch.sum(loss_mat)/torch.sum(is_valid)\n loss.backward()\n\n torch.nn.utils.clip_grad_value_(gnnNets.parameters(), clip_value=2.0)\n optimizer.step()\n\n ## record\n loss_list.append(loss.item())\n # print(prediction.size(), batch.y.size())\n # acc.append(prediction.eq(batch.y).cpu().numpy())\n train_roc = evaluate_roc(gnnNets, dataloader['train'])\n\n # report train msg\n print(f\"Train Epoch:{epoch} |Loss: {np.average(loss_list):.3f} | \"\n # f\"Acc: {np.concatenate(acc, axis=0).mean():.3f} | \"\n f\"ROC: {train_roc:.3f}\")\n\n # report eval msg\n eval_state = evaluate_GC(dataloader['eval'], gnnNets, criterion)\n eval_roc = evaluate_roc(gnnNets, dataloader['eval'])\n print(f\"Eval Epoch: {epoch} | Loss: {eval_state['loss']:.3f} | ROC: {eval_roc:.3f}\")\n\n # only save the best model\n is_best = (eval_roc > best_roc)\n\n if eval_roc > best_roc:\n early_stop_count = 0\n else:\n early_stop_count += 1\n\n if early_stop_count > train_args.early_stopping:\n break\n\n if is_best:\n # best_acc = eval_state['acc']\n best_roc = eval_roc\n early_stop_count = 0\n if is_best or epoch % train_args.save_epoch == 0:\n save_best(ckpt_dir, epoch, gnnNets, model_args.model_name, is_best)\n\n print(f\"The best validation ROC is {best_roc}.\")\n # report test msg\n checkpoint = torch.load(os.path.join(ckpt_dir, f'{model_args.model_name}_finetune_best.pth'))\n gnnNets.update_state_dict(checkpoint['net'])\n test_state, _, _ = test_GC(dataloader['test'], gnnNets, criterion)\n test_roc = evaluate_roc(gnnNets, dataloader['test'])\n print(f\"Test: | Loss: {test_state['loss']:.3f} | ROC: {test_roc:.3f}\")\n\n\ndef evaluate_GC(eval_dataloader, gnnNets, criterion):\n acc = []\n loss_list = []\n gnnNets.eval()\n with torch.no_grad():\n for batch in eval_dataloader:\n logits, probs, _ = gnnNets(batch)\n _, prediction = torch.max(logits, -1)\n y = batch.y.view(logits.shape).to(torch.float64)\n #Whether y is non-null or not.\n is_valid = y >= 0\n #Loss matrix\n loss_mat = criterion(logits.double(), y)\n #loss matrix after removing null target\n loss_mat = torch.where(is_valid, loss_mat, torch.zeros(loss_mat.shape).to(loss_mat.device).to(loss_mat.dtype))\n loss = torch.sum(loss_mat)/torch.sum(is_valid)\n\n ## record\n loss_list.append(loss.item())\n # acc.append(prediction.eq(batch.y).cpu().numpy())\n\n eval_state = {'loss': np.average(loss_list)}\n # 'acc': np.concatenate(acc, axis=0).mean()}\n\n return eval_state\n\ndef evaluate_roc(gnnNets, loader):\n gnnNets.eval()\n y_true = []\n y_scores = []\n\n for batch in loader:\n # batch = batch.to(device)\n\n with torch.no_grad():\n logits, probs, _ = gnnNets(batch)\n # _, pred = torch.max(logits, -1)\n\n y_true.append(batch.y.view(logits.shape))\n y_scores.append(logits)\n\n y_true = torch.cat(y_true, dim = 0).cpu().numpy()\n y_scores = torch.cat(y_scores, dim = 0).cpu().numpy()\n # 
print(y_true.shape[1])\n\n roc_list = []\n for i in range(y_true.shape[1]):\n #AUC is only defined when there is at least one positive data.\n if np.sum(y_true[:,i] == 1) > 0 and np.sum(y_true[:,i] == 0) > 0:\n # y_true = torch.where(y_true==1, loss_mat, torch.zeros(y_true.shape).to(y_true.device).to(y_true.dtype))\n is_valid = y_true[:,i] >= 0\n # print(y_true[is_valid,i])\n roc_list.append(roc_auc_score(y_true[is_valid,i], y_scores[is_valid,i]))\n\n if len(roc_list) < y_true.shape[1]:\n print(\"Some target is missing!\")\n print(\"Missing ratio: %f\" %(1 - float(len(roc_list))/y_true.shape[1]))\n\n return sum(roc_list)/len(roc_list) #y_true.shape[1]\n\ndef test_GC(test_dataloader, gnnNets, criterion):\n acc = []\n loss_list = []\n pred_probs = []\n predictions = []\n gnnNets.eval()\n with torch.no_grad():\n for batch in test_dataloader:\n logits, probs, _ = gnnNets(batch)\n _, prediction = torch.max(logits, -1)\n y = batch.y.view(logits.shape).to(torch.float64)\n #Whether y is non-null or not.\n is_valid = y >= 0\n #Loss matrix\n loss_mat = criterion(logits.double(), y)\n #loss matrix after removing null target\n loss_mat = torch.where(is_valid, loss_mat, torch.zeros(loss_mat.shape).to(loss_mat.device).to(loss_mat.dtype))\n loss = torch.sum(loss_mat)/torch.sum(is_valid)\n\n # record\n loss_list.append(loss.item())\n # acc.append(prediction.eq(batch.y).cpu().numpy())\n predictions.append(prediction)\n pred_probs.append(probs)\n\n test_state = {'loss': np.average(loss_list)}\n # 'acc': np.average(np.concatenate(acc, axis=0).mean())}\n\n pred_probs = torch.cat(pred_probs, dim=0).cpu().detach().numpy()\n predictions = torch.cat(predictions, dim=0).cpu().detach().numpy()\n return test_state, pred_probs, predictions\n\n\ndef save_best(ckpt_dir, epoch, gnnNets, model_name, is_best):\n print('saving....')\n gnnNets.to('cpu')\n state = {\n 'net': gnnNets.state_dict(),\n 'epoch': epoch\n # 'acc': eval_acc\n }\n pth_name = f\"{model_name}_finetune_latest.pth\"\n best_pth_name = f'{model_name}_finetune_best.pth'\n ckpt_path = os.path.join(ckpt_dir, pth_name)\n torch.save(state, ckpt_path)\n if is_best:\n shutil.copy(ckpt_path, os.path.join(ckpt_dir, best_pth_name))\n gnnNets.to_device()\n\n\nif __name__ == '__main__':\n import sys\n globals()[sys.argv[1]]()\n","repo_name":"BIBM2021-MolCLE/MolCLE","sub_path":"finetune_molcle.py","file_name":"finetune_molcle.py","file_ext":"py","file_size_in_byte":9795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"17570370749","text":"from django.contrib.contenttypes.models import ContentType\nfrom django.urls import reverse\nfrom selenium.webdriver.common.keys import Keys\n\nfrom nautobot.core.testing.integration import SeleniumTestCase\nfrom nautobot.dcim.models import Device\nfrom nautobot.extras.models import DynamicGroup\n\nfrom . 
import create_test_device\n\n\nclass DynamicGroupTestCase(SeleniumTestCase):\n \"\"\"\n Integration test to check nautobot.extras.models.DynamicGroup add/edit functionality.\n \"\"\"\n\n def setUp(self):\n super().setUp()\n self.user.is_superuser = True\n self.user.save()\n self.login(self.user.username, self.password)\n\n def tearDown(self):\n self.logout()\n super().tearDown()\n\n def test_create_and_update(self):\n \"\"\"\n Test initial add and then update of a new DynamicGroup\n \"\"\"\n devices = [create_test_device() for _ in range(5)]\n content_type = ContentType.objects.get_for_model(Device)\n ct_label = f\"{content_type.app_label}.{content_type.model}\"\n\n # Navigate to the DynamicGroups list view\n self.browser.links.find_by_partial_text(\"Organization\").click()\n self.browser.links.find_by_partial_text(\"Dynamic Groups\").click()\n\n # Click add button\n self.browser.find_by_id(\"add-button\").click()\n\n # Fill out the form.\n name = \"devices-active\"\n self.browser.fill(\"name\", name)\n self.browser.select(\"content_type\", ct_label)\n\n # Click that \"Create\" button\n self.browser.find_by_text(\"Create\").click()\n\n # Verify form redirect and presence of content.\n self.assertTrue(self.browser.is_text_present(f\"Created dynamic group {name}\"))\n self.assertTrue(self.browser.is_text_present(\"Edit\"))\n\n # Edit the newly created DynamicGroup (Click that \"Edit\" button)\n self.browser.find_by_id(\"edit-button\").click()\n\n # Find the \"Status\" dynamic multi-select and type into it. Xpath is used\n # to find the next \"input\" after the \"status\" select field.\n status_field = self.browser.find_by_name(\"filter-status\").first\n status_input = status_field.find_by_xpath(\"./following::input[1]\").first\n status_input.click() # Force focus on the input field to bring it on-screen\n\n # Fill in \"Status: Active\".\n for _ in status_input.type(\"act\", slowly=True):\n pass\n status_input.type(Keys.ENTER)\n\n # Click that \"Update\" button\n self.browser.find_by_text(\"Update\").click()\n\n # Verify form redirect and presence of content.\n self.assertTrue(self.browser.is_text_present(f\"Modified dynamic group {name}\"))\n self.assertTrue(self.browser.is_text_present(\"Edit\"))\n\n # And just a cursory check to make sure that the filter worked.\n group = DynamicGroup.objects.get(name=name)\n self.assertEqual(group.count, len(devices))\n self.assertEqual(group.filter, {\"status\": [\"Active\"]})\n\n # Verify dynamic group shows up on device detail tab\n self.browser.visit(\n f'{self.live_server_url}{reverse(\"dcim:device_dynamicgroups\", kwargs={\"pk\": devices[0].pk})}'\n )\n self.assertTrue(self.browser.is_text_present(name))\n","repo_name":"nautobot/nautobot","sub_path":"nautobot/extras/tests/integration/test_dynamicgroups.py","file_name":"test_dynamicgroups.py","file_ext":"py","file_size_in_byte":3233,"program_lang":"python","lang":"en","doc_type":"code","stars":771,"dataset":"github-code","pt":"4"} +{"seq_id":"36551274304","text":"print('Count the total number of digits in a number')\r\n\r\n# Write a program to count the total number of digits in a number using a while loop.\r\n# For example, the number is 75869, so the output should be 5.\r\n\r\nnum = int(input('Please enter a natural number (positive and without a decimal point)'))\r\ndigits = 1\r\n\r\n# numbers must be natural, so if a number is smaller then 10, it must only have 1 digit\r\n\r\nif num <= 0:\r\n print('are you dumb? i told u a natural number! 
run me again and enter a natural number')\r\n\r\nelif num<10:\r\n print(digits)\r\n\r\nwhile num >= 10:\r\n digits += 1\r\n num = num/10\r\n continue\r\n\r\nprint('There are ' + str(digits) + ' digits in the number you entered')","repo_name":"IdanRossman/PythonChallanges","sub_path":"PythonChallanges/HowManyDigits.py","file_name":"HowManyDigits.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"7048443755","text":"#20. Faça um programa que receba um conjunto de valores inteiros e positivos e que calcule e mostre o maior e o menor valor do conjunto. Considere que:\n#•\tPara encerrar a entrada de dados, deve ser digitado o valor zero;\n#•\tPara valores negativos, deve ser enviada uma mensagem;\n#•\tOs valores negativos ou iguais a zero não entrarão nos cálculos.\n\n# Inicializando as variáveis\nmaior_valor = -1\nmenor_valor = float('inf')\n\n# Coletando os valores\nwhile True:\n valor = int(input(\"Digite um valor inteiro e positivo (ou 0 para sair): \"))\n if valor == 0:\n break\n elif valor < 0:\n print(\"Valor inválido! Por favor, digite um valor inteiro e positivo.\")\n else:\n if valor > maior_valor:\n maior_valor = valor\n if valor < menor_valor:\n menor_valor = valor\n\n# Verificando se algum valor válido foi digitado\nif maior_valor == -1 or menor_valor == float('inf'):\n print(\"Nenhum valor válido foi digitado.\")\nelse:\n # Mostrando o maior e o menor valor\n print(\"Maior valor:\", maior_valor)\n print(\"Menor valor:\", menor_valor)\n","repo_name":"isamouraSB/Listas_Python","sub_path":"Lista1_USP/EX20.PY","file_name":"EX20.PY","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"71131588277","text":"\r\nfrom lookups import ETLStep,CurrenciesToReplicate\r\nfrom database_handler import execute_query\r\n\r\nimport os\r\ndef get_prehook_sql_files(sql_command_directory_path):\r\n sql_files = [sqlfile for sqlfile in os.listdir(sql_command_directory_path) if (sqlfile.endswith('.sql') and ETLStep.PRE_HOOK.value in sqlfile) ]\r\n return sql_files\r\n\r\ndef get_hook_sql_files(sql_command_directory_path):\r\n sql_files = [sqlfile for sqlfile in os.listdir(sql_command_directory_path) if (sqlfile.endswith('.sql') and ETLStep.HOOK.value in sqlfile) ]\r\n return sql_files\r\n\r\n\r\ndef return_currencies_to_replicate():\r\n currencies_tables = list()\r\n tables = [table.value for table in CurrenciesToReplicate]\r\n for table in tables:\r\n currencies_tables.append(table)\r\n return currencies_tables\r\n\r\n\r\ndef insert_into_etl_logging_table( db_session, schema_name, step_name, start_time, end_time):\r\n query = f\"\"\"\r\n INSERT INTO {schema_name.value}.etl_history (step_name, execution_start, execution_end)\r\n VALUES\r\n (\r\n '{step_name.value}',\r\n '{start_time}',\r\n '{end_time}'\r\n )\r\n \"\"\"\r\n\r\n execute_query(db_session, query)","repo_name":"mhammadsinger/SE_FACTORY","sub_path":"SE_final project/misc_handler.py","file_name":"misc_handler.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"26588966903","text":"from app import app\nfrom flask import request\nfrom base.ResultVO import ResultVO as vo\n\nfrom .prompt import mainConfig\n\nurl = mainConfig.getUrl()\n\n@app.route(url + '/keys', methods=['GET'])\ndef list_keys():\n \"\"\" 列出 所有键\"\"\"\n redis = mainConfig.getRedis()\n 
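# redis-py returns keys as bytes, so each one is decoded to str before being wrapped\n    # in the JSON response below.\n    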
all_key = redis.keys()\n result = []\n for one in all_key:\n result.append(one.decode())\n return vo.multiple(result)\n\n\n@app.route(url + '/keys/', methods=['GET'])\ndef list_keys_by_len(length):\n \"\"\"列出所有, 按键的长度\"\"\"\n redis = mainConfig.getRedis()\n key_list = redis.keys('?' * length)\n result = []\n for one in key_list:\n result.append(one.decode())\n return vo.multiple(result)\n\n\n@app.route(url + '/key/', methods=['GET'])\ndef key_get(key):\n redis = mainConfig.getRedis()\n print('key get key=', key, 'redis=', redis)\n re = redis.type(key)\n if re == b'string':\n result = redis.get(key)\n return vo.single(result.decode())\n elif re == b'none':\n return vo.fail(404)\n else:\n return vo.fail(405)\n\n\n@app.route(url + '/key', methods=['POST'])\ndef key_set():\n redis_config = ['key', 'value']\n if not request.json:\n return vo.fail(406)\n for one in redis_config:\n if one not in request.json or request.json[one] == '':\n return vo.fail(406)\n redis = mainConfig.getRedis()\n redis.set(request.json['key'], request.json['value'])\n return vo.success()\n","repo_name":"Kuangcp/PythonLearn","sub_path":"web/flask/web_redis/app/string.py","file_name":"string.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"23065120770","text":"import math\nimport warnings\n\nwith warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n import cma\n\nfrom linien_common.common import MHz, Vpp\nfrom linien_server.optimization.utils import (\n FINAL_ZOOM_FACTOR,\n get_max_slope,\n optimize_phase_from_iq,\n)\n\n\nclass NoOptimizationEngine:\n def __init__(self, *args):\n pass\n\n def finished(self):\n return True\n\n def ask(self):\n raise NotImplementedError()\n\n def tell(self, *args):\n raise NotImplementedError()\n\n\nclass OneDimensionalOptimizationEngine:\n def __init__(self, bounds):\n bounds = list(bounds)\n bounds.append([0.0, 1.0])\n self._multi = MultiDimensionalOptimizationEngine(bounds)\n self._last_additional_param = 0.5\n\n def ask(self):\n params = self._multi.ask()\n self._last_additional_param = params[1]\n return [params[0]]\n\n def finished(self):\n return self._multi.finished()\n\n def tell(self, fitness, parameters):\n parameters = list(parameters)\n parameters.append(self._last_additional_param)\n self._multi.tell(fitness, parameters)\n\n\nclass MultiDimensionalOptimizationEngine:\n def __init__(self, bounds, x0=None):\n self.bounds = bounds\n\n if x0 is not None:\n x0_converted = self.params_to_internal(x0)\n else:\n x0_converted = [0.5 for v in bounds]\n\n self.es = cma.CMAEvolutionStrategy(\n x0_converted,\n 0.5,\n {\"bounds\": [[0 for v in bounds], [1 for v in bounds]]},\n )\n\n self._pending = []\n self._done = []\n self._results = []\n\n def params_to_internal(self, parameters):\n new_parameters = []\n for [min_, max_], param in zip(self.bounds, parameters):\n new_parameters.append((param - min_) / (max_ - min_))\n return new_parameters\n\n def internal_to_params(self, internal):\n parameters = []\n for [min_, max_], param in zip(self.bounds, internal):\n parameters.append((param * (max_ - min_)) + min_)\n return parameters\n\n def finished(self):\n return self.es.stop()\n\n def ask(self):\n if not self._pending:\n self._pending = self.es.ask()\n\n return self.internal_to_params(self._pending.pop())\n\n def tell(self, fitness, parameters):\n self._results.append(fitness)\n self._done.append(parameters)\n\n if not self._pending:\n self.es.tell(\n 
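# hand the accumulated generation back to CMA-ES once every candidate has been\n                # evaluated; parameters are first mapped back into the internal [0, 1] cube\n                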
[self.params_to_internal(p) for p in self._done], self._results\n )\n self._results = []\n self._done = []\n\n\nclass OptimizerEngine:\n def __init__(self, control, params):\n self.control = control\n self.parameters = params\n\n self.init_opt_with_bounds()\n\n self.all_params = [\n params.modulation_frequency,\n params.modulation_amplitude,\n self.get_demod_phase_param(),\n ]\n self.params_before_start = [p.value for p in self.all_params]\n\n self.parameters.optimization_optimized_parameters.value = (\n self.params_before_start\n )\n\n self.initial_slope = None\n self.last_parameters = None\n self.last_parameters_internal = None\n\n def init_opt_with_bounds(self):\n params = self.parameters\n\n self.to_optimize = []\n self.bounds = []\n\n if params.optimization_mod_freq_enabled.value:\n self.to_optimize.append(params.modulation_frequency)\n freqs = list(\n sorted(\n [\n params.optimization_mod_freq_min.value * MHz,\n params.optimization_mod_freq_max.value * MHz,\n ]\n )\n )\n self.bounds.append(freqs)\n\n if params.optimization_mod_amp_enabled.value:\n self.to_optimize.append(params.modulation_amplitude)\n ampls = list(\n sorted(\n [\n params.optimization_mod_amp_min.value * Vpp,\n params.optimization_mod_amp_max.value * Vpp,\n ]\n )\n )\n self.bounds.append(ampls)\n\n self.opt = [\n NoOptimizationEngine,\n OneDimensionalOptimizationEngine,\n MultiDimensionalOptimizationEngine,\n ][len(self.bounds)]([[0, 1]] * len(self.bounds))\n\n def request_and_set_new_parameters(self, use_initial_parameters=False):\n self.control.exposed_pause_acquisition()\n\n if use_initial_parameters:\n for param, initial in zip(self.all_params, self.params_before_start):\n param.value = initial\n else:\n new_params = self.opt.ask()\n new_params_converted = [\n self.bounds[idx][0] + p * (self.bounds[idx][1] - self.bounds[idx][0])\n for idx, p in enumerate(new_params)\n ]\n\n for param, value in zip(self.to_optimize, new_params_converted):\n param.value = value\n\n self.last_parameters = list(new_params_converted)\n self.last_parameters_internal = list(new_params)\n\n self.control.exposed_write_registers()\n self.control.exposed_continue_acquisition()\n\n def finished(self):\n return self.opt.finished()\n\n def get_demod_phase_param(self):\n params = self.parameters\n dual_channel = params.dual_channel.value\n channel = params.optimization_channel.value\n\n return (\n self.parameters.demodulation_phase_a,\n self.parameters.demodulation_phase_b,\n )[0 if not dual_channel else (0, 1)[channel]]\n\n def tell(self, i, q):\n if self.initial_slope is None:\n self.initial_slope = get_max_slope(i, FINAL_ZOOM_FACTOR)\n\n optimized_phase, optimized_slope = optimize_phase_from_iq(\n i, q, FINAL_ZOOM_FACTOR\n )\n old_phase_value = self.get_demod_phase_param().value\n new_phase_value = old_phase_value - optimized_phase\n if new_phase_value > 360:\n new_phase_value -= 360\n if new_phase_value < 0:\n new_phase_value = 360 - abs(new_phase_value)\n\n improvement = (optimized_slope - self.initial_slope) / self.initial_slope\n params = self.parameters\n if improvement > 0 and improvement > params.optimization_improvement.value:\n params.optimization_improvement.value = improvement\n complete_parameter_set = self.params_before_start[:]\n complete_parameter_set[2] = new_phase_value\n\n if self.last_parameters:\n for param, value in zip(self.to_optimize, self.last_parameters):\n idx = {\n self.parameters.modulation_frequency: 0,\n self.parameters.modulation_amplitude: 1,\n }[param]\n complete_parameter_set[idx] = value\n\n 
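# persist the best-so-far [mod_freq, mod_amp, demod_phase] set; this branch only\n            # runs when the new slope improved on the previous best\n            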
params.optimization_optimized_parameters.value = complete_parameter_set\n\n print(\"improvement %d\" % (improvement * 100))\n\n fitness = math.log(1 / optimized_slope)\n\n if self.last_parameters_internal is not None:\n self.opt.tell(fitness, self.last_parameters_internal)\n\n def use_best_parameters(self):\n optimized = self.parameters.optimization_optimized_parameters.value\n\n for param, value in zip(self.all_params, optimized):\n param.value = value\n","repo_name":"linien-org/linien","sub_path":"linien-server/linien_server/optimization/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":7499,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"4"} +{"seq_id":"2347313676","text":"# 시설 생성\n# DB에 Create\nimport random\nfrom django_seed import Seed\nfrom django.core.management.base import BaseCommand\nfrom django.contrib.admin.utils import flatten\nfrom rooms import models as room_models\nfrom users import models as user_models\n\n\nclass Command(BaseCommand):\n help = \"This command creates rooms\"\n\n def add_arguments(self, parser):\n parser.add_argument(\n \"--number\", default=2, type=int ,help=\"How Many times do you want me to tell yoy that I love you?\")\n\n def handle(self, *args, **options):\n number = options.get(\"number\")\n seeder = Seed.seeder()\n all_user = user_models.User.objects.all()\n room_type = room_models.RoomType.objects.all()\n seeder.add_entity(room_models.Room, number,{\n 'name' : lambda x :seeder.faker.address(),\n 'host':lambda x: random.choice(all_user),\n 'room_type': lambda x: random.choice(room_type),\n 'guests': lambda x: random.randint(1, 20),\n 'price': lambda x : random.randint(1,300),\n \"beds\": lambda x : random.randint(1,5),\n \"bedrooms\": lambda x : random.randint(1,5),\n 'baths':lambda x : random.randint(1,5),\n })\n\n # seeder.execute() 이 코드 까지 해야 Room 생성\n # 아래의 created_photos = seeder.execute() 이 코드가 있는데\n # 이렇게 변수로 담아줘도 seeder.execute()가 동작 한다.\n\n ''' 여기서 부터 photo(사진) 넣는 코드 '''\n created_photos = seeder.execute()\n\n # created_clean -> 생성되는 id(pk)값을 얻을 수 있는 것\n created_clean = flatten(list(created_photos.values()))\n amenities = room_models.Amenity.objects.all()\n facilities = room_models.Facility.objects.all()\n rules = room_models.HouseRule.objects.all()\n for pk in created_clean:\n print(pk)\n room = room_models.Room.objects.get(pk=pk)\n # 사진추가하는 방법\n for i in range(3,random.randint(10,30)): #사진갯수\n room_models.Photo.objects.create(\n caption = seeder.faker.sentence(),\n room = room,\n file = f\"/room_photos/{random.randint(1,31)}.webp\",\n )\n #\n for a in amenities:\n magic_number = random.randint(0, 15)\n if magic_number % 2 == 0:\n room.amenities.add(a) # 다대다(ManytoManyField)에서 무언가 추가하는 방법\n\n for f in facilities:\n magic_number = random.randint(0, 15)\n if magic_number % 2 == 0:\n room.facilities.add(f) # 다대다(ManytoManyField)에서 무언가 추가하는 방법\n\n for r in rules:\n magic_number = random.randint(0, 15)\n if magic_number % 2 == 0:\n room.house_rules.add(r) # 다대다(ManytoManyField)에서 무언가 추가하는 방법\n\n self.stdout.write(self.style.SUCCESS(f\"{number} room created!\"))","repo_name":"qkrrkgus14/nomad","sub_path":"config/rooms/management/commands/seed_rooms.py","file_name":"seed_rooms.py","file_ext":"py","file_size_in_byte":3087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"34185018493","text":"# coding: utf-8\n\n\"\"\"\n Bungie.Net API\n\n These endpoints constitute the functionality exposed by Bungie.net, both for more 
traditional website functionality and for connectivity to Bungie video games and their related functionality. # noqa: E501\n\n OpenAPI spec version: 2.3.6\n Contact: support@bungie.com\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\n\nclass DestinyItemInstanceComponent(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n openapi_types = {\n 'damage_type': 'int',\n 'damage_type_hash': 'int',\n 'primary_stat': 'DestinyStat',\n 'item_level': 'int',\n 'quality': 'int',\n 'is_equipped': 'bool',\n 'can_equip': 'bool',\n 'equip_required_level': 'int',\n 'unlock_hashes_required_to_equip': 'list[int]',\n 'cannot_equip_reason': 'int'\n }\n\n attribute_map = {\n 'damage_type': 'damageType',\n 'damage_type_hash': 'damageTypeHash',\n 'primary_stat': 'primaryStat',\n 'item_level': 'itemLevel',\n 'quality': 'quality',\n 'is_equipped': 'isEquipped',\n 'can_equip': 'canEquip',\n 'equip_required_level': 'equipRequiredLevel',\n 'unlock_hashes_required_to_equip': 'unlockHashesRequiredToEquip',\n 'cannot_equip_reason': 'cannotEquipReason'\n }\n\n def __init__(self, damage_type=None, damage_type_hash=None, primary_stat=None, item_level=None, quality=None, is_equipped=None, can_equip=None, equip_required_level=None, unlock_hashes_required_to_equip=None, cannot_equip_reason=None): # noqa: E501\n \"\"\"DestinyItemInstanceComponent - a model defined in OpenAPI\"\"\" # noqa: E501\n\n self._damage_type = None\n self._damage_type_hash = None\n self._primary_stat = None\n self._item_level = None\n self._quality = None\n self._is_equipped = None\n self._can_equip = None\n self._equip_required_level = None\n self._unlock_hashes_required_to_equip = None\n self._cannot_equip_reason = None\n self.discriminator = None\n\n if damage_type is not None:\n self.damage_type = damage_type\n self.damage_type_hash = damage_type_hash\n if primary_stat is not None:\n self.primary_stat = primary_stat\n if item_level is not None:\n self.item_level = item_level\n if quality is not None:\n self.quality = quality\n if is_equipped is not None:\n self.is_equipped = is_equipped\n if can_equip is not None:\n self.can_equip = can_equip\n if equip_required_level is not None:\n self.equip_required_level = equip_required_level\n if unlock_hashes_required_to_equip is not None:\n self.unlock_hashes_required_to_equip = unlock_hashes_required_to_equip\n if cannot_equip_reason is not None:\n self.cannot_equip_reason = cannot_equip_reason\n\n @property\n def damage_type(self):\n \"\"\"Gets the damage_type of this DestinyItemInstanceComponent. # noqa: E501\n\n If the item has a damage type, this is the item's current damage type. # noqa: E501\n\n :return: The damage_type of this DestinyItemInstanceComponent. # noqa: E501\n :rtype: int\n \"\"\"\n return self._damage_type\n\n @damage_type.setter\n def damage_type(self, damage_type):\n \"\"\"Sets the damage_type of this DestinyItemInstanceComponent.\n\n If the item has a damage type, this is the item's current damage type. # noqa: E501\n\n :param damage_type: The damage_type of this DestinyItemInstanceComponent. 
# noqa: E501\n :type: int\n \"\"\"\n\n self._damage_type = damage_type\n\n @property\n def damage_type_hash(self):\n \"\"\"Gets the damage_type_hash of this DestinyItemInstanceComponent. # noqa: E501\n\n The current damage type's hash, so you can look up localized info and icons for it. # noqa: E501\n\n :return: The damage_type_hash of this DestinyItemInstanceComponent. # noqa: E501\n :rtype: int\n \"\"\"\n return self._damage_type_hash\n\n @damage_type_hash.setter\n def damage_type_hash(self, damage_type_hash):\n \"\"\"Sets the damage_type_hash of this DestinyItemInstanceComponent.\n\n The current damage type's hash, so you can look up localized info and icons for it. # noqa: E501\n\n :param damage_type_hash: The damage_type_hash of this DestinyItemInstanceComponent. # noqa: E501\n :type: int\n \"\"\"\n\n self._damage_type_hash = damage_type_hash\n\n @property\n def primary_stat(self):\n \"\"\"Gets the primary_stat of this DestinyItemInstanceComponent. # noqa: E501\n\n The item stat that we consider to be \\\"primary\\\" for the item. For instance, this would be \\\"Attack\\\" for Weapons or \\\"Defense\\\" for armor. # noqa: E501\n\n :return: The primary_stat of this DestinyItemInstanceComponent. # noqa: E501\n :rtype: DestinyStat\n \"\"\"\n return self._primary_stat\n\n @primary_stat.setter\n def primary_stat(self, primary_stat):\n \"\"\"Sets the primary_stat of this DestinyItemInstanceComponent.\n\n The item stat that we consider to be \\\"primary\\\" for the item. For instance, this would be \\\"Attack\\\" for Weapons or \\\"Defense\\\" for armor. # noqa: E501\n\n :param primary_stat: The primary_stat of this DestinyItemInstanceComponent. # noqa: E501\n :type: DestinyStat\n \"\"\"\n\n self._primary_stat = primary_stat\n\n @property\n def item_level(self):\n \"\"\"Gets the item_level of this DestinyItemInstanceComponent. # noqa: E501\n\n The Item's \\\"Level\\\" has the most significant bearing on its stats, such as Light and Power. # noqa: E501\n\n :return: The item_level of this DestinyItemInstanceComponent. # noqa: E501\n :rtype: int\n \"\"\"\n return self._item_level\n\n @item_level.setter\n def item_level(self, item_level):\n \"\"\"Sets the item_level of this DestinyItemInstanceComponent.\n\n The Item's \\\"Level\\\" has the most significant bearing on its stats, such as Light and Power. # noqa: E501\n\n :param item_level: The item_level of this DestinyItemInstanceComponent. # noqa: E501\n :type: int\n \"\"\"\n\n self._item_level = item_level\n\n @property\n def quality(self):\n \"\"\"Gets the quality of this DestinyItemInstanceComponent. # noqa: E501\n\n The \\\"Quality\\\" of the item has a lesser - but still impactful - bearing on stats like Light and Power. # noqa: E501\n\n :return: The quality of this DestinyItemInstanceComponent. # noqa: E501\n :rtype: int\n \"\"\"\n return self._quality\n\n @quality.setter\n def quality(self, quality):\n \"\"\"Sets the quality of this DestinyItemInstanceComponent.\n\n The \\\"Quality\\\" of the item has a lesser - but still impactful - bearing on stats like Light and Power. # noqa: E501\n\n :param quality: The quality of this DestinyItemInstanceComponent. # noqa: E501\n :type: int\n \"\"\"\n\n self._quality = quality\n\n @property\n def is_equipped(self):\n \"\"\"Gets the is_equipped of this DestinyItemInstanceComponent. # noqa: E501\n\n Is the item currently equipped on the given character? # noqa: E501\n\n :return: The is_equipped of this DestinyItemInstanceComponent. 
# noqa: E501\n :rtype: bool\n \"\"\"\n return self._is_equipped\n\n @is_equipped.setter\n def is_equipped(self, is_equipped):\n \"\"\"Sets the is_equipped of this DestinyItemInstanceComponent.\n\n Is the item currently equipped on the given character? # noqa: E501\n\n :param is_equipped: The is_equipped of this DestinyItemInstanceComponent. # noqa: E501\n :type: bool\n \"\"\"\n\n self._is_equipped = is_equipped\n\n @property\n def can_equip(self):\n \"\"\"Gets the can_equip of this DestinyItemInstanceComponent. # noqa: E501\n\n If this is an equippable item, you can check it here. There are permanent as well as transitory reasons why an item might not be able to be equipped: check cannotEquipReason for details. # noqa: E501\n\n :return: The can_equip of this DestinyItemInstanceComponent. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._can_equip\n\n @can_equip.setter\n def can_equip(self, can_equip):\n \"\"\"Sets the can_equip of this DestinyItemInstanceComponent.\n\n If this is an equippable item, you can check it here. There are permanent as well as transitory reasons why an item might not be able to be equipped: check cannotEquipReason for details. # noqa: E501\n\n :param can_equip: The can_equip of this DestinyItemInstanceComponent. # noqa: E501\n :type: bool\n \"\"\"\n\n self._can_equip = can_equip\n\n @property\n def equip_required_level(self):\n \"\"\"Gets the equip_required_level of this DestinyItemInstanceComponent. # noqa: E501\n\n If the item cannot be equipped until you reach a certain level, that level will be reflected here. # noqa: E501\n\n :return: The equip_required_level of this DestinyItemInstanceComponent. # noqa: E501\n :rtype: int\n \"\"\"\n return self._equip_required_level\n\n @equip_required_level.setter\n def equip_required_level(self, equip_required_level):\n \"\"\"Sets the equip_required_level of this DestinyItemInstanceComponent.\n\n If the item cannot be equipped until you reach a certain level, that level will be reflected here. # noqa: E501\n\n :param equip_required_level: The equip_required_level of this DestinyItemInstanceComponent. # noqa: E501\n :type: int\n \"\"\"\n\n self._equip_required_level = equip_required_level\n\n @property\n def unlock_hashes_required_to_equip(self):\n \"\"\"Gets the unlock_hashes_required_to_equip of this DestinyItemInstanceComponent. # noqa: E501\n\n Sometimes, there are limitations to equipping that are represented by character-level flags called \\\"unlocks\\\". This is a list of flags that they need in order to equip the item that the character has not met. Use these to look up the descriptions to show in your UI by looking up the relevant DestinyUnlockDefinitions for the hashes. # noqa: E501\n\n :return: The unlock_hashes_required_to_equip of this DestinyItemInstanceComponent. # noqa: E501\n :rtype: list[int]\n \"\"\"\n return self._unlock_hashes_required_to_equip\n\n @unlock_hashes_required_to_equip.setter\n def unlock_hashes_required_to_equip(self, unlock_hashes_required_to_equip):\n \"\"\"Sets the unlock_hashes_required_to_equip of this DestinyItemInstanceComponent.\n\n Sometimes, there are limitations to equipping that are represented by character-level flags called \\\"unlocks\\\". This is a list of flags that they need in order to equip the item that the character has not met. Use these to look up the descriptions to show in your UI by looking up the relevant DestinyUnlockDefinitions for the hashes. 
# noqa: E501\n\n :param unlock_hashes_required_to_equip: The unlock_hashes_required_to_equip of this DestinyItemInstanceComponent. # noqa: E501\n :type: list[int]\n \"\"\"\n\n self._unlock_hashes_required_to_equip = unlock_hashes_required_to_equip\n\n @property\n def cannot_equip_reason(self):\n \"\"\"Gets the cannot_equip_reason of this DestinyItemInstanceComponent. # noqa: E501\n\n If you cannot equip the item, this is a flags enum that enumerates all of the reasons why you couldn't equip the item. You may need to refine your UI further by using unlockHashesRequiredToEquip and equipRequiredLevel. # noqa: E501\n\n :return: The cannot_equip_reason of this DestinyItemInstanceComponent. # noqa: E501\n :rtype: int\n \"\"\"\n return self._cannot_equip_reason\n\n @cannot_equip_reason.setter\n def cannot_equip_reason(self, cannot_equip_reason):\n \"\"\"Sets the cannot_equip_reason of this DestinyItemInstanceComponent.\n\n If you cannot equip the item, this is a flags enum that enumerates all of the reasons why you couldn't equip the item. You may need to refine your UI further by using unlockHashesRequiredToEquip and equipRequiredLevel. # noqa: E501\n\n :param cannot_equip_reason: The cannot_equip_reason of this DestinyItemInstanceComponent. # noqa: E501\n :type: int\n \"\"\"\n\n self._cannot_equip_reason = cannot_equip_reason\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, DestinyItemInstanceComponent):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","repo_name":"Yogarine/bungie-sdk-python","sub_path":"bungie_sdk_python/Model/Destiny/Entities/Items/destiny_item_instance_component.py","file_name":"destiny_item_instance_component.py","file_ext":"py","file_size_in_byte":14226,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"21500070700","text":"import unittest\nimport logging\nfrom findfine_crawler.crawlerForExRate import CrawlerForExRate\n\"\"\"\n測試 爬取 Yahoo 外幣投資頁面匯率資料\n\"\"\"\n\nclass CrawlerForExRateTest(unittest.TestCase):\n\n #準備\n def setUp(self):\n logging.basicConfig(level=logging.INFO)\n self.crawler = CrawlerForExRate()\n self.crawler.initDriver()\n \n #收尾\n def tearDown(self):\n self.crawler.quitDriver()\n \n #測試爬取 yahoo currency page\n def test_crawlYahooCurrencyPage(self):\n logging.info(\"CrawlerForExRateTest.test_crawlYahooCurrencyPage\")\n self.crawler.crawlYahooCurrencyPage()\n \n#測試開始\nif __name__ == \"__main__\":\n 
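# exit=False keeps the interpreter alive after the run instead of calling sys.exit()\n    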
unittest.main(exit=False)\n\n\n","repo_name":"muchu1983/104_findfine","sub_path":"test/unit/test_crawlerForExRate.py","file_name":"test_crawlerForExRate.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"10457827768","text":"from kivy.app import App\nfrom os.path import dirname, join\nfrom kivy.lang import Builder\nfrom kivy.properties import NumericProperty, StringProperty, BooleanProperty,\\\n ListProperty\nfrom kivy.clock import Clock\nfrom kivy.animation import Animation\nimport kivy\nfrom kivy.config import Config\nfrom playsound import playsound\nimport socket\nfrom home import *\nfrom calendarr import *\nfrom windows import *\n\n\nclass CloudedOperationSystemApp(App):\n\n index = NumericProperty(-1)\n current_title = StringProperty()\n time = NumericProperty(0)\n show_sourcecode = BooleanProperty(False)\n sourcecode = StringProperty()\n screen_names = ListProperty([])\n hierarchy = ListProperty([])\n\n def build(self):\n self.title = 'CLOUDED OPERATION SYSTEM'\n self.icon = 'pictures/COS.png'\n self.screens = {}\n self.available_screens = ['HOME', 'CALENDAR', 'WINDOWS']\n self.screen_names = self.available_screens\n curdir = dirname(__file__)\n self.available_screens = [join(curdir, '{}.kv'.format(fn).lower()) for fn in self.available_screens]\n self.go_next_screen()\n\n def on_stop(self):\n \"\"\"\n\n Executes when the window is closed to close\n all the other functions of the program.\n\n \"\"\"\n self.close_clock()\n self.close_sync()\n self.close_notes()\n self.close_sound()\n\n @staticmethod\n def close_clock():\n \"\"\"\n\n Closing the clock screen.\n\n \"\"\"\n client_socket = socket.socket()\n client_socket.connect((CLOUD_IP, CLOCK_PORT))\n client_socket.send(CLOSE_CLOCK_NOW)\n client_socket.close()\n\n @staticmethod\n def close_sync():\n \"\"\"\n\n Closing the sync process.\n\n \"\"\"\n client_socket = socket.socket()\n client_socket.connect((CLOUD_IP, SYNC_PORT))\n client_socket.send(CLOSE_SYNC_NOW)\n client_socket.close()\n\n @staticmethod\n def close_notes():\n \"\"\"\n\n Closing the notes screen.\n\n \"\"\"\n client_socket = socket.socket()\n client_socket.connect((CLOUD_IP, NOTES_PORT))\n client_socket.send(CLOSE_NOTES_NOW)\n client_socket.close()\n\n @staticmethod\n def close_sound():\n \"\"\"\n\n Displaying the closing sound when the window is closed.\n\n \"\"\"\n playsound(CLOSE_SOUND)\n\n def on_pause(self):\n return True\n\n def on_resume(self):\n pass\n\n def on_current_title(self, instance, value):\n \"\"\"\n\n Displaying the title of the window as the name of the screen.\n\n args:\n value (string): The name of the window.\n\n \"\"\"\n self.root.ids.spnr.text = value\n\n def go_previous_screen(self):\n \"\"\"\n\n Moving to the previous screen.\n\n \"\"\"\n self.index = (self.index - 1) % len(self.available_screens)\n screen = self.load_screen(self.index)\n sm = self.root.ids.sm\n sm.switch_to(screen, direction='right')\n self.current_title = screen.name\n self.update_sourcecode()\n\n def go_next_screen(self):\n \"\"\"\n\n Moving to the next screen.\n\n \"\"\"\n self.index = (self.index + 1) % len(self.available_screens)\n screen = self.load_screen(self.index)\n sm = self.root.ids.sm\n sm.switch_to(screen, direction='left')\n self.current_title = screen.name\n self.update_sourcecode()\n\n def go_screen(self, idx):\n \"\"\"\n\n Displaying the current screen on the window.\n\n args:\n idx (int): The index number of the current screen.\n\n \"\"\"\n self.index = 
idx\n self.root.ids.sm.switch_to(self.load_screen(idx), direction='left')\n self.update_sourcecode()\n\n def go_hierarchy_previous(self):\n \"\"\"\n\n Moving to the previous selected screen.\n\n \"\"\"\n ahr = self.hierarchy\n if len(ahr) == 1:\n return\n if ahr:\n ahr.pop()\n if ahr:\n idx = ahr.pop()\n self.go_screen(idx)\n\n def load_screen(self, index):\n \"\"\"\n\n Loading the graphic screen to the window\n from the kivi file.\n\n args:\n index: The index number of the current screen.\n\n \"\"\"\n os.chdir(MAIN_FOLDER)\n if index in self.screens:\n return self.screens[index]\n screen = Builder.load_file(self.available_screens[index])\n self.screens[index] = screen\n return screen\n\n def read_sourcecode(self):\n fn = self.available_screens[self.index]\n with open(fn) as fd:\n return fd.read()\n\n def toggle_source_code(self):\n self.show_sourcecode = not self.show_sourcecode\n if self.show_sourcecode:\n height = self.root.height * .3\n else:\n height = 0\n\n Animation(height=height, d=.3, t='out_quart').start(\n self.root.ids.sv)\n\n self.update_sourcecode()\n\n def update_sourcecode(self):\n if not self.show_sourcecode:\n self.root.ids.sourcecode.focus = False\n return\n self.root.ids.sourcecode.text = self.read_sourcecode()\n self.root.ids.sv.scroll_y = 1\n\n def showcase_floatlayout(self, layout):\n def add_button(*t):\n if not layout.get_parent_window():\n return\n if len(layout.children) > 5:\n layout.clear_widgets()\n layout.add_widget(Builder.load_string('''\n#:import random random.random\nButton:\n size_hint: random(), random()\n pos_hint: {'x': random(), 'y': random()}\n text:\n 'size_hint x: {} y: {}\\\\n pos_hint x: {} y: {}'.format(\\\n self.size_hint_x, self.size_hint_y, self.pos_hint['x'],\\\n self.pos_hint['y'])\n'''))\n Clock.schedule_once(add_button, 1)\n Clock.schedule_once(add_button)\n\n def showcase_boxlayout(self, layout):\n\n def add_button(*t):\n if not layout.get_parent_window():\n return\n if len(layout.children) > 5:\n layout.orientation = 'vertical'\\\n if layout.orientation == 'horizontal' else 'horizontal'\n layout.clear_widgets()\n layout.add_widget(Builder.load_string('''\nButton:\n text: self.parent.orientation if self.parent else ''\n'''))\n Clock.schedule_once(add_button, 1)\n Clock.schedule_once(add_button)\n\n def showcase_gridlayout(self, layout):\n\n def add_button(*t):\n if not layout.get_parent_window():\n return\n if len(layout.children) > 15:\n layout.rows = 3 if layout.rows is None else None\n layout.cols = None if layout.rows == 3 else 3\n layout.clear_widgets()\n layout.add_widget(Builder.load_string('''\nButton:\n text:\n 'rows: {}\\\\ncols: {}'.format(self.parent.rows, self.parent.cols)\\\n if self.parent else ''\n'''))\n Clock.schedule_once(add_button, 1)\n Clock.schedule_once(add_button)\n\n def showcase_stacklayout(self, layout):\n orientations = ('lr-tb', 'tb-lr',\n 'rl-tb', 'tb-rl',\n 'lr-bt', 'bt-lr',\n 'rl-bt', 'bt-rl')\n\n def add_button(*t):\n if not layout.get_parent_window():\n return\n if len(layout.children) > 11:\n layout.clear_widgets()\n cur_orientation = orientations.index(layout.orientation)\n layout.orientation = orientations[cur_orientation - 1]\n layout.add_widget(Builder.load_string('''\nButton:\n text: self.parent.orientation if self.parent else ''\n size_hint: .2, .2\n'''))\n Clock.schedule_once(add_button, 1)\n Clock.schedule_once(add_button)\n\n def showcase_anchorlayout(self, layout):\n\n def change_anchor(self, *l):\n if not layout.get_parent_window():\n return\n anchor_x = ('left', 'center', 'right')\n 
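# anchor_x steps backwards through its options on every call, while anchor_y\n            # advances one step only when anchor_x is currently 'left', so all nine\n            # (anchor_x, anchor_y) combinations are eventually shown\n            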
anchor_y = ('top', 'center', 'bottom')\n if layout.anchor_x == 'left':\n layout.anchor_y = anchor_y[anchor_y.index(layout.anchor_y) - 1]\n layout.anchor_x = anchor_x[anchor_x.index(layout.anchor_x) - 1]\n\n Clock.schedule_once(change_anchor, 1)\n Clock.schedule_once(change_anchor, 1)\n\n\nkivy.require('1.9.0')\nConfig.set('graphics', 'position', 'custom')\nConfig.set('graphics', 'left', 85)\nConfig.set('graphics', 'top', 100)\nConfig.set('kivy', 'window_icon', 'pictures/COS.ico')\nCloudedOperationSystemApp().run()\n","repo_name":"tur103/CLOUDED-OPERATION-SYSTEM","sub_path":"CloudedOperationSystem.py","file_name":"CloudedOperationSystem.py","file_ext":"py","file_size_in_byte":8573,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"551332492","text":"import pytz\nimport logging\nimport bitcoin\nfrom datetime import datetime, timedelta\nfrom django.db import transaction\nfrom django.utils import timezone\nfrom rest_framework import serializers\n\nfrom .conf import settings as app_settings\nfrom .models import (\n HedgePosition,\n HedgePositionMetadata,\n HedgeSettlement,\n SettlementService,\n HedgePositionFee,\n HedgeFundingProposal,\n MutualRedemption,\n HedgePositionOffer,\n HedgePositionOfferCounterParty,\n HedgePositionFunding,\n\n Oracle,\n PriceOracleMessage,\n)\nfrom .utils.address import match_pubkey_to_cash_address\nfrom .utils.contract import (\n create_contract,\n get_contract_status,\n compile_contract_from_hedge_position_offer,\n)\nfrom .utils.funding import (\n get_p2p_settlement_service_fee,\n get_tx_hash,\n calculate_funding_amounts,\n validate_funding_transaction,\n calculate_hedge_sats,\n)\nfrom .utils.liquidity import (\n find_matching_position_offer,\n find_close_matching_offer_suggestion,\n fund_hedge_position,\n)\nfrom .utils.price_oracle import (\n get_price_messages,\n save_price_oracle_message,\n)\nfrom .utils.push_notification import (\n send_position_offer_settled,\n send_contract_cancelled,\n send_contract_require_funding,\n send_mutual_redemption_proposal_update,\n)\nfrom .utils.validators import (\n ValidAddress,\n ValidTxHash,\n)\nfrom .utils.websocket import (\n send_offer_settlement_update,\n send_contract_cancelled_update,\n send_funding_tx_update,\n send_mutual_redemption_update,\n send_hedge_position_offer_update,\n)\nfrom .tasks import (\n validate_contract_funding,\n parse_contract_liquidity_fee,\n)\n\nLOGGER = logging.getLogger(__name__)\n\nclass TimestampField(serializers.IntegerField):\n def to_representation(self, value):\n return datetime.timestamp(value)\n\n def to_internal_value(self, data):\n return datetime.fromtimestamp(data).replace(tzinfo=pytz.UTC)\n\n\nclass FundingProposalSerializer(serializers.Serializer):\n hedge_address = serializers.CharField(validators=[ValidAddress(addr_type=ValidAddress.TYPE_CASHADDR)])\n position = serializers.CharField() # hedge | long\n tx_hash = serializers.CharField(validators=[ValidTxHash()])\n tx_index = serializers.IntegerField()\n tx_value = serializers.IntegerField()\n script_sig = serializers.CharField()\n pubkey = serializers.CharField(required=False)\n input_tx_hashes = serializers.ListField(\n child=serializers.CharField(),\n required=False\n )\n\n def validate_hedge_address(self, value):\n try:\n hedge_position_obj = HedgePosition.objects.get(address=value)\n if hedge_position_obj.funding_tx_hash:\n raise serializers.ValidationError(\"Hedge position is already funded\")\n\n if hedge_position_obj.settlements.count():\n raise 
serializers.ValidationError(\"Hedge position is already settled\")\n\n if hedge_position_obj.maturity_timestamp <= timezone.now() + timedelta(minutes=1):\n raise serializers.ValidationError(\"Hedge position has reached maturity\")\n\n if hedge_position_obj.cancelled_at:\n raise serializers.ValidationError(\"Hedge position is already cancelled\")\n except HedgePosition.DoesNotExist:\n raise serializers.ValidationError(\"Hedge position does not exist\")\n\n return value\n\n def validate_position(self, value):\n if value != \"hedge\" and value != \"long\":\n raise serializers.ValidationError(\"Position must be \\\"hedge\\\" or \\\"long\\\"\")\n return value\n\n @transaction.atomic()\n def create(self, validated_data):\n hedge_address = validated_data.pop(\"hedge_address\")\n position = validated_data.pop(\"position\")\n hedge_pos_obj = HedgePosition.objects.get(address=hedge_address)\n\n update_hedge_obj = True\n\n funding_proposal = HedgeFundingProposal()\n if position == \"hedge\" and hedge_pos_obj.hedge_funding_proposal:\n funding_proposal = hedge_pos_obj.hedge_funding_proposal\n update_hedge_obj = False\n elif position == \"long\" and hedge_pos_obj.long_funding_proposal:\n funding_proposal = hedge_pos_obj.long_funding_proposal\n update_hedge_obj = False\n\n funding_proposal.tx_hash = validated_data[\"tx_hash\"]\n funding_proposal.tx_index = validated_data[\"tx_index\"]\n funding_proposal.tx_value = validated_data[\"tx_value\"]\n funding_proposal.script_sig = validated_data[\"script_sig\"]\n funding_proposal.pubkey = validated_data[\"pubkey\"]\n funding_proposal.input_tx_hashes = validated_data.get(\"input_tx_hashes\", None)\n funding_proposal.save()\n\n if update_hedge_obj:\n if position == \"hedge\":\n hedge_pos_obj.hedge_funding_proposal = funding_proposal\n elif position == \"long\":\n hedge_pos_obj.long_funding_proposal = funding_proposal\n hedge_pos_obj.save()\n\n send_funding_tx_update(hedge_pos_obj, position=position)\n try:\n send_contract_require_funding(hedge_pos_obj)\n except Exception as exception:\n LOGGER.exception(exception)\n\n return funding_proposal\n\n\nclass HedgeFundingProposalSerializer(serializers.ModelSerializer):\n class Meta:\n model = HedgeFundingProposal\n fields = [\n \"tx_hash\",\n \"tx_index\",\n \"tx_value\",\n \"script_sig\",\n \"pubkey\",\n \"input_tx_hashes\",\n ]\n\n\nclass HedgeSettlementSerializer(serializers.ModelSerializer):\n settlement_message_timestamp = TimestampField()\n\n class Meta:\n model = HedgeSettlement\n fields = [\n \"spending_transaction\",\n \"settlement_type\",\n \"hedge_satoshis\",\n \"long_satoshis\",\n \"oracle_pubkey\",\n \"settlement_price\",\n \"settlement_price_sequence\",\n \"settlement_message_sequence\",\n \"settlement_message_timestamp\",\n ]\n\n\nclass SettlementServiceSerializer(serializers.ModelSerializer):\n class Meta:\n model = SettlementService\n fields = [\n \"domain\",\n \"scheme\",\n \"port\",\n \"hedge_signature\",\n \"long_signature\",\n \"auth_token\",\n ]\n\n def validate(self, data):\n if not data.get(\"hedge_signature\", None) and not data.get(\"long_signature\", None):\n raise serializers.ValidationError(\"hedge_signature or long_signature must be given\")\n return data\n\n\nclass HedgePositionFeeSerializer(serializers.ModelSerializer):\n class Meta:\n model = HedgePositionFee\n fields = [\n \"name\",\n \"description\",\n \"address\",\n \"satoshis\",\n ]\n extra_kwargs = {\n \"name\": {\n \"allow_blank\": True,\n },\n \"description\": {\n \"allow_blank\": True,\n },\n }\n\nclass 
HedgePositionFundingSerializer(serializers.ModelSerializer):\n settlement_txid = serializers.CharField(read_only=True, source=\"settlement__spending_transaction\")\n\n class Meta:\n model = HedgePositionFunding\n fields = [\n \"tx_hash\",\n \"funding_output\",\n \"funding_satoshis\",\n \"settlement_txid\",\n ]\n\n\nclass CancelMutualRedemptionSerializer(serializers.Serializer):\n position = serializers.CharField()\n signature = serializers.CharField(\n help_text=\"Signature of the declining/cancelling party \" \\\n \"with message as 'hedge_schnorr_sig'/'long_schnorr_sig'(depends on itiator)\"\n )\n\n def __init__(self, *args, hedge_position=None, **kwargs):\n self.hedge_position = hedge_position\n return super().__init__(*args, **kwargs)\n\n def validate(self, data):\n if not self.hedge_position:\n raise serializers.ValidationError(\"Invalid hedge position\")\n try:\n mutual_redemption = self.hedge_position.mutual_redemption\n except HedgePosition.mutual_redemption.RelatedObjectDoesNotExist:\n raise serializers.ValidationError(\"Invalid hedge position. Mutual redemption not found\")\n\n if mutual_redemption.tx_hash:\n raise serializers.ValidationError(\"Invalid hedge position. Mutual redemption completed\")\n\n position = data[\"position\"]\n signature = data[\"signature\"]\n\n message = \"\"\n if mutual_redemption.initiator == \"hedge\":\n message = mutual_redemption.hedge_schnorr_sig\n elif mutual_redemption.initiator == \"long\":\n message = mutual_redemption.long_schnorr_sig\n\n verifying_pubkey = \"\"\n if position == \"hedge\":\n verifying_pubkey = mutual_redemption.hedge_position.hedge_pubkey\n elif position == \"long\":\n verifying_pubkey = mutual_redemption.hedge_position.long_pubkey\n\n if not bitcoin.ecdsa_verify(message, signature, verifying_pubkey):\n raise serializers.ValidationError(f\"invalid signature on: {message}\")\n return data\n\n def save(self):\n validated_data = self.validated_data\n position = validated_data[\"position\"]\n instance = self.hedge_position.mutual_redemption\n initiator = instance.initiator\n redemption_type = instance.redemption_type\n self.hedge_position.mutual_redemption.delete()\n\n if position == initiator:\n action = \"cancelled\"\n else:\n action = \"declined\"\n\n send_mutual_redemption_update(instance, action=\"cancelled\")\n try:\n send_mutual_redemption_proposal_update(\n self.hedge_position,\n action=action,\n position=position,\n redemption_type=redemption_type,\n )\n except:\n pass\n\nclass MutualRedemptionSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = MutualRedemption\n fields = [\n \"initiator\",\n \"redemption_type\",\n \"hedge_satoshis\",\n \"long_satoshis\",\n \"hedge_schnorr_sig\",\n \"long_schnorr_sig\",\n \"settlement_price\",\n \"tx_hash\",\n ]\n\n extra_kwargs = {\n \"tx_hash\": {\n \"read_only\": True,\n },\n \"initiator\": {\n \"read_only\": True,\n }\n }\n\n def __init__(self, *args, hedge_position=None, **kwargs):\n self.hedge_position = hedge_position\n return super().__init__(*args, **kwargs)\n\n def validate_redemption_type(self, value):\n if self.instance and self.instance.redemption_type != value:\n raise serializers.ValidationError(\"Redemption type is not editable\")\n return value\n\n def validate_hedge_satoshis(self, value):\n if self.instance and self.instance.hedge_satoshis != value:\n raise serializers.ValidationError(\"Hedge satoshis is not editable\")\n return value\n\n def validate_long_satoshis(self, value):\n if self.instance and self.instance.long_satoshis != value:\n raise 
serializers.ValidationError(\"Long satoshis is not editable\")\n return value\n\n def validate_settlement_price(self, value):\n if self.instance and self.instance.settlement_price != value:\n raise serializers.ValidationError(\"Settlement price is not editable\")\n return value\n\n def validate(self, data):\n redemption_type = data.get(\"redemption_type\", None)\n settlement_price = data.get(\"settlement_price\", None)\n hedge_satoshis = data.get(\"hedge_satoshis\", None)\n long_satoshis = data.get(\"long_satoshis\", None)\n\n if not self.hedge_position.funding_tx_hash:\n raise serializers.ValidationError(\"Contract is not yet funded\")\n\n if not self.hedge_position.funding:\n funding_validation = validate_contract_funding(self.hedge_position.address)\n if not funding_validation[\"success\"] or not self.hedge_position.funding:\n raise serializers.ValidationError(\"Unable to verify funding transaction\")\n\n if redemption_type == MutualRedemption.TYPE_EARLY_MATURATION:\n if not settlement_price and settlement_price <= 0:\n raise serializers.ValidationError(f\"Settlement price required for type '{MutualRedemption.TYPE_EARLY_MATURATION}'\")\n\n elif redemption_type == MutualRedemption.TYPE_REFUND:\n if abs(hedge_satoshis - self.hedge_position.satoshis) > 1:\n raise serializers.ValidationError(f\"Hedge payout must be {self.hedge_position.satoshis} for type '{MutualRedemption.TYPE_REFUND}'\")\n elif abs(long_satoshis - self.hedge_position.long_input_sats) > 1:\n raise serializers.ValidationError(f\"Long payout must be {self.hedge_position.long_input_sats} for type '{MutualRedemption.TYPE_REFUND}'\")\n\n # calculations from anyhedge library leaves 1175 for tx fee\n tx_fee = 1175\n expected_total_output = self.hedge_position.funding.funding_satoshis - tx_fee\n total_output = hedge_satoshis + long_satoshis\n if expected_total_output != total_output:\n raise serializers.ValidationError(f\"Payout satoshis is not equal to {expected_total_output}\")\n\n return data\n\n def create(self, validated_data):\n new_proposal = False\n instance = MutualRedemption.objects.filter(hedge_position=self.hedge_position).first()\n\n if not instance:\n new_proposal = True\n instance = MutualRedemption(hedge_position=self.hedge_position, **validated_data)\n\n if instance.long_satoshis != validated_data[\"long_satoshis\"] or \\\n instance.hedge_satoshis != validated_data[\"hedge_satoshis\"] or \\\n instance.redemption_type != validated_data[\"redemption_type\"]:\n\n instance.hedge_schnorr_sig = None\n instance.long_schnorr_sig = None\n new_proposal = True\n\n instance.long_satoshis = validated_data[\"long_satoshis\"]\n instance.hedge_satoshis = validated_data[\"hedge_satoshis\"]\n instance.redemption_type = validated_data[\"redemption_type\"]\n instance.settlement_price = validated_data.get(\"settlement_price\", None)\n\n if validated_data.get(\"hedge_schnorr_sig\", None):\n instance.hedge_schnorr_sig = validated_data[\"hedge_schnorr_sig\"]\n\n if validated_data.get(\"long_schnorr_sig\", None):\n instance.long_schnorr_sig = validated_data[\"long_schnorr_sig\"]\n\n if instance.hedge_schnorr_sig and not instance.long_schnorr_sig:\n instance.initiator = MutualRedemption.POSITION_HEDGE\n elif instance.long_schnorr_sig and not instance.hedge_schnorr_sig:\n instance.initiator = MutualRedemption.POSITION_LONG\n elif not instance.long_schnorr_sig and not instance.hedge_schnorr_sig:\n serializers.ValidationError(\"Unable to resolve initiator\")\n\n instance.save()\n send_mutual_redemption_update(instance, action=\"created\")\n 
if new_proposal:\n try:\n send_mutual_redemption_proposal_update(\n self.hedge_position,\n action=\"proposed\",\n position=instance.initiator,\n redemption_type=instance.redemption_type,\n )\n except Exception:\n pass\n return instance\n\nclass HedgePositionMetadataSerializer(serializers.ModelSerializer):\n class Meta:\n model = HedgePositionMetadata\n fields = [\n \"position_taker\",\n \"liquidity_fee\",\n \"network_fee\",\n \"total_hedge_funding_sats\",\n \"total_long_funding_sats\",\n ]\n\nclass CancelHedgePositionSerializer(serializers.Serializer):\n position = serializers.CharField()\n signature = serializers.CharField()\n timestamp = TimestampField(\n help_text=\"Used as a part of the message to sign in generating signature: '{unix_timestamp}:{address}'. \" \\\n \"Timestamp must be within 2 minutes of the current timestamp\"\n )\n\n def __init__(self, *args, hedge_position=None, **kwargs):\n self.hedge_position = hedge_position\n super().__init__(*args, **kwargs)\n\n def validate_position(self, value):\n if value != \"hedge\" and value != \"long\":\n raise serializers.ValidationError(\"Position must be \\\"hedge\\\" or \\\"long\\\"\")\n return value\n\n def validate_timestamp(self, value):\n if abs(timezone.now() - value) > timedelta(minutes=2):\n raise serializers.ValidationError(\"timestamp too far from current timestamp\")\n return value\n\n def validate(self, data):\n if not isinstance(self.hedge_position, HedgePosition):\n raise serializers.ValidationError(\"invalid hedge position\")\n\n if self.hedge_position.funding_tx_hash:\n raise serializers.ValidationError(\"hedge position is funded\")\n\n position = data[\"position\"]\n signature = data[\"signature\"]\n timestamp = data[\"timestamp\"]\n unix_timestamp = int(timestamp.timestamp())\n\n if self.hedge_position.cancelled_at is not None:\n raise serializers.ValidationError(\"contract is already cancelled\")\n\n verifying_pubkey = None\n if position == \"hedge\":\n verifying_pubkey = self.hedge_position.hedge_pubkey\n elif position == \"long\":\n verifying_pubkey = self.hedge_position.long_pubkey\n\n message = f\"{unix_timestamp}:{self.hedge_position.address}\"\n if not bitcoin.ecdsa_verify(message, signature, verifying_pubkey):\n raise serializers.ValidationError(f\"invalid signature on: {message}\")\n\n return data\n\n def save(self):\n validated_data = self.validated_data\n timestamp = validated_data[\"timestamp\"]\n position = validated_data[\"position\"]\n self.hedge_position.cancelled_at = timestamp\n self.hedge_position.cancelled_by = position\n self.hedge_position.save()\n\n send_contract_cancelled_update(self.hedge_position)\n try:\n send_contract_cancelled(self.hedge_position)\n except Exception:\n pass\n return self.hedge_position\n\n\nclass HedgePositionSerializer(serializers.ModelSerializer):\n hedge_funding_proposal = HedgeFundingProposalSerializer(required=False)\n long_funding_proposal = HedgeFundingProposalSerializer(required=False)\n start_timestamp = TimestampField()\n maturity_timestamp = TimestampField()\n cancelled_at = TimestampField(read_only=True)\n\n settlements = HedgeSettlementSerializer(read_only=True, many=True)\n settlement_service = SettlementServiceSerializer()\n check_settlement_service = serializers.BooleanField(default=True, required=False, write_only=True)\n fees = HedgePositionFeeSerializer(many=True, required=False)\n fundings = HedgePositionFundingSerializer(read_only=True, many=True)\n mutual_redemption = MutualRedemptionSerializer(read_only=True)\n metadata = HedgePositionMetadataSerializer()\n 
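# fallback price data pulled from the related PriceOracleMessage record; see get_price_oracle_message() below\n 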
price_oracle_message = serializers.SerializerMethodField(\n help_text=\"Provided only when 'starting_oracle_message' or 'starting_oracle_signature' is empty\",\n )\n\n class Meta:\n model = HedgePosition\n fields = [\n \"id\",\n \"address\",\n \"anyhedge_contract_version\",\n \"satoshis\",\n \"start_timestamp\",\n \"maturity_timestamp\",\n \"hedge_wallet_hash\",\n \"hedge_address\",\n \"hedge_pubkey\",\n \"hedge_address_path\",\n \"long_wallet_hash\",\n \"long_address\",\n \"long_pubkey\",\n \"long_address_path\",\n \"oracle_pubkey\",\n \"start_price\",\n \"low_liquidation_multiplier\",\n \"high_liquidation_multiplier\",\n\n \"starting_oracle_message\",\n \"starting_oracle_signature\",\n\n \"funding_tx_hash\",\n \"funding_tx_hash_validated\",\n \"hedge_funding_proposal\",\n \"long_funding_proposal\",\n \"cancelled_at\",\n \"cancelled_by\",\n\n \"settlements\",\n \"settlement_service\",\n \"check_settlement_service\",\n \"fees\",\n \"fundings\",\n \"mutual_redemption\",\n \"metadata\",\n \"price_oracle_message\",\n ]\n extra_kwargs = {\n \"address\": {\n \"validators\": [ValidAddress(addr_type=ValidAddress.TYPE_CASHADDR)]\n },\n \"hedge_address\": {\n \"validators\": [ValidAddress(addr_type=ValidAddress.TYPE_CASHADDR)]\n },\n \"long_address\": {\n \"validators\": [ValidAddress(addr_type=ValidAddress.TYPE_CASHADDR)]\n },\n \"hedge_wallet_hash\": {\n \"allow_blank\": True\n },\n \"long_wallet_hash\": {\n \"allow_blank\": True\n },\n \"starting_oracle_message\": {\n \"required\": True,\n },\n \"starting_oracle_signature\": {\n \"required\": True,\n },\n \"funding_tx_hash\": {\n \"allow_blank\": True\n },\n \"funding_tx_hash_validated\": {\n \"read_only\": True\n },\n \"cancelled_by\": {\n \"read_only\": True,\n }\n }\n\n def get_price_oracle_message(self, obj):\n if obj.starting_oracle_message and obj.starting_oracle_signature:\n return\n\n if obj.price_oracle_message:\n return PriceOracleMessageSerializer(obj.price_oracle_message).data\n\n def validate(self, data):\n contract_address = data.get(\"address\", None)\n oracle_pubkey = data.get(\"oracle_pubkey\", None)\n settlement_service = data.get(\"settlement_service\", None)\n check_settlement_service = data.get(\"check_settlement_service\", None)\n hedge_pubkey = data.get(\"hedge_pubkey\", None)\n long_pubkey = data.get(\"long_pubkey\")\n\n if not match_pubkey_to_cash_address(data[\"hedge_pubkey\"], data[\"hedge_address\"]):\n raise serializers.ValidationError(\"hedge public key & address does not match\")\n\n if not match_pubkey_to_cash_address(data[\"long_pubkey\"], data[\"long_address\"]):\n raise serializers.ValidationError(\"long public key & address does not match\")\n\n if settlement_service and check_settlement_service:\n access_pubkey = \"\"\n access_signature = \"\"\n if settlement_service.get(\"hedge_signature\", None):\n access_signature = settlement_service[\"hedge_signature\"]\n access_pubkey = hedge_pubkey\n elif settlement_service.get(\"long_signature\", None):\n access_signature = settlement_service[\"long_signature\"]\n access_pubkey = long_pubkey\n contract_data = get_contract_status(\n contract_address,\n access_pubkey,\n access_signature,\n settlement_service_scheme=settlement_service[\"scheme\"],\n settlement_service_domain=settlement_service[\"domain\"],\n settlement_service_port=settlement_service[\"port\"],\n authentication_token=settlement_service.get(\"auth_token\", None),\n )\n if not contract_data or contract_data.get(\"address\", None) != contract_address:\n raise serializers.ValidationError(\"Unable to verify 
contract from external settlement service\")\n elif not settlement_service:\n if not Oracle.objects.filter(pubkey=oracle_pubkey).exists():\n raise serializers.ValidationError(\"Unknown 'oracle_pubkey', must provide settlement service\")\n\n return data\n\n @transaction.atomic()\n def create(self, validated_data):\n validated_data.pop(\"check_settlement_service\", None)\n settlement_service_data = validated_data.pop(\"settlement_service\", None)\n fees_data = validated_data.pop(\"fees\", [])\n hedge_funding_proposal_data = validated_data.pop(\"hedge_funding_proposal\", None)\n long_funding_proposal_data = validated_data.pop(\"long_funding_proposal\", None)\n metadata_data = validated_data.pop(\"metadata\", None)\n\n instance = super().create(validated_data)\n save_instance = False\n\n if settlement_service_data is not None:\n settlement_service_data[\"hedge_position\"] = instance\n SettlementService.objects.create(**settlement_service_data)\n\n if isinstance(fees_data, list) and len(fees_data):\n for fee_data in fees_data:\n fee_data[\"hedge_position\"] = instance\n HedgePositionFee.objects.create(**fee_data)\n\n if hedge_funding_proposal_data is not None:\n hedge_funding_proposal = HedgeFundingProposal.objects.create(**hedge_funding_proposal_data)\n instance.hedge_funding_proposal = hedge_funding_proposal\n save_instance = True\n\n if long_funding_proposal_data is not None:\n long_funding_proposal = HedgeFundingProposal.objects.create(**long_funding_proposal_data)\n instance.long_funding_proposal = long_funding_proposal\n save_instance = True\n\n if metadata_data is not None:\n metadata_data[\"hedge_position\"] = instance \n HedgePositionMetadata.objects.create(**metadata_data)\n\n if save_instance:\n instance.save()\n\n return instance\n\n\nclass HedgePositionOfferCounterPartySerializer(serializers.ModelSerializer):\n calculated_hedge_sats = serializers.SerializerMethodField()\n price_oracle_message = serializers.SerializerMethodField(\n help_text=\"Provided only when 'starting_oracle_message' or 'starting_oracle_signature' is empty\",\n )\n\n class Meta:\n model = HedgePositionOfferCounterParty\n fields = [\n \"settlement_deadline\",\n \"contract_address\",\n \"anyhedge_contract_version\",\n \"wallet_hash\",\n \"address\",\n \"pubkey\",\n \"address_path\",\n \"price_message_timestamp\",\n \"price_value\",\n \"starting_oracle_message\",\n \"starting_oracle_signature\",\n \"oracle_message_sequence\",\n \"settlement_service_fee\",\n \"settlement_service_fee_address\",\n \"calculated_hedge_sats\",\n \"price_oracle_message\",\n ]\n\n extra_kwargs = {\n \"contract_address\": {\n \"read_only\": True,\n },\n \"anyhedge_contract_version\": {\n \"read_only\": True,\n },\n \"price_message_timestamp\": {\n \"read_only\": True,\n },\n \"price_value\": {\n \"read_only\": True,\n },\n \"settlement_deadline\": {\n \"read_only\": True,\n },\n \"settlement_service_fee\": {\n \"read_only\": True,\n },\n \"settlement_service_fee_address\": {\n \"read_only\": True,\n },\n \"oracle_message_sequence\": {\n \"required\": False,\n },\n \"starting_oracle_message\": {\n \"read_only\": True,\n },\n \"starting_oracle_signature\": {\n \"read_only\": True,\n },\n }\n\n def __init__(self, *args, hedge_position_offer=None, **kwargs):\n self.hedge_position_offer = hedge_position_offer\n super().__init__(*args, **kwargs)\n\n def get_calculated_hedge_sats(self, obj):\n if obj.hedge_position_offer.position == HedgePositionOffer.POSITION_LONG:\n return calculate_hedge_sats(\n long_sats=obj.hedge_position_offer.satoshis,\n 
low_price_mult=obj.hedge_position_offer.low_liquidation_multiplier,\n price_value=obj.price_value,\n )\n\n def get_price_oracle_message(self, obj):\n if obj.starting_oracle_message and obj.starting_oracle_signature:\n return\n\n if obj.price_oracle_message:\n return PriceOracleMessageSerializer(obj.price_oracle_message).data\n\n\n def validate_hedge_position_offer_id(self, value):\n try:\n hedge_position_offer = HedgePositionOffer.objects.get(id=value)\n if hedge_position_offer.status != HedgePositionOffer.STATUS_PENDING:\n raise serializers.ValidationError(\"hedge position offer is no longer active\")\n except HedgePositionOffer.DoesNotExist:\n raise serializers.ValidationError(\"hedge position offer not found\")\n\n return value\n\n def validate(self, data):\n if not isinstance(self.hedge_position_offer, HedgePositionOffer):\n raise serializers.ValidationError(\"invalid hedge position offer\")\n\n if self.hedge_position_offer.status != HedgePositionOffer.STATUS_PENDING:\n raise serializers.ValidationError(\"hedge position offer is no longer active\")\n\n if not match_pubkey_to_cash_address(data[\"pubkey\"], data[\"address\"]):\n raise serializers.ValidationError(\"public key & address does not match\")\n return data\n\n @transaction.atomic()\n def create(self, validated_data):\n validated_data[\"hedge_position_offer\"] = self.hedge_position_offer\n\n # get latest price data or price data from oracle_message_sequence\n oracle_message_sequence = validated_data.pop(\"oracle_message_sequence\", None)\n price_oracle_message = self.get_price_message(\n self.hedge_position_offer.oracle_pubkey,\n oracle_message_sequence=oracle_message_sequence,\n )\n\n # construct contract from js scripts to get address & anyhedge_contract_version\n contract_creation_params = {\n \"taker_side\": \"long\" if self.hedge_position_offer.position == \"hedge\" else \"hedge\",\n \"low_price_multiplier\": self.hedge_position_offer.low_liquidation_multiplier,\n \"high_price_multiplier\": self.hedge_position_offer.high_liquidation_multiplier,\n \"duration_seconds\": self.hedge_position_offer.duration_seconds,\n \"oracle_pubkey\": self.hedge_position_offer.oracle_pubkey,\n \"price_oracle_message_sequence\": price_oracle_message.message_sequence,\n }\n if self.hedge_position_offer.position == HedgePositionOffer.POSITION_HEDGE:\n contract_creation_params[\"satoshis\"] = self.hedge_position_offer.satoshis\n contract_creation_params[\"hedge_address\"] = self.hedge_position_offer.address\n contract_creation_params[\"hedge_pubkey\"] = self.hedge_position_offer.pubkey\n contract_creation_params[\"short_address\"] = validated_data[\"address\"]\n contract_creation_params[\"short_pubkey\"] = validated_data[\"pubkey\"]\n else:\n calculated_hedge_sats = calculate_hedge_sats(\n long_sats=self.hedge_position_offer.satoshis,\n low_price_mult=self.hedge_position_offer.low_liquidation_multiplier,\n price_value=price_oracle_message.price_value,\n )\n contract_creation_params[\"satoshis\"] = calculated_hedge_sats\n contract_creation_params[\"hedge_address\"] = validated_data[\"address\"]\n contract_creation_params[\"hedge_pubkey\"] = validated_data[\"pubkey\"]\n contract_creation_params[\"short_address\"] = self.hedge_position_offer.address\n contract_creation_params[\"short_pubkey\"] = self.hedge_position_offer.pubkey\n\n create_contract_response = create_contract(**contract_creation_params)\n if not create_contract_response.get(\"success\", None):\n raise serializers.ValidationError(\"unable to construct contract\")\n contract_data = 
create_contract_response[\"contractData\"]\n\n validated_data[\"contract_address\"] = contract_data[\"address\"]\n validated_data[\"anyhedge_contract_version\"] = contract_data[\"version\"]\n validated_data[\"price_message_timestamp\"] = price_oracle_message.message_timestamp\n validated_data[\"price_value\"] = price_oracle_message.price_value\n validated_data[\"settlement_deadline\"] = timezone.now() + timedelta(minutes=15)\n validated_data[\"oracle_message_sequence\"] = price_oracle_message.message_sequence\n validated_data[\"starting_oracle_message\"] = contract_data[\"metadata\"][\"startingOracleMessage\"]\n validated_data[\"starting_oracle_signature\"] = contract_data[\"metadata\"][\"startingOracleSignature\"]\n\n settlement_service_fee = get_p2p_settlement_service_fee()\n if settlement_service_fee and \"satoshis\" in settlement_service_fee and \"address\" in settlement_service_fee:\n validated_data[\"settlement_service_fee\"] = settlement_service_fee[\"satoshis\"]\n validated_data[\"settlement_service_fee_address\"] = settlement_service_fee[\"address\"]\n\n instance = super().create(validated_data)\n instance.hedge_position_offer.status = HedgePositionOffer.STATUS_ACCEPTED\n instance.hedge_position_offer.save()\n\n send_hedge_position_offer_update(\n instance.hedge_position_offer,\n action=\"accepted\",\n metadata={ \"accepting_wallet_hash\": instance.wallet_hash }\n )\n return instance\n\n def get_price_message(self, oracle_pubkey, oracle_message_sequence=None):\n now = timezone.now()\n query_kwargs = {\n \"pubkey\": oracle_pubkey,\n }\n if oracle_message_sequence:\n query_kwargs[\"message_sequence\"] = oracle_message_sequence\n else:\n query_kwargs[\"message_timestamp__gte\"] = now - timedelta(seconds=60)\n\n price_oracle_message = PriceOracleMessage.objects.filter(**query_kwargs).order_by(\"-message_timestamp\").first()\n\n if not price_oracle_message:\n query_kwargs = { \"count\": 1 }\n oracle_obj = Oracle.objects.filter(pubkey=oracle_pubkey).first()\n if oracle_obj:\n query_kwargs[\"relay\"] = oracle_obj.relay\n query_kwargs[\"port\"] = oracle_obj.port\n\n if oracle_message_sequence:\n query_kwargs[\"min_message_sequence\"] = oracle_message_sequence\n query_kwargs[\"max_message_sequence\"] = oracle_message_sequence\n price_data = get_price_messages(oracle_pubkey, **query_kwargs)\n if len(price_data):\n price_oracle_message = save_price_oracle_message(oracle_pubkey, price_data[0])\n\n if not price_oracle_message:\n raise serializers.ValidationError(\"unable to resolve oracle price\")\n elif price_oracle_message.message_timestamp < now - timedelta(seconds=120):\n raise serializers.ValidationError(\"starting price is outdated\")\n return price_oracle_message\n\n\nclass HedgePositionOfferSerializer(serializers.ModelSerializer):\n status = serializers.CharField(read_only=True)\n hedge_position = HedgePositionSerializer(read_only=True)\n created_at = serializers.DateTimeField(read_only=True)\n counter_party_info = HedgePositionOfferCounterPartySerializer(read_only=True)\n\n class Meta:\n model = HedgePositionOffer\n fields = [\n \"id\",\n \"status\",\n \"position\",\n \"wallet_hash\",\n \"satoshis\",\n \"duration_seconds\",\n \"high_liquidation_multiplier\",\n \"low_liquidation_multiplier\",\n \"oracle_pubkey\",\n \"address\",\n \"pubkey\",\n \"address_path\",\n \"expires_at\",\n \"created_at\",\n \"hedge_position\",\n \"counter_party_info\",\n ]\n\n extra_kwargs = {\n \"position\": {\n \"required\": True,\n \"allow_blank\": False,\n },\n }\n\n def validate_wallet_hash(self, value):\n 
if self.instance and self.instance.wallet_hash != value:\n raise serializers.ValidationError(\"wallet_hash is not editable\")\n return value\n\n def validate(self, data):\n if self.instance and self.instance.status != HedgePositionOffer.STATUS_PENDING:\n raise serializers.ValidationError(f\"unable to edit in \\\"{self.instance.status}\\\" state\")\n if self.instance:\n pubkey = data.get(\"pubkey\", self.instance.pubkey)\n address = data.get(\"address\", self.instance.address)\n else:\n pubkey = data[\"pubkey\"]\n address = data[\"address\"]\n if not match_pubkey_to_cash_address(pubkey, address):\n raise serializers.ValidationError(\"public key & address does not match\")\n return data\n\n\nclass MatchHedgePositionSerializer(serializers.Serializer):\n wallet_hash = serializers.CharField(required=False)\n position = serializers.CharField()\n satoshis = serializers.IntegerField()\n duration_seconds = serializers.IntegerField()\n low_liquidation_multiplier = serializers.FloatField()\n high_liquidation_multiplier = serializers.FloatField()\n oracle_pubkey = serializers.CharField()\n\n similarity = serializers.FloatField(required=False, default=0.5)\n\n matching_position_offer = HedgePositionOfferSerializer(read_only=True)\n similar_position_offers = HedgePositionOfferSerializer(many=True, read_only=True)\n\n def validate_position(self, value):\n if value not in [HedgePositionOffer.POSITION_HEDGE, HedgePositionOffer.POSITION_LONG]:\n raise serializers.ValidationError(\"invalid position type\")\n return value\n\n def find_match(self):\n response = { **self.validated_data }\n # wallet_hash is optional; .get() avoids a KeyError when it is omitted\n response[\"matching_position_offer\"] = find_matching_position_offer(\n position=self.validated_data[\"position\"],\n amount=self.validated_data[\"satoshis\"],\n duration_seconds=self.validated_data[\"duration_seconds\"],\n low_liquidation_multiplier=self.validated_data[\"low_liquidation_multiplier\"],\n high_liquidation_multiplier=self.validated_data[\"high_liquidation_multiplier\"],\n exclude_wallet_hash=self.validated_data.get(\"wallet_hash\"),\n oracle_pubkey=self.validated_data[\"oracle_pubkey\"],\n )\n\n response[\"similar_position_offers\"] = []\n if not response[\"matching_position_offer\"]:\n response[\"similar_position_offers\"] = find_close_matching_offer_suggestion(\n position=self.validated_data[\"position\"],\n amount=self.validated_data[\"satoshis\"],\n duration_seconds=self.validated_data[\"duration_seconds\"],\n low_liquidation_multiplier=self.validated_data[\"low_liquidation_multiplier\"],\n high_liquidation_multiplier=self.validated_data[\"high_liquidation_multiplier\"],\n exclude_wallet_hash=self.validated_data.get(\"wallet_hash\"),\n oracle_pubkey=self.validated_data[\"oracle_pubkey\"],\n similarity=self.validated_data.get(\"similarity\", 0.5),\n )\n\n return response\n\n\nclass SettleHedgePositionOfferSerializer(serializers.Serializer):\n counter_party_funding_proposal = HedgeFundingProposalSerializer()\n\n def __init__(self, *args, hedge_position_offer=None, **kwargs):\n self.hedge_position_offer = hedge_position_offer\n super().__init__(*args, **kwargs)\n\n def validate(self, data):\n if not isinstance(self.hedge_position_offer, HedgePositionOffer):\n raise serializers.ValidationError(\"invalid hedge position offer\")\n\n if self.hedge_position_offer.status != HedgePositionOffer.STATUS_ACCEPTED:\n raise serializers.ValidationError(\"hedge position offer has not been accepted yet\")\n\n try:\n if not self.hedge_position_offer.counter_party_info:\n raise 
HedgePositionOffer.counter_party_info.RelatedObjectDoesNotExist\n except HedgePositionOffer.counter_party_info.RelatedObjectDoesNotExist:\n raise serializers.ValidationError(\"counter party info missing\")\n\n contract_data = compile_contract_from_hedge_position_offer(self.hedge_position_offer)\n funding_amounts = calculate_funding_amounts(contract_data, position=self.hedge_position_offer.position)\n\n settlement_service_fee = self.hedge_position_offer.counter_party_info.settlement_service_fee\n total_funding_amount = funding_amounts[\"long\"] + funding_amounts[\"hedge\"]\n total_input_sats = contract_data[\"metadata\"][\"hedgeInputInSatoshis\"] + contract_data[\"metadata\"][\"longInputInSatoshis\"]\n network_fee = total_funding_amount - total_input_sats\n if settlement_service_fee:\n network_fee -= settlement_service_fee\n\n data[\"network_fee\"] = network_fee\n\n counter_party_funding_proposal_data = data[\"counter_party_funding_proposal\"]\n funding_amount = counter_party_funding_proposal_data[\"tx_value\"]\n expected_amount = funding_amounts[\"long\" if self.hedge_position_offer.position == HedgePositionOffer.POSITION_HEDGE else \"hedge\"]\n if funding_amount != expected_amount:\n raise serializers.ValidationError(f\"invalid funding amount, expected {expected_amount} satoshis\")\n\n return data\n\n @transaction.atomic()\n def save(self):\n validated_data = self.validated_data\n\n # create hedge position instance\n contract_data = compile_contract_from_hedge_position_offer(self.hedge_position_offer)\n contract_metadata = contract_data[\"metadata\"]\n contract_parameters = contract_data[\"parameters\"]\n start_timestamp = self.hedge_position_offer.counter_party_info.price_message_timestamp\n maturity_timestamp = start_timestamp + timedelta(seconds=self.hedge_position_offer.duration_seconds)\n starting_oracle_message = contract_metadata[\"startingOracleMessage\"]\n starting_oracle_signature = contract_metadata[\"startingOracleSignature\"]\n\n\n hedge_position = HedgePosition.objects.create(\n address = contract_data[\"address\"],\n anyhedge_contract_version = contract_data[\"version\"],\n satoshis = contract_metadata[\"hedgeInputInSatoshis\"],\n start_timestamp = start_timestamp,\n maturity_timestamp = maturity_timestamp,\n hedge_address = contract_metadata[\"hedgePayoutAddress\"],\n hedge_pubkey = contract_parameters[\"hedgeMutualRedeemPublicKey\"],\n long_address = contract_metadata[\"longPayoutAddress\"],\n long_pubkey = contract_parameters[\"longMutualRedeemPublicKey\"],\n oracle_pubkey = contract_parameters[\"oraclePublicKey\"],\n start_price = contract_metadata[\"startPrice\"],\n starting_oracle_message = contract_metadata[\"startingOracleMessage\"],\n starting_oracle_signature = contract_metadata[\"startingOracleSignature\"],\n low_liquidation_multiplier = contract_metadata[\"lowLiquidationPriceMultiplier\"],\n high_liquidation_multiplier = contract_metadata[\"highLiquidationPriceMultiplier\"],\n )\n hedge_position.hedge_wallet_hash = self.hedge_position_offer.wallet_hash\n hedge_position.hedge_address_path = self.hedge_position_offer.address_path\n hedge_position.long_wallet_hash = self.hedge_position_offer.counter_party_info.wallet_hash\n hedge_position.long_address_path = self.hedge_position_offer.counter_party_info.address_path\n if self.hedge_position_offer.position == HedgePositionOffer.POSITION_LONG:\n hedge_position.hedge_wallet_hash, hedge_position.long_wallet_hash = hedge_position.long_wallet_hash, hedge_position.hedge_wallet_hash\n hedge_position.hedge_address_path, 
hedge_position.long_address_path = hedge_position.long_address_path, hedge_position.hedge_address_path\n\n # create funding proposal of counter party\n counter_party_funding_proposal_data = validated_data[\"counter_party_funding_proposal\"]\n counter_party_funding_proposal_obj = HedgeFundingProposal.objects.create(**counter_party_funding_proposal_data)\n if self.hedge_position_offer.position == HedgePositionOffer.POSITION_HEDGE:\n hedge_position.long_funding_proposal = counter_party_funding_proposal_obj\n else:\n hedge_position.hedge_funding_proposal = counter_party_funding_proposal_obj\n hedge_position.save()\n\n # create hedge position's fee, if available\n settlement_service_fee = self.hedge_position_offer.counter_party_info.settlement_service_fee\n settlement_service_fee_address = self.hedge_position_offer.counter_party_info.settlement_service_fee_address\n if settlement_service_fee and settlement_service_fee_address:\n HedgePositionFee.objects.create(\n hedge_position=hedge_position,\n name=\"Settlement Service\",\n description=\"Settlement service fee for Watchtower\",\n satoshis=settlement_service_fee,\n address=settlement_service_fee_address,\n )\n\n # create hedge position's metadata\n HedgePositionMetadata.objects.create(\n hedge_position=hedge_position,\n position_taker=self.hedge_position_offer.position,\n network_fee=validated_data.get(\"network_fee\", None),\n liquidity_fee=0,\n )\n\n hedge_position.refresh_from_db()\n self.hedge_position_offer.hedge_position = hedge_position\n self.hedge_position_offer.status = HedgePositionOffer.STATUS_SETTLED\n self.hedge_position_offer.save()\n send_hedge_position_offer_update(\n self.hedge_position_offer,\n action=\"settled\",\n metadata={\n \"address\": self.hedge_position_offer.hedge_position.address,\n }\n )\n\n try:\n send_position_offer_settled(self.hedge_position_offer)\n except Exception as exception:\n LOGGER.exception(exception)\n\n return hedge_position\n\n\nclass SubmitFundingTransactionSerializer(serializers.Serializer):\n hedge_position_address = serializers.CharField(validators=[ValidAddress(addr_type=ValidAddress.TYPE_CASHADDR)])\n tx_hash = serializers.CharField(validators=[ValidTxHash()], required=False)\n tx_hex = serializers.CharField(required=False)\n\n # field-level validator; must be named after the 'hedge_position_address' field and take self\n def validate_hedge_position_address(self, value):\n try:\n HedgePosition.objects.get(address=value)\n except HedgePosition.DoesNotExist:\n raise serializers.ValidationError(\"Hedge position does not exist\")\n\n return value\n\n def validate(self, data):\n hedge_position_address = data.get(\"hedge_position_address\", None)\n tx_hash = data.get(\"tx_hash\", None)\n tx_hex = data.get(\"tx_hex\", None)\n if not tx_hash and not tx_hex:\n raise serializers.ValidationError(\"tx_hash or tx_hex required\")\n\n # TODO: route for broadcasting tx_hex if necessary\n if tx_hash:\n funding_tx_validation = validate_funding_transaction(tx_hash, hedge_position_address)\n if not funding_tx_validation[\"valid\"]:\n raise serializers.ValidationError(f\"funding tx hash '{tx_hash}' invalid\")\n elif tx_hex:\n _tx_hash = get_tx_hash(tx_hex)\n funding_tx_validation = validate_funding_transaction(_tx_hash, hedge_position_address)\n if not funding_tx_validation[\"valid\"]:\n raise serializers.ValidationError(\"funding tx hex invalid\")\n\n return data\n\n @transaction.atomic()\n def save(self):\n validated_data = self.validated_data\n hedge_position_address = validated_data[\"hedge_position_address\"]\n\n tx_hash = validated_data.get(\"tx_hash\", None)\n if not tx_hash:\n tx_hash = 
get_tx_hash(validated_data[\"tx_hex\"])\n\n hedge_position_obj = HedgePosition.objects.get(address=hedge_position_address)\n\n if hedge_position_obj.funding_tx_hash != tx_hash:\n hedge_position_obj.funding_tx_hash_validated = False\n\n hedge_position_obj.funding_tx_hash = tx_hash\n hedge_position_obj.save()\n send_funding_tx_update(hedge_position_obj, tx_hash=hedge_position_obj.funding_tx_hash)\n return hedge_position_obj\n\n\nclass FundGeneralProcotolLPContractSerializer(serializers.Serializer):\n contract_address = serializers.CharField()\n position = serializers.ChoiceField(choices=[\"hedge\", \"long\"])\n hedge_wallet_hash = serializers.CharField(required=False)\n hedge_pubkey = serializers.CharField(required=False)\n hedge_address_path = serializers.CharField(required=False)\n\n long_wallet_hash = serializers.CharField(required=False)\n long_pubkey = serializers.CharField(required=False)\n long_address_path = serializers.CharField(required=False)\n oracle_message_sequence = serializers.IntegerField()\n liquidity_fee = serializers.IntegerField(required=False)\n\n settlement_service = SettlementServiceSerializer()\n funding_proposal = HedgeFundingProposalSerializer()\n\n def validate(self, data):\n position = data[\"position\"]\n hedge_wallet_hash = data.get(\"hedge_wallet_hash\", None)\n hedge_pubkey = data.get(\"hedge_pubkey\", None)\n long_wallet_hash = data.get(\"long_wallet_hash\", None)\n long_pubkey = data.get(\"long_pubkey\", None)\n\n if position == \"hedge\" and (not hedge_wallet_hash or not hedge_pubkey):\n raise serializers.ValidationError(\"'hedge_wallet_hash' and 'hedge_pubkey' are required when taking the 'hedge' position\")\n\n if position == \"long\" and (not long_wallet_hash or not long_pubkey):\n raise serializers.ValidationError(\"'long_wallet_hash' and 'long_pubkey' are required when taking the 'long' position\")\n\n return data\n\n @transaction.atomic()\n def save(self):\n validated_data = self.validated_data\n contract_address = validated_data[\"contract_address\"]\n position = validated_data[\"position\"]\n hedge_wallet_hash = validated_data.get(\"hedge_wallet_hash\", None)\n hedge_pubkey = validated_data.get(\"hedge_pubkey\", None)\n hedge_address_path = validated_data.get(\"hedge_address_path\", None)\n long_wallet_hash = validated_data.get(\"long_wallet_hash\", None)\n long_pubkey = validated_data.get(\"long_pubkey\", None)\n long_address_path = validated_data.get(\"long_address_path\", None)\n\n oracle_message_sequence = validated_data[\"oracle_message_sequence\"]\n liquidity_fee = validated_data.get(\"liquidity_fee\", None)\n settlement_service = validated_data[\"settlement_service\"]\n funding_proposal = validated_data[\"funding_proposal\"]\n\n access_pubkey = \"\"\n access_signature = \"\"\n if settlement_service.get(\"hedge_signature\", None):\n access_signature = settlement_service[\"hedge_signature\"]\n access_pubkey = hedge_pubkey\n elif settlement_service.get(\"long_signature\", None):\n access_signature = settlement_service[\"long_signature\"]\n access_pubkey = long_pubkey\n\n contract_data = get_contract_status(\n contract_address,\n access_pubkey,\n access_signature,\n settlement_service_scheme=settlement_service[\"scheme\"],\n settlement_service_domain=settlement_service[\"domain\"],\n settlement_service_port=settlement_service[\"port\"],\n authentication_token=settlement_service.get(\"auth_token\", None),\n )\n\n if not contract_data or contract_data.get(\"address\") != contract_address:\n raise serializers.ValidationError(\"Contract address from settlement does not match\")\n\n 
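# the contract has been verified against the settlement service; unpack its metadata next\n 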
contract_metadata = contract_data[\"metadata\"]\n contract_parameters = contract_data[\"parameters\"]\n start_timestamp = contract_parameters[\"startTimestamp\"]\n # NOTE: handling old & new implementation since settlement service might be using the old one\n # remove handling old one after stable\n if \"hedgeInputInSatoshis\" in contract_metadata:\n satoshis = contract_metadata[\"hedgeInputInSatoshis\"]\n maturity_timestamp = contract_parameters[\"maturityTimestamp\"]\n\n hedge_address = contract_metadata[\"hedgePayoutAddress\"]\n hedge_pubkey = contract_parameters[\"hedgeMutualRedeemPublicKey\"]\n long_address = contract_metadata[\"longPayoutAddress\"]\n long_pubkey = contract_parameters[\"longMutualRedeemPublicKey\"]\n\n starting_oracle_message = contract_metadata[\"startingOracleMessage\"]\n starting_oracle_signature = contract_metadata[\"startingOracleSignature\"]\n fees = []\n if isinstance(contract_data.get(\"fees\"), list):\n for fee in contract_data[\"fees\"]:\n fees.append({\n \"name\": fee[\"name\"],\n \"description\": fee[\"description\"],\n \"address\": fee[\"address\"],\n \"satoshis\": fee[\"satoshis\"],\n })\n else:\n satoshis = contract_metadata[\"hedgeInputSats\"]\n maturity_timestamp = start_timestamp + contract_metadata[\"duration\"]\n\n hedge_address = contract_metadata[\"hedgeAddress\"]\n hedge_pubkey = contract_metadata[\"hedgePublicKey\"]\n long_address = contract_metadata[\"longAddress\"]\n long_pubkey = contract_metadata[\"longPublicKey\"]\n\n starting_oracle_message = \"\"\n starting_oracle_signature = \"\"\n price_oracle_message = PriceOracleMessage.objects.filter(\n pubkey=contract_metadata[\"oraclePublicKey\"],\n message_sequence=oracle_message_sequence,\n ).first()\n if price_oracle_message:\n starting_oracle_message = price_oracle_message.message\n starting_oracle_signature = price_oracle_message.signature\n\n fees = []\n if contract_data.get(\"fee\", None):\n fees.append({\n \"address\": contract_data[\"fee\"][\"address\"],\n \"satoshis\": contract_data[\"fee\"][\"satoshis\"],\n })\n\n hedge_position_data = dict(\n address=contract_data[\"address\"],\n anyhedge_contract_version=contract_data[\"version\"],\n satoshis=satoshis,\n start_timestamp=start_timestamp,\n maturity_timestamp=maturity_timestamp,\n hedge_wallet_hash=hedge_wallet_hash or \"\",\n hedge_address=hedge_address,\n hedge_address_path=hedge_address_path,\n hedge_pubkey=hedge_pubkey,\n long_wallet_hash=long_wallet_hash or \"\",\n long_address=long_address,\n long_address_path=long_address_path,\n long_pubkey=long_pubkey,\n oracle_pubkey=contract_parameters[\"oraclePublicKey\"],\n start_price=contract_metadata[\"startPrice\"],\n low_liquidation_multiplier=contract_metadata[\"lowLiquidationPriceMultiplier\"],\n high_liquidation_multiplier=contract_metadata[\"highLiquidationPriceMultiplier\"],\n starting_oracle_message=starting_oracle_message,\n starting_oracle_signature=starting_oracle_signature,\n funding_tx_hash=\"\",\n settlement_service=settlement_service,\n check_settlement_service=False,\n metadata=dict(position_taker=position),\n fees=fees,\n )\n\n if position == \"hedge\":\n hedge_position_data[\"hedge_funding_proposal\"] = funding_proposal\n hedge_position_data[\"metadata\"][\"total_hedge_funding_sats\"] = funding_proposal[\"tx_value\"]\n elif position == \"long\":\n hedge_position_data[\"long_funding_proposal\"] = funding_proposal\n hedge_position_data[\"metadata\"][\"total_long_funding_sats\"] = funding_proposal[\"tx_value\"]\n\n if liquidity_fee is not None:\n 
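# record the liquidity provider's fee in the position's metadata\n 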
hedge_position_data[\"metadata\"][\"liquidity_fee\"] = liquidity_fee\n\n hedge_position_serializer = HedgePositionSerializer(data=hedge_position_data)\n hedge_position_serializer.is_valid(raise_exception=True)\n hedge_position_obj = hedge_position_serializer.save()\n\n # this must be at the last part as much as possible\n funding_response = fund_hedge_position(\n contract_data,\n {\n \"txHash\": funding_proposal[\"tx_hash\"],\n \"txIndex\": funding_proposal[\"tx_index\"],\n \"txValue\": funding_proposal[\"tx_value\"],\n \"scriptSig\": funding_proposal[\"script_sig\"],\n \"publicKey\": funding_proposal[\"pubkey\"],\n \"inputTxHashes\": funding_proposal[\"input_tx_hashes\"],\n },\n oracle_message_sequence,\n position=position,\n )\n if not funding_response[\"success\"]:\n error = \"Error in funding hedge position\"\n if funding_response.get(\"error\", None):\n error += f\". {funding_response['error']}\"\n raise serializers.ValidationError(error)\n\n hedge_position_obj.funding_tx_hash = funding_response[\"fundingTransactionHash\"]\n hedge_position_obj.save()\n\n validate_contract_funding.delay(hedge_position_obj.address)\n parse_contract_liquidity_fee.delay(hedge_position_obj.address, hard_update=False)\n return hedge_position_obj\n\n\nclass OracleSerializer(serializers.ModelSerializer):\n class Meta:\n model = Oracle\n fields = [\n \"active\",\n \"pubkey\",\n \"asset_name\",\n \"asset_currency\",\n \"asset_decimals\",\n ]\n\nclass PriceOracleMessageSerializer(serializers.ModelSerializer):\n message_timestamp = TimestampField()\n\n class Meta:\n model = PriceOracleMessage\n fields = [\n \"pubkey\",\n \"message_timestamp\",\n \"price_value\",\n \"price_sequence\",\n \"message_sequence\",\n \"message\",\n \"signature\",\n ]\n","repo_name":"paytaca/watchtower-cash","sub_path":"anyhedge/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":57998,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"4"} +{"seq_id":"5354289893","text":"import itertools\nimport sys\n\ninput = sys.stdin.readline\nN = int(input())\nss = []\nbs = []\nanswer = float('inf')\n\nfor _ in range(N):\n s, b = map(int, input().split())\n ss.append(s)\n bs.append(b)\n\nfor mask in itertools.product([0,1], repeat=N):\n ns = 1\n nb = 0\n if sum(mask): # 재료가 있는 경우\n for i in range(N):\n if mask[i]:\n ns *= ss[i]\n nb += bs[i]\n answer = min(answer, abs(ns-nb))\n\nprint(answer)\n","repo_name":"gabozako/AlgoQueen","sub_path":"suy2on/icpc/brute_force/4_2.py","file_name":"4_2.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"30007128540","text":"from django import forms\nfrom django.contrib.auth import get_user_model\nfrom django.core.mail import send_mail\n\n\nclass ContactForm(forms.Form):\n name = forms.CharField(\n widget=forms.TextInput(attrs={'placeholder': 'Your name'}))\n email = forms.EmailField(\n widget=forms.TextInput(attrs={'placeholder': 'Your email address'}))\n subject = forms.CharField(widget=forms.TextInput(\n attrs={'placeholder': 'The subject of the message'}))\n message = forms.CharField(\n widget=forms.Textarea(attrs={'placeholder': 'Type something nice'}))\n\n def send_email(self):\n name = self.cleaned_data.get('name')\n email = self.cleaned_data.get('email')\n sent_from = '{name} {email}'.format(name=name, email=email)\n subject = self.cleaned_data.get('subject')\n message = self.cleaned_data.get('message')\n recipient = 
get_user_model().objects.get(pk=1).email\n\n send_mail(subject, message, sent_from, [recipient])","repo_name":"matachi/image-gallery","sub_path":"imagegallery/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"2912159947","text":"bl_info = {\n    \"name\": \"PyClone\",\n    \"author\": \"Andrew Peel\",\n    \"version\": (0, 2, 0),\n    \"blender\": (2, 91, 0),\n    \"location\": \"3D Viewport Sidebar and File Browser\",\n    \"description\": \"This is an asset management and development engine\",\n    \"warning\": \"\",\n    \"wiki_url\": \"\",\n    \"category\": \"Asset Management\",\n}\n\nimport bpy\nfrom bpy.app.handlers import persistent\nfrom .ui import pc_filebrowser_ui\nfrom .ui import pc_lists\nfrom .ui import pc_view3d_ui_menu\nfrom .ui import pc_view3d_ui_sidebar_object\nfrom .ui import pc_view3d_ui_sidebar_assemblies\nfrom .ops import pc_assembly\nfrom .ops import pc_driver\nfrom .ops import pc_prompts\nfrom .ops import pc_library\nfrom .ops import pc_material\nfrom .ops import pc_object\nfrom .ops import pc_general\nfrom .ops import pc_window_manager\nfrom . import pyclone_props\n\n@persistent\ndef load_driver_functions(scene):\n    \"\"\" Load Default Drivers\n    \"\"\"\n    import inspect\n    from . import pyclone_driver_functions\n    for name, obj in inspect.getmembers(pyclone_driver_functions):\n        if name not in bpy.app.driver_namespace:\n            bpy.app.driver_namespace[name] = obj\n\naddon_keymaps = []\n\ndef register():\n    pc_filebrowser_ui.register()\n    pc_lists.register()\n    pc_view3d_ui_menu.register()\n    pc_view3d_ui_sidebar_object.register()\n    pc_view3d_ui_sidebar_assemblies.register()\n    pc_assembly.register()\n    pc_driver.register()\n    pc_prompts.register()\n    pc_library.register()\n    pc_material.register()\n    pc_object.register()\n    pc_general.register()\n    pc_window_manager.register()\n    pyclone_props.register()\n    bpy.app.handlers.load_post.append(load_driver_functions)\n\n    wm = bpy.context.window_manager\n    kc = wm.keyconfigs.addon\n    if kc:\n        km = kc.keymaps.new(name='3D View',space_type = 'VIEW_3D')\n        kmi = km.keymap_items.new('wm.drag_and_drop', type = 'P', value='PRESS',shift=False)\n        addon_keymaps.append((km,kmi))\n    \ndef unregister():\n    for km,kmi in addon_keymaps:\n        km.keymap_items.remove(kmi)\n    addon_keymaps.clear()\n\n    pc_filebrowser_ui.unregister()\n    pc_lists.unregister()\n    pc_view3d_ui_menu.unregister()\n    pc_view3d_ui_sidebar_object.unregister()\n    pc_view3d_ui_sidebar_assemblies.unregister()\n    pc_assembly.unregister()\n    pc_driver.unregister()\n    pc_prompts.unregister()\n    pc_library.unregister()\n    pc_material.unregister()\n    pc_object.unregister()\n    pc_general.unregister()\n    pc_window_manager.unregister()\n    pyclone_props.unregister()\n    bpy.app.handlers.load_post.remove(load_driver_functions)  # remove the handler added in register()","repo_name":"Moonlight63/blender-pyclone-addons","sub_path":"PyClone/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"15024164642","text":"from tensorflow import keras\nimport numpy as np\nimport cv2\nimport csv\nfrom combine_images import IMAGES_PER_FILE\n\nCLASSIFIER_RESOLUTION = 128\nSHAPES = [\"Circle\", \"Semicircle\", \"Quarter Circle\", \"Triangle\", \"Square\", \"Rectangle\", \"Trapezoid\", \"Pentagon\", \"Hexagon\", \"Heptagon\", \"Octagon\", \"Star\", \"Cross\"]\nALPHANUMERICS = [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\", 
\"K\", \"L\", \"M\", \"N\", \"O\", \"P\", \"Q\", \"R\", \"S\", \"T\", \"U\", \"V\", \"W\", \"X\", \"Y\", \"Z\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"0\"]\nCOLORS = [\"White\", \"Black\", \"Gray\", \"Red\", \"Blue\", \"Green\", \"Yellow\", \"Purple\", \"Brown\", \"Orange\"]\nOUTPUT_LENGTH = {\n \"orientation\": 18,\n \"shapeColor\": 10,\n \"shape\": 13,\n \"alphanumericColor\": 10,\n \"alphanumeric\": 36,\n}\nEDGE_MIN = 100\nEDGE_MAX = 200\nEDGE_APERTURE_SIZE = 3\nEDGE_L2_GRADIENT = False\n\nclass DataGenerator(keras.utils.Sequence):\n def __init__(self, list_IDs, validation, characteristic, edge):\n self.list_IDs = list_IDs\n self.validation = validation\n self.characteristic = characteristic\n self.edge = edge\n\n def __len__(self):\n return len(self.list_IDs)\n\n def __getitem__(self, index):\n ID = self.list_IDs[index]\n combined_image = cv2.imread(f\"Cropped{' Validation' if self.validation else ''} Screenshots/{ID}.png\")\n csv_file = open(f\"Cropped{' Validation' if self.validation else ''} Screenshots/{ID}.csv\")\n csv_reader = csv.reader(csv_file)\n next(csv_reader)\n X = np.zeros((IMAGES_PER_FILE, CLASSIFIER_RESOLUTION, CLASSIFIER_RESOLUTION, 1 if self.edge else 3))\n Y = np.zeros((IMAGES_PER_FILE, OUTPUT_LENGTH[self.characteristic]))\n for i in range(IMAGES_PER_FILE):\n image = combined_image[:, i * CLASSIFIER_RESOLUTION:(i + 1) * CLASSIFIER_RESOLUTION]\n if self.edge:\n image = cv2.Canny(image, EDGE_MIN, EDGE_MAX, apertureSize=EDGE_APERTURE_SIZE, L2gradient=EDGE_L2_GRADIENT)\n else:\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n X[i] = image.reshape(X[i].shape)\n row = next(csv_reader)\n if self.characteristic == \"orientation\":\n Y[i][int(row[0]) // 20] = 1\n elif self.characteristic == \"shapeColor\":\n Y[i][COLORS.index(row[1])] = 1\n elif self.characteristic == \"shape\":\n Y[i][SHAPES.index(row[2])] = 1\n elif self.characteristic == \"alphanumericColor\":\n Y[i][COLORS.index(row[3])] = 1\n elif self.characteristic == \"alphanumeric\":\n Y[i][ALPHANUMERICS.index(row[4])] = 1\n X /= 255\n csv_file.close()\n return X, Y\n","repo_name":"JhihYangWu/AzA-Computer-Vision-2022","sub_path":"Standard Object Detection/Classification and Localization/Classification Only/Seperate CNNs/data_generator.py","file_name":"data_generator.py","file_ext":"py","file_size_in_byte":2677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"23598962125","text":"import numpy as np\nimport cv2 as cv\n\nclass CountMussel:\n def __init__(self, img,\n min_aspect_ratio=0.95,\n max_aspect_ratio=1.05):\n self.img_copy = img.copy()\n self.height = img.shape[0]\n self.width = img.shape[1]\n self.min_aspect_ratio = min_aspect_ratio\n self.max_aspect_ratio = max_aspect_ratio\n\n def bgr2gray(self):\n img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n return img_gray\n\n def findCont(self):\n img_gray = self.bgr2gray()\n _, thresh = cv.threshold(img_gray, 240, 255, cv.THRESH_BINARY)\n contours, _ = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)\n return contours\n\n def find_square(self):\n contours = self.findCont()\n for c in contours:\n approx = cv.approxPolyDP(c, 0.005 * cv.arcLength(c, True), True)\n cv.drawContours(self.img_copy, [approx], 0, (0, 0, 255), 1) # kalınlığı -1 yapınca içini dolduruyor :D\n approx_array = approx.ravel()\n\n if len(approx) == 4:\n x1, y1, w, h = cv.boundingRect(approx)\n aspect_ratio = float(w) / float(h)\n\n if aspect_ratio >= self.min_aspect_ratio and aspect_ratio <= 
self.max_aspect_ratio:\n                    i = 0\n                    point_list = []\n                    for _ in approx_array:\n                        if (i % 2 == 0):\n                            x = approx_array[i]\n                            y = approx_array[i + 1]\n                            point_list.append([x, y])\n                        i = i + 1\n                    return point_list\n                else:\n                    continue\n            else:\n                continue\n\n    def point_crop_image(self):\n        point_list = self.find_square()\n        mask = np.zeros((self.height, self.width), dtype=np.uint8)\n        points = np.array([\n            point_list\n        ])\n\n        cv.fillPoly(mask, points, (255))\n\n        res = cv.bitwise_and(self.img_copy, self.img_copy, mask=mask)\n\n        rect = cv.boundingRect(points)  # returns (x,y,w,h) of the rect\n        cropped = res[rect[1]: rect[1] + rect[3], rect[0]: rect[0] + rect[2]]\n\n        # note: the full-size masked image is returned; 'cropped' is computed but unused\n        return res\n\n    def count_mussels(self, param1, param2):\n        img_count = self.point_crop_image()\n        gray = cv.cvtColor(img_count, cv.COLOR_BGR2GRAY)\n        minDist = 1\n        minRadius = 0\n        maxRadius = 10\n        circles = cv.HoughCircles(gray, cv.HOUGH_GRADIENT, 1, minDist, param1=param1, param2=param2,\n                                  minRadius=minRadius,\n                                  maxRadius=maxRadius)\n        counter = 0\n        if circles is not None:\n            circles = np.uint16(np.around(circles))\n            for i in circles[0, :]:\n                cv.circle(img_count, (i[0], i[1]), i[2], (0, 255, 0), 2)\n                counter = counter + 1\n\n        return counter, img_count\n\n# parameters controlled with trackbars\ndef tracker():\n    cv.namedWindow(\"Parameters\")\n    cv.resizeWindow(\"Parameters\", 640, 240)\n    cv.createTrackbar(\"Param1\", \"Parameters\", 210, 500, lambda x: x)\n    cv.createTrackbar(\"Param2\", \"Parameters\", 20, 200, lambda x: x)\n    return cv\n\ntracker_cv = tracker()\nimg = cv.imread(\"mussel_square.png\")\nwhile True:\n    param1 = tracker_cv.getTrackbarPos(\"Param1\", \"Parameters\")\n    param2 = tracker_cv.getTrackbarPos(\"Param2\", \"Parameters\")\n\n    if (cv.waitKey(1) & 0xFF) == ord(\"q\"):\n        break\n\n    sayi, img_count = CountMussel(img).count_mussels(param1, param2)  # 'sayi' is Turkish for 'count'\n\n    print(sayi)\n    cv.imshow(\"image\", img)\n    cv.imshow(\"count\", img_count)\n\ncv.destroyAllWindows()\n","repo_name":"feceugur/ROV_ImageProc","sub_path":"Count_Mussel.py","file_name":"Count_Mussel.py","file_ext":"py","file_size_in_byte":3639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"37130520144","text":"import logging\nfrom typing import Any, Optional\n\nfrom snowflake.connector import SnowflakeConnection\nfrom snowflake.connector.cursor import DictCursor\nfrom typing_extensions import Protocol\n\nfrom datahub.configuration.common import MetaError\nfrom datahub.configuration.pattern_utils import is_schema_allowed\nfrom datahub.ingestion.source.snowflake.constants import (\n    GENERIC_PERMISSION_ERROR_KEY,\n    SNOWFLAKE_DEFAULT_CLOUD,\n    SNOWFLAKE_REGION_CLOUD_REGION_MAPPING,\n    SnowflakeObjectDomain,\n)\nfrom datahub.ingestion.source.snowflake.snowflake_config import SnowflakeV2Config\nfrom datahub.ingestion.source.snowflake.snowflake_report import SnowflakeV2Report\n\nlogger: logging.Logger = logging.getLogger(__name__)\n\n\nclass SnowflakePermissionError(MetaError):\n    \"\"\"A permission error has happened\"\"\"\n\n\n# Required only for mypy, since we are using mixin classes, not inheritance.\n# Reference - https://mypy.readthedocs.io/en/latest/more_types.html#mixin-classes\nclass SnowflakeLoggingProtocol(Protocol):\n    logger: logging.Logger\n\n\nclass SnowflakeQueryProtocol(SnowflakeLoggingProtocol, Protocol):\n    def get_connection(self) -> SnowflakeConnection:\n        ...\n\n\nclass SnowflakeQueryMixin:\n    def query(self: SnowflakeQueryProtocol, query: str) -> Any:\n        try:\n            self.logger.debug(\"Query : {}\".format(query))\n            resp = 
self.get_connection().cursor(DictCursor).execute(query)\n            return resp\n\n        except Exception as e:\n            if is_permission_error(e):\n                raise SnowflakePermissionError(e) from e\n            raise\n\n\nclass SnowflakeCommonProtocol(SnowflakeLoggingProtocol, Protocol):\n    config: SnowflakeV2Config\n    report: SnowflakeV2Report\n\n    def get_dataset_identifier(\n        self, table_name: str, schema_name: str, db_name: str\n    ) -> str:\n        ...\n\n    def get_dataset_identifier_from_qualified_name(self, qualified_name: str) -> str:\n        ...\n\n    def snowflake_identifier(self, identifier: str) -> str:\n        ...\n\n    def report_warning(self, key: str, reason: str) -> None:\n        ...\n\n    def report_error(self, key: str, reason: str) -> None:\n        ...\n\n\nclass SnowflakeCommonMixin:\n    platform = \"snowflake\"\n\n    @staticmethod\n    def create_snowsight_base_url(\n        account_locator: str,\n        cloud_region_id: str,\n        cloud: str,\n        privatelink: bool = False,\n    ) -> Optional[str]:\n        if privatelink:\n            url = f\"https://app.{account_locator}.{cloud_region_id}.privatelink.snowflakecomputing.com/\"\n        elif cloud == SNOWFLAKE_DEFAULT_CLOUD:\n            url = f\"https://app.snowflake.com/{cloud_region_id}/{account_locator}/\"\n        else:\n            url = f\"https://app.snowflake.com/{cloud_region_id}.{cloud}/{account_locator}/\"\n        return url\n\n    @staticmethod\n    def get_cloud_region_from_snowflake_region_id(region):\n        if region in SNOWFLAKE_REGION_CLOUD_REGION_MAPPING.keys():\n            cloud, cloud_region_id = SNOWFLAKE_REGION_CLOUD_REGION_MAPPING[region]\n        elif region.startswith((\"aws_\", \"gcp_\", \"azure_\")):\n            # e.g. aws_us_west_2, gcp_us_central1, azure_northeurope\n            cloud, cloud_region_id = region.split(\"_\", 1)\n            cloud_region_id = cloud_region_id.replace(\"_\", \"-\")\n        else:\n            raise Exception(f\"Unknown snowflake region {region}\")\n        return cloud, cloud_region_id\n\n    def _is_dataset_pattern_allowed(\n        self: SnowflakeCommonProtocol,\n        dataset_name: Optional[str],\n        dataset_type: Optional[str],\n        is_upstream: bool = False,\n    ) -> bool:\n        if is_upstream and not self.config.validate_upstreams_against_patterns:\n            return True\n        if not dataset_type or not dataset_name:\n            return True\n        dataset_params = dataset_name.split(\".\")\n        if dataset_type.lower() not in (\n            SnowflakeObjectDomain.TABLE,\n            SnowflakeObjectDomain.EXTERNAL_TABLE,\n            SnowflakeObjectDomain.VIEW,\n            SnowflakeObjectDomain.MATERIALIZED_VIEW,\n        ):\n            return False\n        if len(dataset_params) != 3:\n            self.report_warning(\n                \"invalid-dataset-pattern\",\n                f\"Found {dataset_params} of type {dataset_type}\",\n            )\n            # NOTE: this case returned `True` earlier when extracting lineage\n            return False\n\n        if not self.config.database_pattern.allowed(\n            dataset_params[0].strip('\"')\n        ) or not is_schema_allowed(\n            self.config.schema_pattern,\n            dataset_params[1].strip('\"'),\n            dataset_params[0].strip('\"'),\n            self.config.match_fully_qualified_names,\n        ):\n            return False\n\n        if dataset_type.lower() in {\n            SnowflakeObjectDomain.TABLE\n        } and not self.config.table_pattern.allowed(\n            self.get_dataset_identifier_from_qualified_name(dataset_name)\n        ):\n            return False\n\n        if dataset_type.lower() in {\n            \"view\",\n            \"materialized_view\",\n        } and not self.config.view_pattern.allowed(\n            self.get_dataset_identifier_from_qualified_name(dataset_name)\n        ):\n            return False\n\n        return True\n\n    def snowflake_identifier(self: SnowflakeCommonProtocol, identifier: str) -> str:\n        # to stay in sync with the older connector, convert the name to lowercase\n        if self.config.convert_urns_to_lowercase:\n            return identifier.lower()\n        return identifier\n\n    @staticmethod\n    def 
get_quoted_identifier_for_database(db_name):\n        return f'\"{db_name}\"'\n\n    @staticmethod\n    def get_quoted_identifier_for_schema(db_name, schema_name):\n        return f'\"{db_name}\".\"{schema_name}\"'\n\n    @staticmethod\n    def get_quoted_identifier_for_table(db_name, schema_name, table_name):\n        return f'\"{db_name}\".\"{schema_name}\".\"{table_name}\"'\n\n    def get_dataset_identifier(\n        self: SnowflakeCommonProtocol, table_name: str, schema_name: str, db_name: str\n    ) -> str:\n        return self.snowflake_identifier(f\"{db_name}.{schema_name}.{table_name}\")\n\n    # Qualified object names from snowflake audit logs have quotes for snowflake quoted identifiers,\n    # for example \"test-database\".\"test-schema\".test_table,\n    # whereas we generate urns without quotes even for quoted identifiers for backward compatibility,\n    # and also because no utility function is available to identify whether the current table/schema/database\n    # name should be quoted in the get_dataset_identifier method above\n    def get_dataset_identifier_from_qualified_name(\n        self: SnowflakeCommonProtocol, qualified_name: str\n    ) -> str:\n        name_parts = qualified_name.split(\".\")\n        if len(name_parts) != 3:\n            self.report.report_warning(\n                \"invalid-dataset-pattern\",\n                f\"Found non-parseable {name_parts} for {qualified_name}\",\n            )\n            return self.snowflake_identifier(qualified_name.replace('\"', \"\"))\n        return self.get_dataset_identifier(\n            name_parts[2].strip('\"'), name_parts[1].strip('\"'), name_parts[0].strip('\"')\n        )\n\n    # Note - decide how to construct user urns.\n    # Historically urns were created using the part before @ in the user's email.\n    # Users without email were skipped from both user entries as well as aggregates.\n    # However, email is not a mandatory field for a snowflake user, while user_name is always present.\n    def get_user_identifier(\n        self: SnowflakeCommonProtocol,\n        user_name: str,\n        user_email: Optional[str],\n        email_as_user_identifier: bool,\n    ) -> str:\n        if user_email:\n            return self.snowflake_identifier(\n                user_email\n                if email_as_user_identifier is True\n                else user_email.split(\"@\")[0]\n            )\n        return self.snowflake_identifier(user_name)\n\n    # TODO: Revisit this after stateful ingestion can commit checkpoint\n    # for failures that do not affect the checkpoint\n    def warn_if_stateful_else_error(\n        self: SnowflakeCommonProtocol, key: str, reason: str\n    ) -> None:\n        if (\n            self.config.stateful_ingestion is not None\n            and self.config.stateful_ingestion.enabled\n        ):\n            self.report_warning(key, reason)\n        else:\n            self.report_error(key, reason)\n\n    def report_warning(self: SnowflakeCommonProtocol, key: str, reason: str) -> None:\n        self.report.report_warning(key, reason)\n        self.logger.warning(f\"{key} => {reason}\")\n\n    def report_error(self: SnowflakeCommonProtocol, key: str, reason: str) -> None:\n        self.report.report_failure(key, reason)\n        self.logger.error(f\"{key} => {reason}\")\n\n\nclass SnowflakeConnectionProtocol(SnowflakeLoggingProtocol, Protocol):\n    connection: Optional[SnowflakeConnection]\n    config: SnowflakeV2Config\n    report: SnowflakeV2Report\n\n    def create_connection(self) -> Optional[SnowflakeConnection]:\n        ...\n\n    def report_error(self, key: str, reason: str) -> None:\n        ...\n\n\nclass SnowflakeConnectionMixin:\n    def get_connection(self: SnowflakeConnectionProtocol) -> SnowflakeConnection:\n        if self.connection is None:\n            # Ideally this is never called here\n            self.logger.info(\"Did you forget to initialize connection for module?\")\n            self.connection = self.create_connection()\n\n        # Connection is already present by the time it's used for 
query\n # Every module initializes the connection or fails and returns\n assert self.connection is not None\n return self.connection\n\n # If connection succeeds, return connection, else return None and report failure\n def create_connection(\n self: SnowflakeConnectionProtocol,\n ) -> Optional[SnowflakeConnection]:\n try:\n conn = self.config.get_connection()\n except Exception as e:\n logger.debug(e, exc_info=e)\n if \"not granted to this user\" in str(e):\n self.report_error(\n GENERIC_PERMISSION_ERROR_KEY,\n f\"Failed to connect with snowflake due to error {e}\",\n )\n else:\n logger.debug(e, exc_info=e)\n self.report_error(\n \"snowflake-connection\",\n f\"Failed to connect to snowflake instance due to error {e}.\",\n )\n return None\n else:\n return conn\n\n def close(self: SnowflakeConnectionProtocol) -> None:\n if self.connection is not None and not self.connection.is_closed():\n self.connection.close()\n\n\ndef is_permission_error(e: Exception) -> bool:\n msg = str(e)\n # 002003 (02000): SQL compilation error: Database/SCHEMA 'XXXX' does not exist or not authorized.\n # Insufficient privileges to operate on database 'XXXX'\n return \"Insufficient privileges\" in msg or \"not authorized\" in msg\n","repo_name":"datahub-project/datahub","sub_path":"metadata-ingestion/src/datahub/ingestion/source/snowflake/snowflake_utils.py","file_name":"snowflake_utils.py","file_ext":"py","file_size_in_byte":10933,"program_lang":"python","lang":"en","doc_type":"code","stars":8629,"dataset":"github-code","pt":"4"} +{"seq_id":"43376024886","text":"import csv\nimport sys\n\nsys.path.insert(0, '../classes')\n\nfrom .classes import Interval, Day, DailyDay\n\ndef get_days_daily(path):\n days =[]\n\n with open(path, 'r') as infile:\n reader = csv.reader(infile)\n header = next(reader)\n\n for row in reader:\n days.append(DailyDay(row[0], row[1], row[2], row[3], row[4], row[5], row[6]))\n\n return days\n\ndef get_days(path):\n\n days = []\n day = Day('', [])\n addIntervals = False\n\n # Read CSV File\n with open(path, 'r') as infile:\n reader = csv.reader(infile) # Reads one line at a time\n header = next(reader) # Takes the current row, converts to list, advances to next row\n \n # Loop through entries in CSV File\n for row in reader:\n\n print(type(row))\n # Get time\n time = row[1]\n\n # Check if it's the start of the market - 09:30:AM\n if time == '9:28' or time == '9:29' or time == '9:30' or time == '9:31' or time == '9:32':\n addIntervals = True\n elif time == '16:01':\n addIntervals = False\n\n if addIntervals == True:\n\n # Check interval date for a new day\n if len(day.intervals) >= 2:\n lastDate = day.intervals[len(day.intervals)-1].date\n\n # Compare current date to the last date saved\n # If not the same, append the day and reset it\n if row[0] != lastDate:\n days.append(day)\n day = Day('',[])\n \n # Create interval object\n interval = Interval(row[0], row[1], float(row[2]), float(row[3]), float(row[4]), float(row[5]))\n \n # Add interval to day\n day.add_interval(interval)\n day.set_date(interval.date)\n\n return days","repo_name":"Kesslerjx/stock_backtesting","sub_path":"handlers/file_reader.py","file_name":"file_reader.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"36018334193","text":"# https://leetcode.com/problems/battleships-in-a-board/description/\nclass Solution(object):\n def countBattleships(self, board):\n \"\"\"\n :type board: List[List[str]]\n :rtype: int\n \"\"\"\n ships = 
0\n for x in range(len(board)):\n row = board[x]\n for y in range(len(row)):\n if board[x][y] == 'X':\n if y-1 >= 0 and board[x][y-1] == 'X':\n continue\n if x-1 >= 0 and board[x-1][y] == 'X':\n continue\n else:\n ships = ships + 1\n return ships\n","repo_name":"baieric/hackerRank","sub_path":"leetcode/battleship_in_a_board.py","file_name":"battleship_in_a_board.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"34304026939","text":"import sys\n\nsys.path.append('./models')\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom models.distributions import Bernoulli, Categorical\nfrom models.utils import init\n\n\nclass Flatten(nn.Module):\n def forward(self, x):\n return x.view(x.size(0), -1)\n\n\nclass Policy(nn.Module):\n def __init__(self, obs_shape, action_space, base=None, base_kwargs=None):\n super(Policy, self).__init__()\n if base_kwargs is None:\n base_kwargs = {}\n if base is None:\n base = CNNBase\n\n self.base = base(obs_shape[0], **base_kwargs)\n\n if action_space.__class__.__name__ == \"Discrete\":\n num_outputs = action_space.n\n self.dist = Categorical(self.base.output_size, num_outputs)\n else:\n raise NotImplementedError\n self.n_actions = num_outputs\n\n\n @property\n def recurrent_hidden_state_size(self):\n \"\"\"Size of rnn_hx.\"\"\"\n return self.base.recurrent_hidden_state_size\n\n def forward(self, inputs, rnn_hxs, masks):\n raise NotImplementedError\n\n def act(self, inputs, rnn_hxs, masks, instruction=None, deterministic=False):\n value, actor_features, rnn_hxs = self.base(inputs, rnn_hxs, masks, instruction)\n dist = self.dist(actor_features)\n\n if deterministic:\n action = dist.mode()\n else:\n action = dist.sample()\n\n action_log_probs = dist.log_probs(action)\n dist_entropy = dist.entropy()\n\n return value, action, action_log_probs, rnn_hxs, dist.probs, dist_entropy\n\n def get_value(self, inputs, rnn_hxs, masks):\n value, _, _ = self.base(inputs, rnn_hxs, masks)\n return value\n\n def evaluate_actions(self, inputs, rnn_hxs, masks, action):\n value, actor_features, rnn_hxs = self.base(inputs, rnn_hxs, masks)\n dist = self.dist(actor_features)\n\n action_log_probs = dist.log_probs(action)\n\n return value, action_log_probs, dist.logits, rnn_hxs\n\n def get_features(self, inputs, rnn_hxs, masks):\n _, actor_features, _ = self.base(inputs, rnn_hxs, masks)\n return actor_features\n\n\nclass NNBase(nn.Module):\n def __init__(self, recurrent_input_size, hidden_size):\n super(NNBase, self).__init__()\n\n self._hidden_size = hidden_size\n self.gru = nn.GRU(recurrent_input_size, hidden_size)\n for name, param in self.gru.named_parameters():\n if 'bias' in name:\n nn.init.constant_(param, 0)\n elif 'weight' in name:\n nn.init.orthogonal_(param)\n\n\n @property\n def recurrent_hidden_state_size(self):\n return self._hidden_size\n\n @property\n def output_size(self):\n return self._hidden_size\n\n def _forward_gru(self, x, hxs, masks):\n if x.size(0) == hxs.size(0):\n x, hxs = self.gru(x.unsqueeze(0), (hxs * masks).unsqueeze(0))\n x = x.squeeze(0)\n hxs = hxs.squeeze(0)\n else:\n # x is a (T, N, -1) tensor that has been flatten to (T * N, -1)\n N = hxs.size(0)\n T = int(x.size(0) / N)\n\n # unflatten\n x = x.view(T, N, x.size(1))\n\n # Same deal with masks\n masks = masks.view(T, N)\n\n # Let's figure out which steps in the sequence have a zero for any agent\n # We will always assume t=0 has a zero in it as that makes the logic cleaner\n has_zeros = 
((masks[1:] == 0.0) \\\n .any(dim=-1)\n .nonzero()\n .squeeze()\n .cpu())\n\n # +1 to correct the masks[1:]\n if has_zeros.dim() == 0:\n # Deal with scalar\n has_zeros = [has_zeros.item() + 1]\n else:\n has_zeros = (has_zeros + 1).numpy().tolist()\n\n # add t=0 and t=T to the list\n has_zeros = [0] + has_zeros + [T]\n\n hxs = hxs.unsqueeze(0)\n outputs = []\n for i in range(len(has_zeros) - 1):\n # We can now process steps that don't have any zeros in masks together!\n # This is much faster\n start_idx = has_zeros[i]\n end_idx = has_zeros[i + 1]\n\n rnn_scores, hxs = self.gru(\n x[start_idx:end_idx],\n hxs * masks[start_idx].view(1, -1, 1))\n\n outputs.append(rnn_scores)\n\n # assert len(outputs) == T\n # x is a (T, N, -1) tensor\n x = torch.cat(outputs, dim=0)\n # flatten\n x = x.view(T * N, -1)\n hxs = hxs.squeeze(0)\n\n return x, hxs\n\n\nclass CNNBase(NNBase):\n def __init__(self, num_inputs, hidden_size=256, vocab_size=10, embed_dim=64):\n super(CNNBase, self).__init__(hidden_size, hidden_size)\n\n init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.\n constant_(x, 0), nn.init.calculate_gain('relu'))\n\n self.main = nn.Sequential(\n init_(nn.Conv2d(num_inputs, 16, 8, stride=4)), nn.ReLU(),\n init_(nn.Conv2d(16, 32, 4, stride=2)), nn.ReLU(), Flatten(),\n init_(nn.Linear(32 * 7 * 10, hidden_size)), nn.ReLU())\n\n init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.\n constant_(x, 0))\n\n self.critic_linear = init_(nn.Linear(hidden_size, 1))\n\n self.embedding = nn.Embedding(vocab_size, embed_dim)\n self.lstm = nn.LSTM(embed_dim, 64, num_layers=1, batch_first=True)\n\n self.train()\n\n def forward(self, inputs, rnn_hxs, masks, instruction=None):\n x = self.main(inputs/255.0)\n\n if instruction is not None:\n embedded_inst = self.embedding(instruction)\n out, hn = self.lstm(embedded_inst)\n h, c = hn\n\n x = torch.concat([x, h], dim=1)\n\n x, rnn_hxs = self._forward_gru(x, rnn_hxs, masks)\n\n return self.critic_linear(x), x, rnn_hxs\n\n\nclass ResNetBase(NNBase):\n def __init__(self, num_inputs, hidden_size=256):\n raise NotImplementedError\n","repo_name":"junjungoal/IMPALA-pytorch","sub_path":"models/policy.py","file_name":"policy.py","file_ext":"py","file_size_in_byte":6132,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"4"} +{"seq_id":"23353090744","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 1 19:58:26 2021\n\n@author: Steffen Coenen and Ekin Ugurel\n\"\"\"\n\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport pyomo.environ as pyo\n\nimport utils as pu\n\ndef init():\n import matplotlib\n \n matplotlib.pyplot.rcdefaults()\n matplotlib.pyplot.style.use(u\"config/project.mplstyle\")\n matplotlib.pyplot.close(\"all\")\n\n\nclass EVChargingLoadModel():\n \"\"\"\n This is the main class that contains all necessary variables and \n functions related to the EV charging load model.\n \"\"\"\n \n def __init__(self, configfile):\n self.configfile = configfile\n \n self.read_config()\n self.def_general_variables()\n \n self.read_base_elec_rate()\n self.read_base_charg_load()\n self.read_target_charg_load()\n \n # print(self.df.head())\n \n def __str__(self):\n return \"\\nmodel info:\\n\\tname = {0:s} \\\n \\n\\tstartdate = {1} \\\n \\n\\tenddate = {2} \\\n \\n\\ttime step = {3:.0f} minute(s) \\\n \\n\\tbase_elec_rate_file = {4:s} \\\n \\n\\tbase_charg_load_file = {5:s} \\\n 
\\n\\ttarget_charg_load_file = {6:s}\\n\".format(self.name, self.startdate, self.enddate, self.time_step, self.base_elec_rate_file, self.base_charg_load_file, self.target_charg_load_file)\n \n def get_time_from_bin(self, i):\n \"\"\"\n Returns the datetime object that corresponds to the time bin with index i.\n\n Parameters\n ----------\n i : datetime\n Time bin index of desired time.\n \"\"\"\n \n return self.startdate + pd.Timedelta(hours=24*i/self.n_bins_per_day)\n \n def read_config(self):\n \"\"\"\n Retrieves information from the configuration file (self.configfile) \n and saves it into the corresponding class variables.\n \"\"\"\n \n self.cfg = pu.read_config_file(self.configfile)\n cfg = self.cfg[\"General Info\"]\n \n self.name = cfg[\"name\"]\n self.folder = cfg[\"folder\"] #output folder or similar\n self.seed = None if cfg[\"seed\"]==\"None\" else cfg[\"seed\"]\n self.solver = cfg[\"solver\"]\n # self.n_EVs = cfg[\"n_EVs\"]\n \n self.startdate = pd.to_datetime(cfg[\"startdate\"])\n self.enddate = pd.to_datetime(cfg[\"enddate\"])\n self.time_step = int(cfg[\"time_step\"]) #[minutes]\n \n self.duration = self.enddate - self.startdate\n self.n_days = int(np.ceil(self.duration.total_seconds()/(60*60*24))) #number of (partial) days\n self.n_bins_per_day = int(24*60/self.time_step) #number of bins in one day\n self.n_bins = int(self.duration.total_seconds() / (self.time_step*60)) #self.n_days * self.n_bins_per_day #number of bins in simulation time frame\n self.bins = np.array(range(self.n_bins))\n self.xticks = [self.startdate + pd.Timedelta(minutes=self.time_step*i) for i in self.bins]\n \n # self.alpha = float(cfg[\"alpha\"]) #response rate between electricity rate change and charging load change\n self.alpha_factor = float(cfg[\"alpha_factor\"]) #factor to be multiplied with alpha_electricity_rate_increases and alpha_electricity_rate_decreases (to potentially enhance the response to price changes)\n self.alpha_electricity_rate_increases = self.alpha_factor * float(cfg[\"alpha_electricity_rate_increases\"])\n self.alpha_electricity_rate_decreases = self.alpha_factor * float(cfg[\"alpha_electricity_rate_decreases\"])\n \n self.tolerance_total_charged_energy_up = float(cfg[\"tolerance_total_charged_energy_up\"])\n self.tolerance_total_charged_energy_down = float(cfg[\"tolerance_total_charged_energy_down\"])\n \n self.max_electricity_rate_change_up = float(cfg[\"max_electricity_rate_change_up\"])\n self.max_electricity_rate_change_down = float(cfg[\"max_electricity_rate_change_down\"])\n \n self.mu_plugin, self.sigma_plugin = cfg[\"mu_plugin\"], cfg[\"sigma_plugin\"]\n self.mu_plugout, self.sigma_plugout = cfg[\"mu_plugout\"], cfg[\"sigma_plugout\"]\n \n #baseline electricity rate\n self.base_elec_rate_folder = cfg[\"base_elec_rate_folder\"]\n self.base_elec_rate_file = cfg[\"base_elec_rate_file\"]\n self.base_elec_rate_scale = float(cfg[\"base_elec_rate_scale\"])\n \n #baseline charging load\n self.base_charg_load_folder = cfg[\"base_charg_load_folder\"]\n self.base_charg_load_file = cfg[\"base_charg_load_file\"]\n self.base_charg_load_scale = float(cfg[\"base_charg_load_scale\"])\n \n #target charging load\n self.target_charg_load_folder = cfg[\"target_charg_load_folder\"]\n self.target_charg_load_file = cfg[\"target_charg_load_file\"]\n self.target_charg_load_scale = float(cfg[\"target_charg_load_scale\"])\n \n # cfg[\"\"]\n \n def def_general_variables(self):\n \"\"\"\n Defines some general class variables, such as the main DataFrame \n object (self.df) or colors and labels in 
plots. \n Is called only once when instantiating the class object.\n \"\"\"\n \n #df\n self.df = pd.DataFrame(index = self.bins, \n columns = [\"time\", \n \"base_elec_rate\" , \n \"opt_elec_rate\", \n # \"base_n_EVs_charging\", \n # \"res_n_EVs_charging\", \n \"base_charg_load\", \n \"target_charg_load\", \n \"res_charg_load\"])\n self.df[\"time\"] = [self.get_time_from_bin(i) for i in self.df.index]\n self.df.index += 1 #prepare for pyomo being 1-indexed\n \n # self.datecut = str(self.startdate.date())+\"_\"+str(self.enddate.date())\n self.datecut = ( str(self.startdate) + \"_\" + str(self.enddate) ).replace(\":\",\"-\")\n \n #hardcoded variables\n self.figsize = (8,4) #legend next to plots\n self.x = self.df[\"time\"]\n self.xlabel = \"time\"\n \n self.labels = {\"opt_elec_rate\": \"Optimized electricity rate\", \"base_elec_rate\": \"Baseline electricity rate\", \"res_charg_load\": \"Resulting charging load\", \"base_charg_load\": \"Baseline charging load\", \"target_charg_load\": \"Target charging load\", \"rel_charg_load\": r\"$\\frac{\\mathrm{Resulting\\;charging\\;load}}{\\mathrm{Baseline\\;charging\\;load}} - 1$\"}\n self.labels.update({\"CMA_opt_elec_rate\": \"Optimized electricity rate (CMA)\", \"CMA_base_elec_rate\": \"Baseline electricity rate (CMA)\", \"CMA_res_charg_load\": \"Resulting charging load (CMA)\", \"CMA_base_charg_load\": \"Baseline charging load (CMA)\", \"CMA_target_charg_load\": \"Target charging load (CMA)\", \"CMA_rel_charg_load\": r\"$\\frac{\\mathrm{Resulting\\;charging\\;load}}{\\mathrm{Baseline\\;charging\\;load}} - 1$\"})\n \n settings = self.cfg[\"Plot Settings\"]\n self.colors = {\"opt_elec_rate\": settings[\"col_OER\"], \"base_elec_rate\": settings[\"col_BER\"], \"res_charg_load\": settings[\"col_RCL\"], \"base_charg_load\": settings[\"col_BCL\"], \"target_charg_load\": settings[\"col_TCL\"], \"rel_charg_load\": settings[\"col_rel_charg_load\"]}\n self.colors.update({\"CMA_opt_elec_rate\": settings[\"col_OER\"], \"CMA_base_elec_rate\": settings[\"col_BER\"], \"CMA_res_charg_load\": settings[\"col_RCL\"], \"CMA_base_charg_load\": settings[\"col_BCL\"], \"CMA_target_charg_load\": settings[\"col_TCL\"], \"CMA_rel_charg_load\": settings[\"col_rel_charg_load\"]})\n \n self.zorders = {\"opt_elec_rate\": float(settings[\"zor_OER\"]), \"base_elec_rate\": float(settings[\"zor_BER\"]), \"res_charg_load\": float(settings[\"zor_RCL\"]), \"base_charg_load\": float(settings[\"zor_BCL\"]), \"target_charg_load\": float(settings[\"zor_TCL\"]), \"rel_charg_load\": float(settings[\"zor_RCL\"])}\n self.zorders.update({\"CMA_opt_elec_rate\": float(settings[\"zor_OER\"]), \"CMA_base_elec_rate\": float(settings[\"zor_BER\"]), \"CMA_res_charg_load\": float(settings[\"zor_RCL\"]), \"CMA_base_charg_load\": float(settings[\"zor_BCL\"]), \"CMA_target_charg_load\": float(settings[\"zor_TCL\"]), \"CMA_rel_charg_load\": float(settings[\"zor_RCL\"])})\n \n self.elec_rates = [\"opt_elec_rate\", \"base_elec_rate\"]\n self.charg_loads = [\"res_charg_load\", \"base_charg_load\", \"target_charg_load\"]\n \n def read_base_elec_rate(self):\n \"\"\"\n Reads-in the baseline electricity rate from the respective data file \n (self.base_elec_rate_file), by time of the day.\n \n Scales the numbers given in that file with self.base_elec_rate_scale.\n \"\"\"\n \n base_elec_rate = pd.read_csv(self.base_elec_rate_folder + self.base_elec_rate_file, sep=\";\", dtype={\"electricity rate\": float})\n base_elec_rate.index += 1 #prepare for pyomo being 1-indexed\n \n if self.time_step > 1:\n for index in 
self.df.index:\n                index2 = index - ((index-1)//self.n_bins_per_day)*self.n_bins_per_day\n                self.df.loc[index, \"base_elec_rate\"] = base_elec_rate.loc[index2*self.time_step, \"electricity rate\"]\n        else:\n            # for i in range(self.n_days-1):\n            #     base_elec_rate = base_elec_rate.append(base_elec_rate, ignore_index=True)\n            # self.df[\"base_elec_rate\"] = base_elec_rate[\"electricity rate\"]\n            base_elec_rate = pd.concat([base_elec_rate]*self.n_days, ignore_index=True)\n            base_elec_rate.index += 1 #prepare for pyomo being 1-indexed\n            self.df[\"base_elec_rate\"] = base_elec_rate.loc[base_elec_rate.index <= self.n_bins, \"electricity rate\"]\n        \n        self.df[\"base_elec_rate\"] *= self.base_elec_rate_scale\n        \n        print(\"Read baseline electricity rate.\")\n    \n    def read_base_charg_load(self):\n        \"\"\"\n        Reads-in the baseline charging load from the respective data file \n        (self.base_charg_load_file), by date and time of the day.\n        \n        Scales the numbers given in that file with self.base_charg_load_scale.\n        Also computes the baseline total charged energy.\n        \"\"\"\n        \n        base_charg_load = pd.read_csv(self.base_charg_load_folder + self.base_charg_load_file, sep=\";\", dtype={\"charging load\": float}, parse_dates=[\"time\"])\n        base_charg_load.index += 1 #prepare for pyomo being 1-indexed\n        \n        if self.time_step > 1:\n            for index in self.df.index:\n                self.df.loc[index, \"base_charg_load\"] = base_charg_load.loc[index*self.time_step, \"charging load\"]\n        else:\n            self.df[\"base_charg_load\"] = base_charg_load.loc[base_charg_load[\"time\"] < self.enddate, \"charging load\"]\n        \n        self.df[\"base_charg_load\"] *= self.base_charg_load_scale\n        \n        self.base_total_charged_energy = sum(self.df[\"base_charg_load\"])\n        \n        print(\"Read baseline charging load.\")\n    \n    def read_target_charg_load(self):\n        \"\"\"\n        Reads-in the target charging load from the respective data file \n        (self.target_charg_load_file), by time of the day.\n        \n        Scales the numbers given in that file with self.target_charg_load_scale.\n        \"\"\"\n        \n        target_charg_load = pd.read_csv(self.target_charg_load_folder + self.target_charg_load_file, sep=\";\", dtype={\"charging load\": float})\n        target_charg_load.index += 1 #prepare for pyomo being 1-indexed\n        \n        if self.time_step > 1:\n            for index in self.df.index:\n                index2 = index - ((index-1)//self.n_bins_per_day)*self.n_bins_per_day\n                self.df.loc[index, \"target_charg_load\"] = target_charg_load.loc[index2*self.time_step, \"charging load\"]\n        else:\n            target_charg_load = pd.concat([target_charg_load]*self.n_days, ignore_index=True)\n            target_charg_load.index += 1 #prepare for pyomo being 1-indexed\n            self.df[\"target_charg_load\"] = target_charg_load.loc[target_charg_load.index <= self.n_bins, \"charging load\"]\n        \n        self.df[\"target_charg_load\"] *= self.target_charg_load_scale\n        \n        #scale target charging load to total energy demand\n        self.df[\"target_charg_load\"] *= self.base_total_charged_energy/sum(self.df[\"target_charg_load\"])\n        \n        print(\"Read target charging load.\")\n    \n    def run(self):\n        \"\"\"\n        Umbrella function to run the optimization model.\n        \"\"\"\n        \n        #show and save initial dataframe\n        # print(self.df)\n        # print(self.df.sum())\n        self.df.to_csv(self.folder+\"df_initial.csv\", sep=\";\")\n        \n        self.model = pyo.ConcreteModel()\n        \n        self.model.t = pyo.RangeSet(self.n_bins)\n        \n        # decision variable\n        self.model.opt_elec_rate = pyo.Var(self.model.t, domain=pyo.NonNegativeReals)\n        \n        #initialize decision variable's values\n        for i in self.model.t:\n            self.model.opt_elec_rate[i] = self.df.loc[i,\"base_elec_rate\"]\n            # self.model.opt_elec_rate[i] = 0.1 #+ 0.1*np.random.random()\n        \n        # view initial decision variables' values\n        # self.model.opt_elec_rate.pprint()\n        \n        #parameters\n        #TODO: maybe simplify the initialize argument passed\n        self.model.base_elec_rate    = pyo.Param(self.model.t, initialize=pd.to_numeric(self.df[\"base_elec_rate\"]  ).to_dict(), domain=pyo.NonNegativeReals)\n        self.model.base_charg_load   = pyo.Param(self.model.t, initialize=pd.to_numeric(self.df[\"base_charg_load\"] ).to_dict(), domain=pyo.NonNegativeReals)\n        self.model.target_charg_load = pyo.Param(self.model.t, initialize=pd.to_numeric(self.df[\"target_charg_load\"]).to_dict(), domain=pyo.NonNegativeReals)\n
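        # NOTE: pd.to_numeric(...).to_dict() yields a {bin_index: value} dict, which\n        # is the form pyo.Param accepts via initialize= for a parameter indexed\n        # over model.t. Minimal sketch of the same pattern (names illustrative\n        # only, not part of the model above):\n        #   demo = pyo.ConcreteModel()\n        #   demo.t = pyo.RangeSet(3)\n        #   demo.price = pyo.Param(demo.t, initialize={1: 0.10, 2: 0.12, 3: 0.11})\n        # self.model.res_charg_load = 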
pyo.Param(self.model.t, initialize=pd.to_numeric(self.df[\"res_charg_load\"] ).to_dict(), domain=pyo.NonNegativeReals, mutable=True)\n \n # self.model.alpha = pyo.Param(initialize=self.alpha, domain=pyo.NonNegativeReals)\n alpha_init = {}\n for i in self.model.t:\n alpha_init[i] = self.alpha(i)\n self.alpha = list(alpha_init.values())\n # print(alpha_init)\n self.model.alpha = pyo.Param(self.model.t, initialize=alpha_init, domain=pyo.NonNegativeReals)\n self.model.tolerance_total_charged_energy_up = self.tolerance_total_charged_energy_up\n self.model.tolerance_total_charged_energy_down = self.tolerance_total_charged_energy_down\n self.model.max_electricity_rate_change_up = self.max_electricity_rate_change_up\n self.model.max_electricity_rate_change_down = self.max_electricity_rate_change_down\n \n # add objective function to the model\n self.model.OBJ = pyo.Objective(rule=objective)\n \n self.initial_objective = self.model.OBJ()\n print(\"initial objective = {0:.2f}\".format(self.initial_objective))\n \n # add constraints to the model\n self.model.Constraint1 = pyo.Constraint(rule=constraint_sustain_total_charged_energy_up)\n self.model.Constraint2 = pyo.Constraint(rule=constraint_sustain_total_charged_energy_down)\n self.model.Constraint3 = pyo.Constraint(self.model.t, rule=constraint_max_electricity_rate_change_up)\n self.model.Constraint4 = pyo.Constraint(self.model.t, rule=constraint_max_electricity_rate_change_down)\n \n # solve the optimization problem\n print(\"solver = {0:s}\".format(self.solver))\n solution = pyo.SolverFactory(self.solver).solve(self.model)\n # print(solution)\n \n print(\"Done solving the optimization problem.\")\n \n # log feasibility/infeasibility\n # from pyomo.util.infeasible import log_infeasible_constraints\n # import logging\n # log_infeasible_constraints(self.model, log_expression=True, log_variables=True)\n # logging.basicConfig(filename='example.log', level=logging.INFO)\n \n self.final_objective = self.model.OBJ()\n self.obj_ratio = self.final_objective/self.initial_objective\n print(\"final objective = {0:.2f}\".format(self.final_objective))\n print(\"objective value change (final/initial-1) = {0:.2f}%\".format((self.obj_ratio-1)*100))\n \n # retrieve decision variables' and parameters' final values\n self.df[\"opt_elec_rate\"] = [self.model.opt_elec_rate[i].value for i in self.model.t]\n \n self.df[\"base_elec_rate\"] = [self.model.base_elec_rate[i] for i in self.model.t]\n self.df[\"base_charg_load\"] = [self.model.base_charg_load[i] for i in self.model.t]\n self.df[\"target_charg_load\"] = [self.model.target_charg_load[i] for i in self.model.t]\n # self.df[\"res_charg_load\"] = [pyo.value(self.model.res_charg_load[i]) for i in self.model.t]\n # self.df[\"res_charg_load\"] = self.df[\"base_charg_load\"] * (1 - self.model.alpha.value*(self.df[\"opt_elec_rate\"]-self.df[\"base_elec_rate\"])/self.df[\"base_elec_rate\"])\n self.df[\"res_charg_load\"] = self.df[\"base_charg_load\"] * (1 - self.alpha*(self.df[\"opt_elec_rate\"]-self.df[\"base_elec_rate\"])/self.df[\"base_elec_rate\"])\n self.df[\"rel_charg_load\"] = self.df[\"res_charg_load\"]/self.df[\"base_charg_load\"] - 1\n \n #add central moving averages (CMAs) to df\n window = int(self.cfg[\"CMA\"][\"CMA_window\"])\n min_periods = min(int(self.cfg[\"CMA\"][\"CMA_min_periods\"]), window)\n std = window\n for column in [\"opt_elec_rate\", \"base_elec_rate\", \"base_charg_load\", \"target_charg_load\", \"res_charg_load\", \"rel_charg_load\"]:\n self.df[\"CMA_\"+column] = self.df[column].rolling(window, 
min_periods=min_periods, win_type=\"gaussian\", center=True).mean(std=std) #Gaussian-distributed weights within one window\n \n #show and save final dataframe\n # print(self.df)\n # print(self.df.sum())\n self.df.to_csv(self.folder+\"df_final.csv\", sep=\";\")\n \n self.res_total_charged_energy = self.df[\"res_charg_load\"].sum()\n print(\"sum(BCL) = {0:.2f}\".format(self.base_total_charged_energy))\n print(\"sum(RCL) = {0:.2f}\".format(self.res_total_charged_energy))\n print(\"sum(RCL)/sum(BCL) = {0:.4f}\".format(self.res_total_charged_energy/self.base_total_charged_energy))\n \n def plot_BCL(self, plot_CMAs=False, save=False):\n fig,ax = plt.subplots()\n \n plot_type = \"base_charg_load\"\n \n y = plot_type\n ylabel = \"Baseline charging load [kW]\"\n ylim = (0, None)\n ax.set_xlim(self.startdate, self.startdate+pd.Timedelta(days=1))\n \n if plot_CMAs:\n y = \"CMA_\" + y\n \n #plot\n ax.plot(self.x, self.df[y], label=self.labels[y], color=self.colors[y], zorder=self.zorders[y])\n \n ax.set_xlabel(self.xlabel)\n plt.xticks(rotation=30, ha=\"right\")\n ax.set_ylabel(ylabel)\n # ax.legend(bbox_to_anchor=(1, 0.5), loc=\"center left\")\n # ax.legend(bbox_to_anchor=(0,1.02,1,0.2), loc=\"lower left\",\n # mode=\"expand\", borderaxespad=0)\n plt.tight_layout()\n ax.grid()\n ax.set_ylim(ylim)\n \n if save:\n alph = \"alpha_in={0:.1f}_alpha_de={1:.1f}/\".format(self.alpha_electricity_rate_increases, self.alpha_electricity_rate_decreases)\n add = \"step={0:d}_BER={1:s}_BCL={2:s}_TCL={3:s}_\".format(self.time_step, self.base_elec_rate_file[:-4], self.base_charg_load_file[:-4], self.target_charg_load_file[:-4])\n CMA = \"CMA_\" if plot_CMAs else \"\"\n filename = self.folder + self.datecut + \"/\" + alph + CMA + add + plot_type + \"_only\" + \".png\"\n pu.save_figure(fig, filename)\n \n def plot_charg_loads_rel(self, plot_CMAs=False, save=False):\n fig,axs = plt.subplots(2, 1, sharex=True, figsize=(self.figsize[0], 1.6*self.figsize[1]), gridspec_kw={\"height_ratios\": [2, 1]})\n \n #plot charging loads\n y_cols = self.charg_loads.copy()\n ylabel = \"Charging load [kW]\"\n ylim = (0, None)\n \n if plot_CMAs:\n for k in range(len(y_cols)):\n y_cols[k] = \"CMA_\" + y_cols[k]\n \n #plot\n for y in y_cols:\n axs[0].plot(self.x, self.df[y], label=self.labels[y], color=self.colors[y], zorder=self.zorders[y])\n \n # axs[1].set_xlabel(self.xlabel)\n plt.xticks(rotation=30, ha=\"right\")\n axs[0].set_ylabel(ylabel)\n axs[0].legend(bbox_to_anchor=(1, 0.5), loc=\"center left\")\n # axs[0].legend(bbox_to_anchor=(0,1.02,1,0.2), loc=\"lower left\",\n # mode=\"expand\", borderaxespad=0)\n plt.tight_layout()\n # ax.grid()\n axs[0].set_ylim(ylim)\n \n #plot resulting/baseline - 1\n y = \"rel_charg_load\"\n ylabel = \"Resulting/Baseline - 1\"\n ylim = (None, None)\n \n if plot_CMAs:\n y = \"CMA_\" + y\n \n #plot\n axs[1].plot(self.x, self.df[y], label=self.labels[y], color=self.colors[y], zorder=self.zorders[y])\n \n axs[1].set_xlabel(self.xlabel)\n plt.xticks(rotation=30, ha=\"right\")\n axs[1].set_ylabel(ylabel)\n # axs[1].legend(bbox_to_anchor=(1, 0.5), loc=\"center left\")\n # plt.tight_layout()\n # ax.grid()\n axs[1].set_ylim(ylim)\n \n axs[1].axhline(0, color=\"grey\", alpha=0.6, zorder=-1)\n \n if save:\n alph = \"alpha_in={0:.1f}_alpha_de={1:.1f}/\".format(self.alpha_electricity_rate_increases, self.alpha_electricity_rate_decreases)\n add = \"step={0:d}_BER={1:s}_BCL={2:s}_TCL={3:s}_\".format(self.time_step, self.base_elec_rate_file[:-4], self.base_charg_load_file[:-4], self.target_charg_load_file[:-4])\n CMA = 
\"CMA_\" if plot_CMAs else \"\"\n plot_type = \"charg_loads_rel\"\n filename = self.folder + self.datecut + \"/\" + alph + CMA + add + plot_type + \".png\"\n pu.save_figure(fig, filename)\n \n def plot_all_in_one(self, plot_CMAs=False, save=False):\n fig,axs = plt.subplots(3, 1, sharex=True, figsize=(self.figsize[0], 2*self.figsize[1]), gridspec_kw={\"height_ratios\": [2, 1, 2]})\n \n #plot charging loads\n y_cols = self.charg_loads.copy()\n ylabel = \"Charging load [kW]\"\n ylim = (0, None)\n \n if plot_CMAs:\n for k in range(len(y_cols)):\n y_cols[k] = \"CMA_\" + y_cols[k]\n \n #plot\n for y in y_cols:\n axs[0].plot(self.x, self.df[y], label=self.labels[y], color=self.colors[y], zorder=self.zorders[y])\n \n # axs[1].set_xlabel(self.xlabel)\n plt.xticks(rotation=30, ha=\"right\")\n axs[0].set_ylabel(ylabel)\n axs[0].legend(bbox_to_anchor=(1, 0.5), loc=\"center left\")\n # axs[0].legend(bbox_to_anchor=(0,1.02,1,0.2), loc=\"lower left\",\n # mode=\"expand\", borderaxespad=0)\n plt.tight_layout()\n # ax.grid()\n axs[0].set_ylim(ylim)\n \n #plot resulting/baseline - 1\n y = \"rel_charg_load\"\n ylabel = \"Resulting/Baseline - 1\"\n ylim = (None, None)\n \n if plot_CMAs:\n y = \"CMA_\" + y\n \n #plot\n axs[1].plot(self.x, self.df[y], label=self.labels[y], color=self.colors[y], zorder=self.zorders[y])\n \n axs[1].set_xlabel(self.xlabel)\n plt.xticks(rotation=30, ha=\"right\")\n axs[1].set_ylabel(ylabel)\n # axs[1].legend(bbox_to_anchor=(1, 0.5), loc=\"center left\")\n # plt.tight_layout()\n # ax.grid()\n axs[1].set_ylim(ylim)\n \n axs[1].axhline(0, color=\"grey\", alpha=0.6, zorder=-1)\n \n #plot electricity rates\n y_cols = self.elec_rates.copy()\n ylabel = \"Electricity rate [USD/kWh]\"\n ylim = (0, None)\n \n if plot_CMAs:\n for k in range(len(y_cols)):\n y_cols[k] = \"CMA_\" + y_cols[k]\n \n #plot\n for y in y_cols:\n axs[2].plot(self.x, self.df[y], label=self.labels[y], color=self.colors[y], zorder=self.zorders[y])\n \n # axs[1].set_xlabel(self.xlabel)\n plt.xticks(rotation=30, ha=\"right\")\n axs[2].set_ylabel(ylabel)\n axs[2].legend(bbox_to_anchor=(1, 0.5), loc=\"center left\")\n # axs[0].legend(bbox_to_anchor=(0,1.02,1,0.2), loc=\"lower left\",\n # mode=\"expand\", borderaxespad=0)\n plt.tight_layout()\n # ax.grid()\n axs[2].set_ylim(ylim)\n \n if save:\n alph = \"alpha_in={0:.1f}_alpha_de={1:.1f}/\".format(self.alpha_electricity_rate_increases, self.alpha_electricity_rate_decreases)\n add = \"step={0:d}_BER={1:s}_BCL={2:s}_TCL={3:s}_\".format(self.time_step, self.base_elec_rate_file[:-4], self.base_charg_load_file[:-4], self.target_charg_load_file[:-4])\n CMA = \"CMA_\" if plot_CMAs else \"\"\n plot_type = \"all\"\n filename = self.folder + self.datecut + \"/\" + alph + CMA + add + plot_type + \".png\"\n pu.save_figure(fig, filename)\n \n def plot(self, plot_type=\"elec_rate\", plot_CMAs=False, save=False):\n fig,ax = plt.subplots(figsize=self.figsize)\n \n if plot_type==\"elec_rates\":\n y_cols = self.elec_rates.copy()\n ylabel = \"Electricity rate [USD/kWh]\"\n ylim = (0, None)\n elif plot_type==\"charg_loads\":\n y_cols2 = self.charg_loads.copy()\n y_cols = y_cols2\n ylabel = \"Charging load [kW]\"\n ylim = (0, None)\n elif plot_type==\"rel_charg_load\":\n y_cols = [plot_type]\n ylabel = \"Resulting/Baseline - 1\"\n ylim = (None, None)\n \n if plot_CMAs:\n for k in range(len(y_cols)):\n y_cols[k] = \"CMA_\" + y_cols[k]\n \n #plot\n for y in y_cols:\n ax.plot(self.x, self.df[y], label=self.labels[y], color=self.colors[y], zorder=self.zorders[y])\n \n ax.set_xlabel(self.xlabel)\n 
plt.xticks(rotation=30, ha=\"right\")\n        ax.set_ylabel(ylabel)\n        ax.legend(bbox_to_anchor=(1, 0.5), loc=\"center left\")\n        # ax.legend(bbox_to_anchor=(0,1.02,1,0.2), loc=\"lower left\",\n        #           mode=\"expand\", borderaxespad=0)\n        plt.tight_layout()\n        # ax.grid()\n        ax.set_ylim(ylim)\n        \n        if plot_type==\"rel_charg_load\":\n            ax.axhline(0, color=\"grey\", alpha=0.6, zorder=-1)\n            ax.get_legend().remove()\n        \n        if save:\n            alph = \"alpha_in={0:.1f}_alpha_de={1:.1f}/\".format(self.alpha_electricity_rate_increases, self.alpha_electricity_rate_decreases)\n            add = \"step={0:d}_BER={1:s}_BCL={2:s}_TCL={3:s}_\".format(self.time_step, self.base_elec_rate_file[:-4], self.base_charg_load_file[:-4], self.target_charg_load_file[:-4])\n            CMA = \"CMA_\" if plot_CMAs else \"\"\n            filename = self.folder + self.datecut + \"/\" + alph + CMA + add + plot_type + \".png\"\n            pu.save_figure(fig, filename)\n    \n    def print_results(self, plot_CMAs=False, save=False):\n        self.plot_BCL(plot_CMAs, save)\n        self.plot(\"elec_rates\", plot_CMAs, save)\n        self.plot(\"charg_loads\", plot_CMAs, save)\n        self.plot(\"rel_charg_load\", plot_CMAs, save)\n        self.plot_charg_loads_rel(plot_CMAs, save)\n        self.plot_all_in_one(plot_CMAs, save)\n    \n    # def alpha(self, BCL, TCL):\n    def alpha(self, i):\n        BCL = self.model.base_charg_load[i]\n        TCL = self.model.target_charg_load[i]\n        cond = BCL >= TCL\n        \n        if cond: #BCL>=TCL, OER>=BER\n            alpha = self.alpha_electricity_rate_increases #0.2\n        else: #BCL<TCL, OER<=BER\n            alpha = self.alpha_electricity_rate_decreases\n        \n        return alpha\n\n\ndef objective(model):\n    \"\"\"\n    Objective function: minimizes the squared deviation between the resulting \n    charging load and the target charging load, summed over all time bins.\n    \"\"\"\n    \n    return sum((model.base_charg_load[i] * (1 - model.alpha[i]*(model.opt_elec_rate[i]-model.base_elec_rate[i])/model.base_elec_rate[i]) - model.target_charg_load[i])**2 for i in model.t)\n\ndef constraint_sustain_total_charged_energy_up(model):\n    \"\"\"\n    Constrains the total charged energy to a certain tolerance *above* the \n    baseline total charged energy.\n    \"\"\"\n    \n    return sum(model.base_charg_load[i] * (1 - model.alpha[i]*(model.opt_elec_rate[i]-model.base_elec_rate[i])/model.base_elec_rate[i]) for i in model.t) / sum(model.base_charg_load[i] for i in model.t) <= 1+model.tolerance_total_charged_energy_up\n\ndef constraint_sustain_total_charged_energy_down(model):\n    \"\"\"\n    Constrains the total charged energy to a certain tolerance *below* the \n    baseline total charged energy.\n    \"\"\"\n    \n    return sum(model.base_charg_load[i] * (1 - model.alpha[i]*(model.opt_elec_rate[i]-model.base_elec_rate[i])/model.base_elec_rate[i]) for i in model.t) / sum(model.base_charg_load[i] for i in model.t) >= 1-model.tolerance_total_charged_energy_down\n\ndef constraint_max_electricity_rate_change_up(model, i):\n    \"\"\"\n    Constrains the electricity rate *increase* induced by the model to a \n    certain amount above the baseline electricity price.\n    \"\"\"\n    \n    return (model.opt_elec_rate[i] - model.base_elec_rate[i]) <= model.max_electricity_rate_change_up\n\ndef constraint_max_electricity_rate_change_down(model, i):\n    \"\"\"\n    Constrains the electricity rate *decrease* induced by the model to a \n    certain amount below the baseline electricity price.\n    \"\"\"\n    \n    return -(model.opt_elec_rate[i] - model.base_elec_rate[i]) <= model.max_electricity_rate_change_down\n","repo_name":"steffen-coe/EVCLOM","sub_path":"classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":31963,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"4"} +{"seq_id":"16439026629","text":"import logging\nfrom base64 import b64decode, b64encode\n\nimport numpy as np\nimport numpy.typing as npt\nimport onnx\nfrom orjson import orjson\nfrom pynumaflow.mapper import Datum, Messages, Message\nimport onnxruntime as ort\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass InferenceUDF:\n    __slots__ = (\"model\", \"model_key\", \"ort_sess\")\n\n    def __init__(self, model_path: str, model_key: str | None = None):\n        self.model = onnx.load(model_path)\n        self.model_key = model_key or model_path\n        onnx.checker.check_model(self.model)\n        self.ort_sess = ort.InferenceSession(model_path)\n\n    def __call__(self, keys: list[str], datum: Datum) -> Messages:\n        return self.exec(keys, datum)\n\n    def compute(self, input_: npt.NDArray[float]) -> npt.NDArray[float]:\n        outputs = self.ort_sess.run(None, {\"input\": input_})\n        return outputs[0]\n\n    @staticmethod\n    def decode_payload(encoded: bytes) -> dict[str, list]:\n        return orjson.loads(b64decode(encoded))\n\n    @staticmethod\n    def encode_payload(payload: dict[str, list]) -> str:\n        return b64encode(orjson.dumps(payload)).decode()\n\n    def exec(self, keys: list[str], datum: Datum) -> Messages:\n
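        # Expected input shape (an assumption inferred from decode_payload/\n        # encode_payload above, not from any upstream spec): a JSON object whose\n        # \"payload\" field is base64-encoded orjson bytes of a dict with a \"data\"\n        # list, roughly {\"payload\": b64encode(orjson.dumps({\"data\": [[1.0, 2.0]]}))}.\n        mlserve_request = 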
orjson.loads(datum.value)\n _LOGGER.debug(\"Input: %s\", mlserve_request)\n\n try:\n payload = self.decode_payload(mlserve_request[\"payload\"])\n except KeyError:\n raise RuntimeError(\"Payload not found in request!\") from None\n\n x = np.asarray(payload[\"data\"], dtype=np.float32)\n y = self.compute(x)\n\n mlserve_request[\"payload\"] = self.encode_payload(\n {\n \"data\": y.tolist(),\n \"model_key\": self.model_key,\n }\n )\n\n _LOGGER.debug(\"Output: %s\", mlserve_request)\n return Messages(Message(orjson.dumps(mlserve_request), keys=keys))\n","repo_name":"numaproj-labs/numaserve","sub_path":"serve/src/udf.py","file_name":"udf.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"71107064438","text":"from phdi.containers.base_service import BaseService\nfrom app.routers import (\n fhir_harmonization_standardization,\n fhir_geospatial,\n fhir_linkage_link,\n fhir_transport_http,\n cloud_storage,\n)\nfrom app.config import get_settings\nfrom pathlib import Path\n\n# Read settings immediately to fail fast in case there are invalid values.\nget_settings()\n\n# Instantiate FastAPI via PHDI's BaseService class\napp = BaseService(\n service_name=\"PHDI Ingestion Service\",\n description_path=Path(__file__).parent.parent / \"description.md\",\n).start()\n\napp.include_router(fhir_harmonization_standardization.router)\napp.include_router(fhir_geospatial.router)\napp.include_router(fhir_linkage_link.router)\napp.include_router(fhir_transport_http.router)\napp.include_router(cloud_storage.router)\n","repo_name":"CDCgov/phdi","sub_path":"containers/ingestion/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"4"} +{"seq_id":"29674055429","text":"def intersection(a, b):\n set_items = set(a)\n # result = []\n # for el in b:\n # if el in set_items:\n # result.append(el)\n\n val = [ el for el in b if el in set_items]\n return sorted(val)\n\nprint(intersection([4,2,1,6], [3,6,9,2,10])) # -> [2,6]\nprint(intersection([2,4,6], [4,2])) # -> [2,4]","repo_name":"ThiagoDe/Python_structy","sub_path":"array_string/intersection.py","file_name":"intersection.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"} +{"seq_id":"27027372415","text":"import logging\nimport math\nimport multiprocessing as mp\nimport os\nimport pickle\nimport sys\nfrom glob import glob\nfrom itertools import cycle\nfrom random import sample\n\nimport pandas as pd\nimport allel\nimport numpy as np\nfrom numpy.random import default_rng\nfrom tqdm import tqdm\n\nfrom timesweeper.utils import snp_utils as su\nfrom timesweeper.utils.gen_utils import (get_rep_id,\n get_scenario_from_filename,\n read_config)\nfrom timesweeper.utils.hap_utils import getTSHapFreqs, haps_to_strlist\n\nlogging.basicConfig()\nlogger = logging.getLogger(\"make_training_feats\")\nlogger.setLevel(\"INFO\")\n\nimport warnings\n\nwarnings.filterwarnings(\"error\")\n\n\ndef draw_rand_center_offset(max_offset=700):\n rng = default_rng(np.random.seed(int.from_bytes(os.urandom(4), byteorder=\"little\")))\n return int(rng.uniform(-max_offset, max_offset, 1)[0])\n\n\ndef add_missingness(data, m_rate, nan_val=-1):\n rng = default_rng(np.random.seed(int.from_bytes(os.urandom(4), byteorder=\"little\")))\n missing = rng.binomial(1, m_rate, data.shape)\n data[missing == 1] = 
nan_val\n\n return data\n\n\ndef check_freq_increase(ts_afs, min_increase=0.25):\n \"\"\"Quick test to make sure a given example is increasing properly, can be used to filter training.\"\"\"\n center = int(ts_afs.shape[0] / 2)\n if ts_afs[-1, center] - ts_afs[0, center] >= min_increase:\n return True\n else:\n return False\n\n\ndef subsample_tps(inds_per_tp, og_tps, num_sampled_tps):\n \"\"\"Subsamples timepoints during VCF by sampling samples list.\"\"\"\n samples_list = []\n if num_sampled_tps == 1:\n t = og_tps + 1 # VCFs merged samples are 1-indexed, get last\n samples_list.extend([f\"{t}:i{i}\" for i in range(inds_per_tp)])\n elif num_sampled_tps == 2:\n # This covers the first sampling point\n samples_list.extend([f\"i{i}\" for i in range(inds_per_tp)])\n t = og_tps + 1 # VCFs merged samples are 1-indexed, get last\n samples_list.extend([f\"{t}:i{i}\" for i in range(inds_per_tp)])\n else:\n tps = np.linspace(1, og_tps, num_sampled_tps, dtype=int)\n # This covers the first sampling point\n samples_list.extend([f\"i{i}\" for i in range(inds_per_tp)])\n # This gets the rest\n for t in tps[1:]:\n samples_list.extend([f\"{t}:i{i}\" for i in range(inds_per_tp)])\n\n return samples_list\n\n\ndef subsample_inds(inds_per_tp, subsample_size, num_tps):\n \"\"\"Subsamples individuals from each timepoint to re-use simulations.\"\"\"\n samples_list = []\n samples_list.extend(\n [f\"i{i}\" for i in np.random.choice(inds_per_tp, subsample_size, replace=False)]\n )\n for s in range(2, num_tps + 1):\n samples_list.extend(\n [\n f\"{s}:i{i}\"\n for i in np.random.choice(inds_per_tp, subsample_size, replace=False)\n ]\n )\n return samples_list\n\n\ndef get_window_idxs(center_idx, win_size):\n \"\"\"\n Gets the win_size number of snps around a central snp.\n\n Args:\n center_idx (int): Index of the central SNP to use for the window.\n win_size (int): Size of window to use around the SNP, optimally odd number.\n\n Returns:\n list: Indices of all SNPs to grab for the feature matrix.\n \"\"\"\n half_window = math.floor(win_size / 2)\n return list(range(center_idx - half_window, center_idx + half_window + 1))\n\n\ndef prep_ts_aft(genos, samp_sizes):\n \"\"\"\n Iterates through timepoints and creates MAF feature matrices.\n\n Args:\n genos (allel.GenotypeArray): Genotype array containing all timepoints.\n samp_sizes (list[int]): Number of chromosomes sampled at each timepoint.\n\n Returns:\n np.arr: MAF array to use for predictions. Shape is (timepoints, MAF).\n \"\"\"\n # Prep genos into time-series format and calculate Maft\n ts_genos = su.split_arr(genos, samp_sizes)\n\n min_alleles, first_genos, last_genos = su.get_vel_minor_alleles(\n ts_genos, np.max(genos)\n )\n\n ts_maft = []\n for timepoint in ts_genos:\n _genos = []\n _genotypes = allel.GenotypeArray(timepoint).count_alleles(\n max_allele=min_alleles.max()\n )\n\n for snp, min_allele_idx in zip(_genotypes, min_alleles):\n maf = su.calc_maft(snp, min_allele_idx)\n _genos.append(maf)\n\n ts_maft.append(_genos)\n\n return np.stack(ts_maft)\n\n\ndef get_aft_central_window(\n snps, genos, samp_sizes, win_size, missingness, mut_types, offset\n):\n \"\"\"\n Check for whether a non-control mutation type is present. If not, return the central-most mutation.\n\n Args:\n snps (list[tup(chrom, pos, mut)]): Tuples of information for each SNP.\n genos (allel.GenotypeArray): Genotypes of all samples.\n samp_sizes (list[int]): Number of chromosomes sampled at each timepoint.\n win_size (int): Number of SNPs to use for each prediction. 
Needs to match how NN was trained.\n scenario (str): Entry from the scenarios config option.\n missingness (float): Parameter of binomial distribution to pull missingness from.\n mut_types (list[int]): List of mutation types that are not considered the \"control\" case.\n Returns:\n np.arr: The central-most window, either based on mutation type or closest to half size of chrom.\n float: Selection coefficient.\n \"\"\"\n ts_aft = prep_ts_aft(genos, samp_sizes)\n\n buffer = int(win_size / 2)\n centers = range(buffer, len(snps) - buffer)\n\n center_idx = int(len(centers) / 2)\n\n try:\n for center in centers:\n if snps[center][2] in mut_types:\n center_idx = center\n break\n else:\n pass\n except:\n pass\n\n sel_coeff = snps[center_idx][3]\n\n if offset:\n rng = default_rng(\n np.random.seed(int.from_bytes(os.urandom(4), byteorder=\"little\"))\n )\n if rng.uniform(0, 3, 1)[0] > 2:\n rand_offset = draw_rand_center_offset()\n center_idx += rand_offset\n else:\n rand_offset = 0\n else:\n rand_offset = 0\n\n win_idxs = get_window_idxs(center_idx, win_size)\n\n window = ts_aft[:, win_idxs]\n center_aft = window\n\n missing_center_aft = add_missingness(\n center_aft, m_rate=missingness\n ) # If no missingness, will just return\n\n return missing_center_aft, sel_coeff, rand_offset\n\n\ndef get_hft_central_window(snps, haps, samp_sizes, win_size, mut_types, offset):\n \"\"\"\n Iterates through windows of MAF time-series matrix and gets the central window.\n Does not have as many utility functions as AFT such as missingness and variable sorting methods.\n\n Args:\n snps (list[tup(chrom, pos, mut)]): Tuples of information for each SNP.\n haps (np.arr): Haplotypes of all samples.\n samp_sizes (list[int]): Number of chromosomes sampled at each timepoint.\n win_size (int): Number of SNPs to use for each prediction. 
Needs to match how NN was trained.\n Returns:\n np.arr: The central-most window, either based on mutation type or closest to half size of chrom.\n \"\"\"\n buffer = int(win_size / 2)\n centers = range(buffer, len(snps) - buffer)\n\n center_idx = int(len(centers) / 2)\n\n try:\n for center in centers:\n if snps[center][2] in mut_types:\n center_idx = center\n break\n else:\n pass\n except:\n pass\n\n sel_coeff = snps[center_idx][3]\n\n if offset:\n rng = default_rng(\n np.random.seed(int.from_bytes(os.urandom(4), byteorder=\"little\"))\n )\n if rng.uniform(1, 3, 1)[0] > 2:\n rand_offset = draw_rand_center_offset()\n center_idx += rand_offset\n\n else:\n rand_offset = 0\n else:\n rand_offset = 0\n\n win_idxs = get_window_idxs(center_idx, win_size)\n window = np.swapaxes(haps[win_idxs, :], 0, 1)\n str_window = haps_to_strlist(window)\n central_hfs = getTSHapFreqs(str_window, samp_sizes)\n\n return central_hfs, sel_coeff, rand_offset\n\n\ndef aft_worker(\n in_vcf,\n mut_types,\n scenarios,\n samp_sizes,\n samps_list,\n win_size,\n offset,\n missingness,\n verbose=False,\n params=None,\n):\n benchmark = True # Want to get all the info we can from sims in training\n try:\n id = get_rep_id(in_vcf)\n scenario = get_scenario_from_filename(in_vcf, scenarios)\n\n vcf = su.read_vcf(in_vcf, samps_list, benchmark)\n genos, snps = su.vcf_to_genos(vcf, benchmark)\n\n central_aft, sel_coeff, rand_offset = get_aft_central_window(\n snps, genos, samp_sizes, win_size, missingness, mut_types, offset\n )\n \n if params is not None:\n sel_coeff = params[(params[\"rep\"] == int(id)) & (params[\"sweep\"] == scenario)][\"selCoeff\"].values[0]\n\n if \"neut\" not in scenario.lower() and sel_coeff == 0.0:\n raise Exception\n\n return id, scenario, central_aft, sel_coeff, rand_offset\n\n except UserWarning as Ue:\n print(Ue)\n return None\n except Exception as e:\n if verbose:\n logger.warning(f\"Could not process {in_vcf}\")\n logger.warning(f\"Exception: {e}\")\n sys.stdout.flush()\n sys.stderr.flush()\n return None\n\n\ndef hft_worker(\n in_vcf,\n mut_types,\n scenarios,\n samp_sizes,\n samps_list,\n win_size,\n offset,\n ploidy=2,\n verbose=False,\n params=None,\n):\n benchmark = True\n try:\n id = get_rep_id(in_vcf)\n scenario = get_scenario_from_filename(in_vcf, scenarios)\n\n vcf = su.read_vcf(in_vcf, samps_list, benchmark)\n haps, snps = su.vcf_to_haps(vcf, benchmark)\n\n central_hft, sel_coeff, rand_offset = get_hft_central_window(\n snps, haps, [ploidy * i for i in samp_sizes], win_size, mut_types, offset\n )\n\n if params is not None:\n sel_coeff = params[(params[\"rep\"] == int(id)) & (params[\"sweep\"] == scenario)][\"selCoeff\"].values[0]\n\n return id, scenario, central_hft, sel_coeff, rand_offset\n\n except UserWarning as Ue:\n # print(Ue)\n return None\n\n except Exception as e:\n if verbose:\n logger.warning(f\"Could not process {in_vcf}\")\n logger.warning(f\"Exception: {e}\")\n sys.stdout.flush()\n sys.stderr.flush()\n return None\n\n\ndef main(ua):\n yaml_data = read_config(ua.yaml_file)\n scenarios, mut_types, work_dir, samp_sizes, ploidy, win_size, threads = (\n yaml_data[\"scenarios\"],\n yaml_data[\"mut types\"],\n yaml_data[\"work dir\"],\n yaml_data[\"sample sizes\"],\n yaml_data[\"ploidy\"],\n yaml_data[\"win_size\"],\n ua.threads,\n )\n if ua.paramsfile:\n params = pd.read_csv(ua.paramsfile, sep=\"\\t\")\n else:\n params = None\n \n if ua.subsample_inds:\n if ua.subsample_tps:\n logger.error(\"Can't subsample both timepoints and individuals.\")\n\n samps_list = 
subsample_inds(samp_sizes[0], ua.subsample_inds, len(samp_sizes))\n else:\n samps_list = None\n\n if ua.subsample_tps:\n if not ua.og_tps:\n logger.error(\n \"Must provide original simulation number of tps in order to properly subset.\"\n )\n\n samps_list = subsample_tps(samp_sizes[0], ua.og_tps, ua.subsample_tps)\n\n if ua.allow_shoulders:\n offset = int(ua.allow_shoulders)\n else:\n offset = 0\n\n filelist = glob(f\"{work_dir}/vcfs/*/*/merged.vcf\", recursive=True)\n\n aft_work_args = zip(\n filelist,\n cycle([mut_types]),\n cycle([scenarios]),\n cycle([samp_sizes]),\n cycle([samps_list]),\n cycle([win_size]),\n cycle([offset]),\n cycle([ua.missingness]),\n cycle([ua.verbose]),\n cycle([params]),\n )\n hft_work_args = zip(\n filelist,\n cycle([mut_types]),\n cycle([scenarios]),\n cycle([samp_sizes]),\n cycle([samps_list]),\n cycle([win_size]),\n cycle([offset]),\n cycle([int(ploidy)]),\n cycle([ua.verbose]),\n cycle([params]),\n )\n print(\"[INFO] Starting run\")\n debug = False\n if debug:\n aft_work_res = []\n for i in tqdm(aft_work_args, desc=\"AFT\", total=len(filelist)):\n aft_work_res.append(aft_worker(*i))\n \n hft_work_res = []\n for i in tqdm(hft_work_args, desc=\"HFT\", total=len(filelist)):\n hft_work_res.append(hft_worker(*i))\n\n else:\n pool = mp.Pool(threads)\n if ua.no_progress:\n aft_work_res = pool.starmap(aft_worker, aft_work_args, chunksize=4,)\n\n if ua.hft:\n hft_work_res = pool.starmap(hft_worker, hft_work_args, chunksize=4,)\n\n pool.close()\n else:\n aft_work_res = pool.starmap(\n aft_worker,\n tqdm(\n aft_work_args,\n desc=\"Formatting AFT training data\",\n total=len(filelist),\n ),\n chunksize=4,\n )\n if ua.hft:\n hft_work_res = pool.starmap(\n hft_worker,\n tqdm(\n hft_work_args,\n desc=\"Formatting HFT training data\",\n total=len(filelist),\n ),\n chunksize=4,\n )\n pool.close()\n\n pickle_dict = {}\n for s in scenarios:\n pickle_dict[s] = {}\n\n for res in aft_work_res:\n if res:\n rep, scenario, aft, s, off = res\n\n pickle_dict[scenario][rep] = {}\n pickle_dict[scenario][rep][\"aft\"] = aft\n pickle_dict[scenario][rep][\"sel_coeff\"] = s\n pickle_dict[scenario][rep][\"center_offset\"] = off\n\n if ua.hft:\n for res in hft_work_res:\n try:\n if res:\n rep, scenario, hft, s, off = res\n if rep in pickle_dict[scenario].keys():\n pickle_dict[scenario][rep][\"hft\"] = hft\n pickle_dict[scenario][rep][\"sel_coeff\"] = s\n pickle_dict[scenario][rep][\"center_offset\"] = off\n\n except KeyError as e:\n print(e)\n print(res)\n pass\n\n with open(ua.outfile, \"wb\") as outfile:\n pickle.dump(pickle_dict, outfile)\n","repo_name":"SchriderLab/Timesweeper","sub_path":"timesweeper/make_training_features.py","file_name":"make_training_features.py","file_ext":"py","file_size_in_byte":14499,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"4"} +{"seq_id":"14764240701","text":"\"\"\"Building pygame, and doing stuff when things are merged.\n\ngithub.com related webhook integration.\n\"\"\"\nimport os\nimport json\nfrom subprocess import run\nimport hmac\nimport hashlib\nfrom flask import request, abort\n\n\ngithub_url = os.environ.get('APP_GITHUB_WEBHOOK_URL', '/github')\ngithub_secret = bytes(os.environ.get('APP_GITHUB_WEBHOOK_SECRET', ''), 'utf8')\n\ndef verify_github(secret, body, x_hub_signature):\n \"\"\" https://developer.github.com/webhooks/securing/\n \"\"\"\n signature = 'sha1=' + hmac.new(secret, body, hashlib.sha1).hexdigest()\n return signature == x_hub_signature\n\ndef add_builds(app):\n \"\"\" to the app.\n 
\"\"\"\n\n @app.route(github_url, methods=['POST', 'GET'])\n def github_webhook(**kwargs):\n \"\"\"Called by github when something happens.\n\n When something goes on master branch, we:\n - build the documentation,\n - update the launchpad badge.\n\n https://developer.github.com/webhooks/\n \"\"\"\n if not verify_github(github_secret,\n request.get_data(),\n request.headers.get('X-Hub-Signature', '')):\n abort(404)\n\n payload = request.form.get('payload', None)\n if payload:\n data = json.loads(payload)\n if hasattr(data, 'get'):\n if data.get('ref', None) == 'refs/heads/master':\n run(['pygameweb_update_docs'])\n run(['pygameweb_launchpad'])\n return 'sweet'\n","repo_name":"pygame/pygameweb","sub_path":"pygameweb/builds/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","stars":113,"dataset":"github-code","pt":"4"} +{"seq_id":"73645605877","text":"class Node:\n def __init__(self,data=None):\n self.data = data\n self.next = None\n\ndef printList(head):\n curr = head\n llstr = ' '\n while curr:\n llstr += str(curr.data)+'--->' if curr.next else str(curr.data)\n curr = curr.next\n\n print(llstr)\ndef merge_sorted(head1,head2):\n dummy = Node()\n tail = dummy\n while head1 and head2:\n if head1.data < head2.data:\n tail.next = head1\n head1= head1.next\n else:\n tail.next = head2\n head2 = head2.next\n tail = tail.next\n if head1:\n tail.next = head1\n if head2:\n tail.next = head2\n return dummy.next\n\nhead1 = Node(10)\nhead1.next = Node(20)\nhead1.next.next = Node(30)\nhead2 = Node(11)\nhead2.next = Node(19)\nhead2.next.next = Node(45)\nprintList(head1)\nprintList(head2)\nnew_head = merge_sorted(head1,head2)\n\nprintList(new_head)\n","repo_name":"arunpoy/python_code","sub_path":"Linkedlist/mergelists1.py","file_name":"mergelists1.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"38956535185","text":"#\n# @lc app=leetcode id=49 lang=python3\n#\n# [49] Group Anagrams\n#\n\n# @lc code=start\nclass Solution:\n def groupAnagrams(self, strs: List[str]) -> List[List[str]]:\n hashmap = defaultdict(list)\n for s in strs:\n counts = [0] * 26\n for c in s:\n counts[ord(c) - ord('a')] += 1\n hashmap[tuple(counts)].append(s)\n return(hashmap.values())\n\n# @lc code=end\n\n","repo_name":"seyys/leetcode","sub_path":"49.group-anagrams.py","file_name":"49.group-anagrams.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"8020093274","text":"# 연산자 운선순위\nimport copy\nimport itertools\nfrom collections import deque\n\n\ndef solution(expression):\n # 연산자 우선순위\n priority = list(itertools.permutations(['+', '-', '*'], 3))\n print(priority)\n\n # 데이터 추출\n data = deque()\n operator = deque()\n cnt = 0\n for i in range(len(expression)):\n if expression[i] == '-' or expression[i] == '+' or expression[i] == '*':\n data.append(int(expression[cnt:i]))\n operator.append((expression[i]))\n cnt = i+1\n data.append(int(expression[cnt:i+1]))\n # 연산 진행\n data_backup = copy.deepcopy(data)\n operator_backup = copy.deepcopy(operator)\n MAX = 0\n for pri in priority:\n data = copy.deepcopy(data_backup)\n operator = copy.deepcopy(operator_backup)\n for command in pri:\n while True:\n if command in operator:\n idx = operator.index(command)\n if command == '+':\n data[idx] += data[idx+1]\n elif command == '-':\n data[idx] -= data[idx+1]\n else:\n 
data[idx] *= data[idx+1]\n del data[idx+1]\n del operator[idx]\n else:\n break;\n MAX = max(MAX,abs(data[0]))\n return MAX\nexpression = input()\nprint(solution(expression))\n","repo_name":"MyaGya/Python_Practice","sub_path":"Programers_backup/intern_2020_2.py","file_name":"intern_2020_2.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"12268776182","text":"import pandas as pd\n\ninicio = int(input(\"Por favor ingrese el año incial de las ventas: \"))\nfin = int(input(\"Por favor ingrese el año final de las ventas: \"))\nventas ={}\n\nfor i in range (inicio, fin+1):\n ventas[i]=float(input(f\"Ingrese las ventas de {i}: \"))\n\nventas = pd.Series(ventas)\nprint()\nprint(\"Ventas por años \\n\")\nprint(f\"ventas con descuento \\n {ventas*0.9}\")\n","repo_name":"NickArregoces/Mision_Tic","sub_path":"Ciclo1/Unidad4/pandas7.py","file_name":"pandas7.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"30737616869","text":"'''Input validation module\r\n\r\nContains some functions that validate user input'''\r\n\r\ndef int_input_validation(message, low_limit=1, high_limit=65536, error_message=\"Incorrect input!\"):\r\n '''User input validation\r\n\r\nThe function checks if user input meets set requirements'''\r\n user_input = 0\r\n #we ask user to enter an integer number between low_limit and high_limit\r\n #until he enters correctly\r\n while True:\r\n user_input = input(message)\r\n #if user input meets the requirments...\r\n if user_input.isdecimal() and (int(user_input) in range(low_limit,high_limit+1)):\r\n #we return user input\r\n return int(user_input)\r\n else:\r\n #otherwise we print the error message and continue the loop\r\n print(error_message)","repo_name":"xASiDx/other-side","sub_path":"input_validation.py","file_name":"input_validation.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"36289913903","text":"from picamera import PiCamera\nfrom time import sleep\nfrom datetime import datetime\ncamera = PiCamera()\nnow = datetime.now()\nfrom sense_hat import SenseHat\nsense = SenseHat()\nsense.set_rotation(180)\ncamera.rotation = 90\ncamera.start_preview()\nsense.clear((100,100,100))\nsleep(1)\nsense.show_letter(\"3\", text_colour=[255,255,255],back_colour=[20,20,20])\nsleep(1)\nsense.show_letter(\"2\", text_colour=[255,255,255],back_colour=[20,20,20])\nsleep(1)\nsense.show_letter(\"1\", text_colour=[255,255,255],back_colour=[20,20,20])\nsleep(1)\nsense.clear()\n\nfilename = \"{0:%Y}-{0:%m}-{0:%d}-{0:%H}-{0:%M}-{0:%S}.jpg\".format(now)\ncamera.capture('/home/pi/Desktop/weddingtrialpics/{0}'.format(filename))\ncamera.stop_preview()\n","repo_name":"colinkillmer/daughter","sub_path":"cameralearning.py","file_name":"cameralearning.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"71952234356","text":"class Delegator:\n\n def __init__(self, delegate=None):\n self.delegate = delegate\n self.__cache = set()\n # Cache is used to only remove added attributes\n # when changing the delegate.\n\n def __getattr__(self, name):\n attr = getattr(self.delegate, name) # May raise AttributeError\n setattr(self, name, attr)\n self.__cache.add(name)\n return attr\n\n def resetcache(self):\n \"Removes 
added attributes while leaving original attributes.\"\n        # Function is really about resetting delegator dict\n        # to original state.  Cache is just a means\n        for key in self.__cache:\n            try:\n                delattr(self, key)\n            except AttributeError:\n                pass\n        self.__cache.clear()\n\n    def setdelegate(self, delegate):\n        \"Reset attributes and change delegate.\"\n        self.resetcache()\n        self.delegate = delegate\n\nif __name__ == '__main__':\n    from unittest import main\n    main('idlelib.idle_test.test_delegator', verbosity=2)\n","repo_name":"python/cpython","sub_path":"Lib/idlelib/delegator.py","file_name":"delegator.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":56926,"dataset":"github-code","pt":"4"}
+{"seq_id":"23402336569","text":"import socket\nimport time\nimport random\nimport pickle\n\nHEADERSIZE = 10\n\nserver_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver_socket.bind(('localhost', 5001))\nserver_socket.listen(10)\n\nprint(\"[LISTENING...] Server is listening on localhost:5001\")\n\nwhile True:\n    client_socket, address = server_socket.accept()\n    print(f'[Server] Connection from {address} has been established!')\n\n    # MESSAGE READING STARTS\n\n    l = ['hello', 'people']\n    msg = pickle.dumps(l)\n    # MESSAGE READING ENDS\n\n    msg_length = bytes(f'{len(msg):<{HEADERSIZE}}', 'utf-8') # HEADER\n    msg = msg_length + msg # Actual Message with HEADER\n\n    # WRITING END\n    client_socket.send(msg)\n\n\n\n","repo_name":"ahmedfahad04/Python-Lessons","sub_path":"Python Socket Programming/Youtube_sentdx/Operations/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"30904959078","text":"import sys\nfrom collections import deque\n\nclass Monkey:\n    items: deque[int] # the items this monkey is currently holding\n    count: int # how many times this monkey inspected an item\n    __op = lambda old: old # maps an old worry level to a new worry level\n    __test = lambda wl: 0 # maps a worry level to the monkey ID that should receive this item\n\n    def __init__(self, items: list[int], op, test):\n        self.items = deque(items)\n        self.__op = op\n        self.__test = test\n        self.count = 0\n    \n    def perform_op(self, old: int):\n        return self.__op(old)\n\n    def perform_test(self, wl: int):\n        return self.__test(wl)\n\n\n\ndef parse_op_str(op_str: str):\n    return lambda old: eval(op_str)\n\ndef get_test_lambda(divisible_by: int, if_true_id: int, if_false_id: int):\n    return lambda wl: if_true_id if (wl % divisible_by == 0) else if_false_id\n\ndef get_monkeys(lines: list[str]):\n    monkeys: list[Monkey] = []\n    \n    for i in range(1, len(lines), 7):\n        items = list( map(int, lines[i][16::].split(\", \")) )\n\n        op = parse_op_str(lines[i+1][17::])\n\n        divisible_by = int(lines[i+2][19::])\n        if_true_id = int(lines[i+3][25::])\n        if_false_id = int(lines[i+4][26::])\n        # test = lambda wl: if_true_id if (wl % divisible_by == 0) else if_false_id\n        test = get_test_lambda(divisible_by, if_true_id, if_false_id)\n\n        new_monkey = Monkey(items, op, test)\n        monkeys.append(new_monkey)\n\n    return monkeys\n\ndef perform_round(monkeys: list[Monkey]):\n    for monkey in monkeys:\n        while len(monkey.items) > 0:\n            monkey.count += 1\n            wl = monkey.items.popleft()\n            wl = monkey.perform_op(wl)\n            wl //= 3\n            new_id = monkey.perform_test(wl)\n            monkeys[new_id].items.append(wl)\n\ndef main():\n    lines = sys.stdin.readlines()\n    lines = [l.strip() for l in lines]\n    monkeys = get_monkeys(lines)\n    \n    
for _ in range(20):\n        perform_round(monkeys)\n    \n    counts: list[int] = []\n    for i in range(len(monkeys)):\n        print(f\"Monkey {i} inspected items {monkeys[i].count} times.\")\n        counts.append(monkeys[i].count)\n    print()\n\n    counts.sort(reverse=True)\n    print(f\"monkey business: {counts[0] * counts[1]}\")\n\n\nif __name__ == \"__main__\":\n    main()","repo_name":"olafur-andri/AdventOfCode","sub_path":"AoC_2022/11_monkey_in_the_middle/p1.py","file_name":"p1.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"72407015156","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 29 11:00:57 2020\n\n@author: kartikeysinha\n\"\"\"\n\n#import libraries\nimport pandas as pd\nimport numpy as np\n\n\n#converting csv to txt files\nimport csv\n\nwith open('/Users/kartikeysinha/Desktop/projects/disaster tweets/train.csv','r') as csvin, open('train.txt', 'w') as tsvout:\n    csvin = csv.reader(csvin)\n    tsvout = csv.writer(tsvout, delimiter='\\t')\n\n    for row in csvin:\n        tsvout.writerow(row)\n\n\nwith open('/Users/kartikeysinha/Desktop/projects/disaster tweets/test.csv','r') as csvin, open('test.txt', 'w') as tsvout:\n    csvin = csv.reader(csvin)\n    tsvout = csv.writer(tsvout, delimiter='\\t')\n\n    for row in csvin:\n        tsvout.writerow(row)\n\n\n#import train dataset\ndataset_train = pd.read_csv('train.txt', sep='\\t', quoting = 3)\ndataset_train['keyword'] = dataset_train['keyword'].fillna('no_keyword')\ndataset_train['location'] = dataset_train['location'].fillna('no_location')\ndataset_train['text'] = dataset_train['text'].fillna('no_text')\ndataset_train = dataset_train.dropna(axis=0, how='any')\nX_train = dataset_train.iloc[:, -2].values\ny_train = dataset_train.iloc[:, -1].values\n\n\n#clean the data for training set\nimport re\nimport nltk\n#nltk.download('stopwords')\nfrom nltk.corpus import stopwords #list of non-necessary words\nfrom nltk.stem.porter import PorterStemmer #used to convert words to stem word\n\ncolumns = len(X_train)\ncorpus_train = []\n\nfor i in range(0, columns):\n    tweet = re.sub('[^a-zA-Z]', ' ', X_train[i])\n    tweet = tweet.lower()\n    tweet = tweet.split()\n    ps = PorterStemmer()\n    stops = set(stopwords.words('english'))\n    tweet = [ps.stem(word) for word in tweet if not word in stops]\n    tweet = ' '.join(tweet)\n    corpus_train.append(tweet)\n\n\n#Bag of Words Model\nfrom sklearn.feature_extraction.text import CountVectorizer\ncv = CountVectorizer(max_features = 2000)\nX_train = cv.fit_transform(corpus_train).toarray()\n\n\n#train model through Random Forest Classification\nfrom sklearn.ensemble import RandomForestClassifier\nclassifier = RandomForestClassifier(n_estimators=20, criterion=\"entropy\")\nclassifier.fit(X_train, y_train)\n\n\n#import test dataset\ndataset_test = pd.read_csv('test.txt', sep='\\t', quoting = 3)\ndataset_test['keyword'] = dataset_test['keyword'].fillna('no_keyword')\ndataset_test['location'] = dataset_test['location'].fillna('no_location')\ndataset_test['text'] = dataset_test['text'].fillna('no_text')\nX_test = dataset_test.iloc[:, -1].values\nX_test_ids = dataset_test.iloc[:, 0].values\n\n\n#clean the data for test set\ncolumns = len(X_test)\ncorpus_test = []\n\nfor i in range(0, columns):\n    tweet = re.sub('[^a-zA-Z]', ' ', X_test[i])\n    tweet = tweet.lower()\n    tweet = tweet.split()\n    ps = PorterStemmer()\n    stops = set(stopwords.words('english'))\n    tweet = [ps.stem(word) for word in tweet if not word in stops]\n    tweet = ' '.join(tweet)\n    
corpus_test.append(tweet)\n\nX_test = np.array(corpus_test)\n\n\n#predict\ny_pred = classifier.predict(cv.transform(X_test))\n\n\n#convert to CSV\ndf = pd.DataFrame({\"Id\" : X_test_ids, \"target\" : y_pred})\ndf.to_csv(\"submission.csv\", index=False)\n\n","repo_name":"kartikeysinha/Disaster_tweets","sub_path":"disaster_twitter_nlp.py","file_name":"disaster_twitter_nlp.py","file_ext":"py","file_size_in_byte":3145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"26653365664","text":"if 1 < 2:\n    print(\"YES!!\")\n\n\n#for loop\n# seq = [1,5,2,3,4,2,5]\n# for item in seq:\n#     if item == 5:\n#         print(\"Nothing\")\n#     else:\n#         print(item)\n\n\n\n# mypairs = [(1,3),(3,4),(5,7)]\n# for item in mypairs:\n#     print(item)\n#\n# for tup1, tup2 in mypairs:\n#     print(tup1)\n\n\n\n# for item in range(10):\n#     print(item)\n\nseq = [5,4,1,5,3,2]\nmylist = []\nfor item in seq:\n    mylist.append(item**2)\n\nprint(mylist)\n","repo_name":"sabbirhossain540/python_part_one","sub_path":"control_flow.py","file_name":"control_flow.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"}
+{"seq_id":"32653155021","text":"import pygame\nfrom pygame.font import Font\n\nBLACK_COLOR = pygame.Color('black')\nWHITE_COLOR = pygame.Color('white')\nGRAY_COLOR = pygame.Color('dimgray')\nBLUE_COLOR = pygame.Color('blue')\nDARK_GREEN_COLOR = pygame.Color('forestgreen')\nGREEN_COLOR = pygame.Color('green')\nRED_COLOR = pygame.Color('red')\n\nFPS = 7\n\nTILE = 50\nW_TILES, H_TILES = 25, 15\nSCREEN_SIZE = WIDTH, HEIGHT = W_TILES * TILE, H_TILES * TILE\n\ndef draw(screen):\n    screen.fill(BLACK_COLOR)\n\n    # vertical lines\n    for x in range(0, WIDTH, TILE):\n        pygame.draw.line(screen, GRAY_COLOR, (x, 0), (x, HEIGHT))\n\n    # horizontal lines\n    for y in range(0, HEIGHT, TILE):\n        pygame.draw.line(screen, GRAY_COLOR, (0, y), (WIDTH, y))\n\n    # debug text\n    text_surface = font.render(f\"fps: {int(clock.get_fps())}\", 1, WHITE_COLOR)\n    screen.blit(text_surface, (5, 5))\n\n\npygame.init()\npygame.display.set_caption(\"Snake\")\nscreen = pygame.display.set_mode(SCREEN_SIZE)\nfont = Font(None, 24)\nclock = pygame.time.Clock()\n\nis_running = True\nwhile is_running:\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            is_running = False\n\n    draw(screen)\n    pygame.display.flip()\n    clock.tick(FPS)\n\npygame.quit()","repo_name":"denorlov/PyGame","sub_path":"03_advanced/Snake/skeleton.py","file_name":"skeleton.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"}
+{"seq_id":"37368878438","text":"import sys\nimport subprocess\n\n# from fHDHR.exceptions import TunerError\n\n\nclass FFMPEG_Stream():\n\n    def __init__(self, fhdhr, stream_args, tuner):\n        self.fhdhr = fhdhr\n        self.stream_args = stream_args\n        self.tuner = tuner\n\n        self.bytes_per_read = int(self.fhdhr.config.dict[\"ffmpeg\"][\"bytes_per_read\"])\n        self.ffmpeg_command = self.ffmpeg_command_assemble(stream_args)\n\n    def get(self):\n\n        ffmpeg_proc = subprocess.Popen(self.ffmpeg_command, stdout=subprocess.PIPE)\n\n        def generate():\n            try:\n                while self.tuner.tuner_lock.locked():\n\n                    chunk = ffmpeg_proc.stdout.read(self.bytes_per_read)\n                    if not chunk:\n                        break\n                        # raise TunerError(\"807 - No Video Data\")\n                    yield chunk\n                    chunk_size = int(sys.getsizeof(chunk))\n                    self.tuner.add_downloaded_size(chunk_size)\n                self.fhdhr.logger.info(\"Connection Closed: Tuner 
Lock Removed\")\n\n except GeneratorExit:\n self.fhdhr.logger.info(\"Connection Closed.\")\n except Exception as e:\n self.fhdhr.logger.info(\"Connection Closed: \" + str(e))\n finally:\n ffmpeg_proc.terminate()\n ffmpeg_proc.communicate()\n ffmpeg_proc.kill()\n self.fhdhr.logger.info(\"Connection Closed: Tuner Lock Removed\")\n self.tuner.close()\n # raise TunerError(\"806 - Tune Failed\")\n\n return generate()\n\n def ffmpeg_command_assemble(self, stream_args):\n ffmpeg_command = [\n self.fhdhr.config.dict[\"ffmpeg\"][\"path\"],\n \"-i\", stream_args[\"channelUri\"],\n ]\n ffmpeg_command.extend(self.ffmpeg_duration(stream_args))\n ffmpeg_command.extend(self.transcode_profiles(stream_args))\n ffmpeg_command.extend(self.ffmpeg_loglevel())\n ffmpeg_command.extend([\"pipe:stdout\"])\n return ffmpeg_command\n\n def ffmpeg_duration(self, stream_args):\n ffmpeg_command = []\n if stream_args[\"duration\"]:\n ffmpeg_command.extend([\"-t\", str(stream_args[\"duration\"])])\n else:\n ffmpeg_command.extend(\n [\n \"-reconnect\", \"1\",\n \"-reconnect_at_eof\", \"1\",\n \"-reconnect_streamed\", \"1\",\n \"-reconnect_delay_max\", \"2\",\n ]\n )\n\n return ffmpeg_command\n\n def ffmpeg_loglevel(self):\n ffmpeg_command = []\n log_level = self.fhdhr.config.dict[\"logging\"][\"level\"].lower()\n\n loglevel_dict = {\n \"debug\": \"debug\",\n \"info\": \"info\",\n \"error\": \"error\",\n \"warning\": \"warning\",\n \"critical\": \"fatal\",\n }\n if log_level not in [\"info\", \"debug\"]:\n ffmpeg_command.extend([\"-nostats\", \"-hide_banner\"])\n ffmpeg_command.extend([\"-loglevel\", loglevel_dict[log_level]])\n return ffmpeg_command\n\n def transcode_profiles(self, stream_args):\n # TODO implement actual profiles here\n \"\"\"\n • heavy: transcode to AVC with the same resolution, frame-rate, and interlacing as the\n original stream. For example 1080i60 AVC 1080i60, 720p60 AVC 720p60. 
\n        • mobile: transcode to AVC progressive not exceeding 1280x720 30fps.\n        • internet720: transcode to low bitrate AVC progressive not exceeding 1280x720 30fps.\n        • internet480: transcode to low bitrate AVC progressive not exceeding 848x480 30fps for\n        16:9 content, not exceeding 640x480 30fps for 4:3 content.\n        • internet360: transcode to low bitrate AVC progressive not exceeding 640x360 30fps for\n        16:9 content, not exceeding 480x360 30fps for 4:3 content.\n        • internet240: transcode to low bitrate AVC progressive not exceeding 432x240 30fps for\n        16:9 content, not exceeding 320x240 30fps for 4:3 content\n        \"\"\"\n\n        if stream_args[\"transcode\"]:\n            self.fhdhr.logger.info(\"Client requested a \" + stream_args[\"transcode\"] + \" transcode for stream.\")\n            stream_args[\"transcode\"] = None\n\n        ffmpeg_command = []\n\n        if not stream_args[\"transcode\"]:\n            ffmpeg_command.extend(\n                [\n                    \"-c\", \"copy\",\n                    \"-f\", \"mpegts\",\n                ]\n                )\n        elif stream_args[\"transcode\"] == \"heavy\":\n            ffmpeg_command.extend([])\n        elif stream_args[\"transcode\"] == \"mobile\":\n            ffmpeg_command.extend([])\n        elif stream_args[\"transcode\"] == \"internet720\":\n            ffmpeg_command.extend([])\n        elif stream_args[\"transcode\"] == \"internet480\":\n            ffmpeg_command.extend([])\n        elif stream_args[\"transcode\"] == \"internet360\":\n            ffmpeg_command.extend([])\n        elif stream_args[\"transcode\"] == \"internet240\":\n            ffmpeg_command.extend([])\n\n        return ffmpeg_command\n","repo_name":"deathbybandaid/fHDHR_PlutoTV","sub_path":"fHDHR/device/tuners/stream/ffmpeg_stream.py","file_name":"ffmpeg_stream.py","file_ext":"py","file_size_in_byte":5369,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"4"}
+{"seq_id":"24285218690","text":"# skip:start\nfrom typegraph import typegraph, Policy, t, Graph\nfrom typegraph.graph.params import Auth, Cors\nfrom typegraph.runtimes.deno import DenoRuntime\n\n# skip:end\n\n\n@typegraph(\n    # skip:next-line\n    cors=Cors(allow_origin=[\"https://metatype.dev\", \"http://localhost:3000\"]),\n)\ndef jwt_authentication(g: Graph):\n    deno = DenoRuntime()\n    public = Policy.public()\n\n    ctx = t.struct(\n        {\"your_own_content\": t.string().optional().from_context(\"your_own_content\")}\n    )\n    # highlight-next-line\n    g.auth(Auth.hmac256(\"custom\"))\n\n    g.expose(\n        get_context=deno.identity(ctx),\n        default_policy=[public],\n    )\n","repo_name":"metatypedev/metatype","sub_path":"website/docs/reference/typegate/authentication/jwt.py","file_name":"jwt.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"4"}
+{"seq_id":"26499451931","text":"#!/usr/bin/python\n\nimport os\nimport sys\nimport csv\nimport shutil\nimport unittest\nfrom io import StringIO\nsys.path.append(\".\")\nfrom annogesiclib.splice_parser import SpliceParser\n\n\nclass TestGff3Parser(unittest.TestCase):\n\n    def setUp(self):\n        self.example = Example()\n        self.s_parser = SpliceParser()\n        self.test_folder = \"test_folder\"\n        if (not os.path.exists(self.test_folder)):\n            os.mkdir(self.test_folder)\n\n    def tearDown(self):\n        if os.path.exists(self.test_folder):\n            shutil.rmtree(self.test_folder)\n\n    def test_parser(self):\n        splice_fh = StringIO(self.example.splice)\n        starts = []\n        splices = []\n        for entry in self.s_parser.parser(splice_fh):\n            starts.append(entry.start)\n            splices.append(entry.splice)\n        self.assertListEqual(starts, [17647, 20734, 43490, 49952])\n        self.assertListEqual(splices, ['splits:1:1:1:N:F', 'splits:1:1:1:C:P',\n                                       
'splits:1:1:1:N:P', 'splits:2:2:2:N:P'])\n\nclass Example(object):\n\n    splice = \"\"\"Staphylococcus_aureus_HG003\t17647\t17667\tsplits:1:1:1:N:F\t0\t+\nStaphylococcus_aureus_HG003\t20734\t21396\tsplits:1:1:1:C:P\t0\t+\nStaphylococcus_aureus_HG003\t43490\t43644\tsplits:1:1:1:N:P\t0\t+\nStaphylococcus_aureus_HG003\t49952\t50016\tsplits:2:2:2:N:P\t0\t+\"\"\"\n\nif __name__ == \"__main__\":\n    unittest.main()\n\n","repo_name":"Sung-Huan/ANNOgesic","sub_path":"tests/test_splice_parser.py","file_name":"test_splice_parser.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"4"}
+{"seq_id":"72376209716","text":"\"\"\"Domasna_5 URL Configuration\r\n\r\nThe `urlpatterns` list routes URLs to views. For more information please see:\r\n    https://docs.djangoproject.com/en/4.0/topics/http/urls/\r\nExamples:\r\nFunction views\r\n    1. Add an import:  from my_app import views\r\n    2. Add a URL to urlpatterns:  path('', views.home, name='home')\r\nClass-based views\r\n    1. Add an import:  from other_app.views import Home\r\n    2. Add a URL to urlpatterns:  path('', Home.as_view(), name='home')\r\nIncluding another URLconf\r\n    1. Import the include() function: from django.urls import include, path\r\n    2. Add a URL to urlpatterns:  path('blog/', include('blog.urls'))\r\n\"\"\"\r\nfrom django.contrib import admin\r\nfrom django.urls import path\r\nfrom Italingua.views import home, test, lectures, profile, practice, congratulations, forum, choose, quiz, addQuestion, reply, replies\r\n\r\nurlpatterns = [\r\n    path('admin/', admin.site.urls),\r\n    path('home/', home, name='home'),\r\n    path('test/', test, name='test'),\r\n    path('lectures/', lectures, name='lectures'),\r\n    path('profile/', profile, name='profile'),\r\n    path('practice/', practice, name='practice'),\r\n    path('congratulations/', congratulations, name='congratulations'),\r\n    path('forum', forum, name='forum'),\r\n    path('choose', choose, name='choose'),\r\n    path('quiz', quiz, name='quiz'),\r\n    path('addQuestion', addQuestion, name='addQuestion'),\r\n    path('reply', reply, name='reply'),\r\n    path('replies', replies, name='replies'),\r\n]\r\n","repo_name":"AnaPejovska/Italingua---Django-Project","sub_path":"Domasna_5/Domasna_5/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"6869655597","text":"class Movement:\n    def __init__(self):\n\n\n        #starting point x axis of the chess piece\n        #starting point y axis of the chess piece\n        #end point x axis of the chess piece\n        #end point y axis of the chess piece\n        #is a piece being captured yes/no\n        #end point x axis of the captured piece\n        #end point y axis of the captured piece\n        #is there a promotion, which piece does it become and is that piece present; the first number indicates which piece it is,\n        #if the piece has not been captured yet, fill in 0. 
If it has, fill in the y of the position of this piece\n        #the second number indicates where this piece is located if it exists\n        #castling: 0 is none, 1 is short castling, 2 is long castling\n        #startx, starty, endx, endy, slag, slagx, slagy, promotie, rokade, beurt as string, posx, posy.\n        #posx and posy are fixed and are always returned for the next step\n        posx = 0\n        posy = 0\n        posx, posy = self.Set(7,4,8,6,0,0,0,0,\"white\", posx ,posy)\n        #posx, posy = self.Set(7,7,7,8,9,4,0,0,\"black\", posx ,posy)\n\n    def Set(self, inputstartx, inputstarty, inputendx, inputendy, inputslagx, inputslagy, promotie, rokade, beurt, posx, posy):\n        startx, starty, endx, endy, slagx, slagy = self.Omrekenen(inputstartx, inputstarty, inputendx, inputendy, inputslagx, inputslagy, beurt)\n        if(rokade == 0):\n            \n            if(promotie == 0):\n                \n                if(inputslagx != 0 or inputslagy != 0):\n\n                    posx, posy = self.slag(endx, endy, posx, posy, slagx, slagy, 0 , beurt)\n                moveh = self.Moveh(startx, starty, endx, endy, posx, posy) \n                if(moveh == False):\n                    posx, posy = self.Move(posx, posy, startx, starty, endx, endy, beurt)\n            if(promotie != 0):\n                posx, posy = self.Move(posx, posy, startx, starty, endx, endy, beurt)\n                promotie *= 2\n                y3 = promotie\n                if(beurt == \"white\"):\n                    x = 4\n                    x2 = 2\n                    y = 18\n                    y2 = 16\n                    beurtint = 1\n                if(beurt == \"black\"):\n                    x = 22\n                    x2 = 24\n                    y = 0\n                    y2 = 2\n                    beurtint = 2\n                posx, posy = self.slag(endx, endy, posx, posy, slagx, slagy, beurtint, beurt)\n                posx, posy = self.Beweegposxy(posx, posy, x2, y3)\n                self.Elektromagneet(1)\n                posx, posy = self.Beweegposxy(posx, posy, x, posy)\n                posx, posy = self.Beweegposxy(posx, posy, posx, y)\n                posx, posy = self.Beweegposxy(posx, posy, endx, posy)\n                posx, posy = self.Beweegposxy(posx, posy, posx, y2)\n                self.Elektromagneet(0)\n            \n            \n        if(rokade == 1):\n            x1 = 18\n            x2 = 20\n            x3 = 16\n        if(rokade == 2):\n            x1 = 10\n            x2 = 6\n            x3 = 12 \n        if(beurt == \"white\"):\n            y1 = 2\n            y2 = 0\n        if(beurt == \"black\"):\n            y1 = 16\n            y2 = 18\n        if(rokade != 0): \n            posx, posy = self.Beweegposxy(posx, posy, 14, y1)\n            self.Elektromagneet(1)\n            \n            posx, posy = self.Beweegposxy(posx, posy, x1, y1)\n            self.Elektromagneet(0)\n            \n            posx, posy = self.Beweegposxy(posx, posy, x2, y1)\n            self.Elektromagneet(1)\n            \n            posx, posy = self.Beweegposxy(posx, posy, x2, y2)\n            posx, posy = self.Beweegposxy(posx, posy, x3, y2)\n            posx, posy = self.Beweegposxy(posx, posy, x3, y1)\n            self.Elektromagneet(0)\n        return posx, posy\n\n\n\n    \n    def slag(self, endx, endy, posx, posy, slagx, slagy, beurt, beurtstring):\n        posx, posy = self.Beweegposxy(posx, posy, endx, endy)\n        self.Elektromagneet(1)\n        posx, posy = self.Beweegposxy(posx, posy, posx, endy + 1)\n\n        if(beurt == 0):\n            if(beurtstring == \"white\"):\n                posx, posy = self.Beweegposxy(posx, posy, 22, posy)\n            if(beurtstring == \"black\" ):\n                posx, posy = self.Beweegposxy(posx, posy, 4, posy)\n            x = slagx\n        if(beurt == 1):\n            posx, posy = self.Beweegposxy(posx, posy, 4, posy)\n            x = 0 \n        if(beurt == 2):\n            posx, posy = self.Beweegposxy(posx, posy, 22, posy)\n            x = 26\n        posx, posy = self.Beweegposxy(posx, posy, posx, slagy + 1)\n        posx, posy = self.Beweegposxy(posx, posy, x, posy)\n        posx, posy = self.Beweegposxy(posx, posy, posx, slagy)\n        self.Elektromagneet(0)\n        return posx, posy\n\n    def Move(self, posx, posy, startx, starty, endx, endy, beurt):\n        #position the electromagnet under the correct piece\n        posx, posy = self.Beweegposxy(posx, posy, startx, starty)\n\n        #switch on the electromagnet\n        self.Elektromagneet(1)\n\n        #moves the electromagnet to the 
final destination\n        posx, posy = self.Beweegposxy(posx, posy, endx, endy)\n\n        #switch off the electromagnet\n        self.Elektromagneet(0)\n        return posx, posy\n\n    def Moveh(self, startx, starty, endx, endy, posx, posy):\n        posx, posy = self.Beweegposxy(posx, posy, startx, starty)\n        movementx = endx - startx\n        movementy = endy - starty\n        if((movementx == 2 and movementy == 4) or (movementx == 4 and movementy == 2)\n           or (movementx == 4 and movementy == -2) or (movementx == 2 and movementy == -4)\n           or (movementx == -2 and movementy == -4) or (movementx == -4 and movementy == -2)\n           or (movementx == -4 and movementy == 2) or (movementx == -2 and movementy == 4)):\n            if(movementx == 2 or movementx == -2):\n                x = movementx / 2\n            else:\n                x = 0\n            if(movementx == 4 or movementx == -4):\n                x2 = movementx\n            else:\n                x2 = 0\n            if(movementy == 2 or movementy == -2):\n                y = movementy / 2\n            else:\n                y = 0\n            if(movementy == 4 or movementy == -4):\n                y2 = movementy\n            else:\n                y2 = 0\n            posx, posy = self.Beweegposxy(posx, posy, posx + x, posy + y)\n            posx, posy = self.Beweegposxy(posx, posy, posx + x2, posy + y2)\n            posx, posy = self.Beweegposxy(posx, posy, posx + x, posy + y)\n            return True\n        return False\n\n        \n\n    def Omrekenen(self, inputstartx, inputstarty, inputendx, inputendy, inputslagx, inputslagy, beurt):\n        startx = (inputstartx + 2) * 2\n        starty = inputstarty * 2\n        \n        endx = (inputendx + 2) * 2\n        endy = inputendy * 2\n        print(\"beurt = \" + str(beurt))\n        if(inputslagx == 9):\n            if(beurt == \"black\"):\n                slagx = 0\n            if(beurt == \"white\"):\n                slagx = 26\n        if(inputslagx == 10):\n            if(beurt == \"black\"):\n                slagx = 2\n            if(beurt == \"white\"):\n                slagx = 24\n        if(inputslagx == 0):\n            slagx = 0\n        slagy = inputslagy * 2\n        return startx, starty, endx, endy, slagx, slagy\n\n    def Beweegposxy(self, posx, posy, x, y):\n        movementx = x - posx\n        movementy = y - posy\n        posx += movementx\n        posy += movementy\n        \n        self.Motorx(movementx)\n        self.Motory(movementy)\n        print(\"posx = \" + str(posx) + \" posy = \" + str(posy))\n        return posx, posy\n\n    def Motorx(self, x):\n        print(\"motor x-axis move \" + str(x))\n\n    def Motory(self, y):\n        print(\"motor y-axis move \" + str(y))\n\n    def Elektromagneet(self, status):\n        if(status == 0):\n            print(\"Deactivate the electromagnet\")\n        elif(status == 1):\n            print(\"Activate the electromagnet\")\n        else:\n            print(\"Error: invalid data\")\n\nMovement()\n","repo_name":"hansleon/Innovate","sub_path":"movement/bewegingcode v0.17.py","file_name":"bewegingcode v0.17.py","file_ext":"py","file_size_in_byte":8118,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"3911572885","text":"import os #for handling files and paths\n\nclass Inicializar():\n    basedir = os.path.abspath(os.path.join(__file__,\"../..\"))\n    DateFormat='%d/%m/%Y'\n    HourFormat=\"%H%M%S\"\n\n    #jsonData\n    Json =basedir + u'\\pages'\n\n    Environment = 'Dev'\n\n    if Environment == 'Dev':\n        user = 'postgres'\n        password = 'ADMIN'\n        host = '127.0.0.1'\n        port = '5432'\n        database = 'test_db'\n\n    #browser\n    NAVEGADOR =u'CHROME'\n\n    #evidence directory\n    Path_Evidencias = basedir +u'/data/capturas'\n\n    #excel data sheet\n    Excel = basedir+ u'/data/DataTest.xlsx'\n\n    if Environment == 'Dev':\n        URL ='https://www.spotify.com/do/signup/'\n    if Environment == 'Test':\n        URL = 
'https://en.wikipedia.org/wiki/PATH_(variable)'\n","repo_name":"fantasmagg/python-selenium","sub_path":"src/functions/inicializar.py","file_name":"inicializar.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"}
+{"seq_id":"113037405","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nclass Kmeans():\r\n    def __init__(self):\r\n        self.centralPoint = None\r\n\r\n    def initialCentral(self, X, K):\r\n\r\n        np.random.seed()\r\n        M = list(range(X.shape[0]))\r\n        np.random.shuffle(M)\r\n\r\n        selectK = M[0:K]\r\n        centralPoint = X[selectK, :]\r\n\r\n        self.centralPoint = centralPoint\r\n        return centralPoint\r\n\r\n    def computeDistance(self, centralPoint, X):\r\n\r\n        K = centralPoint.shape[0]\r\n        N = X.shape[0]\r\n        Dist = np.zeros((N,K),dtype=float)\r\n        for i in range(K):\r\n            d = np.sqrt(np.sum(np.power(centralPoint[i,:] - X, 2), axis = 1))\r\n            Dist[:, i] = d\r\n\r\n        return Dist\r\n\r\n    def updateCentral(self,Dist, X):\r\n\r\n        N = X.shape[0]\r\n        K = Dist.shape[1]\r\n        index = np.argmin(Dist, axis=1)\r\n        centralPoint = np.zeros((K,X.shape[1]), dtype=float)\r\n        Loss = 0\r\n        for i in range(K):\r\n            P = np.mean(X[index == i, :], axis=0, keepdims=True)\r\n            centralPoint[i, :] = P\r\n            Loss += np.sum(np.power(X[index == i, :] - P, 2))\r\n\r\n        self.centralPoint = centralPoint\r\n        Loss = Loss / N\r\n        return Loss\r\n\r\n\r\n    def fit(self, X, K, epoch = 10):\r\n\r\n        self.initialCentral(X, K)\r\n        Loss = []\r\n        for i in range(epoch):\r\n            Dist = self.computeDistance(self.centralPoint, X)\r\n            min_loss = self.updateCentral(Dist, X)\r\n            Loss.append(min_loss)\r\n\r\n            print(\"iterative[%d] Loss is %2.3f\"%(i+1, min_loss))\r\n\r\n\r\n        plt.figure(0)\r\n        plt.plot(range(1,epoch+1),Loss,lw=2)\r\n        plt.xlabel('iterative')\r\n        plt.ylabel('Loss')\r\n\r\n        if K==3 and X.shape[1]>=2:\r\n            Dist = self.computeDistance(self.centralPoint,X)\r\n            index = np.argmin(Dist,axis=1)\r\n            Cluster1 = X[index==0,:]\r\n            Cluster2 = X[index==1,:]\r\n            Cluster3 = X[index==2,:]\r\n\r\n            plt.figure(1)\r\n            plt.scatter(Cluster1[:, 0], Cluster1[:, 1],10, c='r', marker='o',label='Cluster 1')\r\n            plt.scatter(Cluster2[:, 0], Cluster2[:, 1], 10,c='b', marker='o', label='Cluster 2')\r\n            plt.scatter(Cluster3[:, 0], Cluster3[:, 1], 10, c='k', marker='o', label='Cluster 3')\r\n            plt.xlabel('feature 1', fontsize=18)\r\n            plt.ylabel('feature 2', fontsize=18)\r\n\r\n        plt.show()\r\n\r\n    def predict(self, X):\r\n\r\n        Dist = self.computeDistance(self.centralPoint, X)\r\n        pred = np.argmin(Dist, axis=1)\r\n        return pred\r\n\r\n","repo_name":"zhangyoujian/machine_learning_set","sub_path":"Kmeans.py","file_name":"Kmeans.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"86513550154","text":"from view import *\n\nclass CustomersWindow(QWidget):\n    def __init__(self, data, mainWindow):\n        super().__init__()\n        self.data = data\n        self.mainWindow = mainWindow\n        self.layout = QGridLayout()\n        self.rows = 1\n\n        self.build()\n\n    def build(self):\n\n        self.layout = QGridLayout()\n\n        self.scrollArea = QScrollArea(self)\n        self.scrollArea.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)\n        self.scrollArea.setWidgetResizable(True)\n        self.scrollAreaContents = QWidget()\n        self.scrollAreaContentsLayout = QGridLayout()\n\n        titleBox = QGroupBox()\n        titleBox.setFixedHeight(60)\n        titleLayout = QGridLayout()\n        titleLayout.addWidget(QLabel('Customer ID'), 0, 0)\n        
titleLayout.addWidget(QLabel('First Name'), 0, 1)\n titleLayout.addWidget(QLabel('Last Name'), 0, 2)\n titleLayout.addWidget(QLabel('Phone'), 0, 3)\n titleLayout.addWidget(QLabel('Email'), 0, 4)\n titleLayout.addWidget(QLabel('Account Created'), 0, 5)\n titleBox.setLayout(titleLayout)\n self.rows = 1\n\n self.scrollAreaContentsLayout.addWidget(titleBox)\n\n if self.data != None:\n\n for row in self.data['Views']['Customers']:\n button = LinkCustomerButton(str(row.customerID), self)\n subBox = QGroupBox()\n subBox.setFixedHeight(60)\n subLayout = QGridLayout()\n subLayout.addWidget(button, 0, 0)\n subLayout.addWidget(QLabel(row.firstName), 0, 1)\n subLayout.addWidget(QLabel(row.lastName), 0, 2)\n if row.phone != None:\n subLayout.addWidget(QLabel(row.phone), 0, 3)\n else:\n subLayout.addWidget(QLabel('None'), 0, 3)\n if row.email != None:\n subLayout.addWidget(QLabel(row.email), 0, 4)\n else:\n subLayout.addWidget(QLabel('None'), 0, 4)\n subLayout.addWidget(QLabel(str(row.date)), 0, 5)\n subBox.setLayout(subLayout)\n self.rows += 1\n self.scrollAreaContentsLayout.addWidget(subBox, self.rows, 0, 1, 0)\n\n self.scrollAreaContentsLayout.setAlignment(Qt.AlignTop)\n self.scrollAreaContents.setLayout(self.scrollAreaContentsLayout)\n self.scrollArea.setWidget(self.scrollAreaContents)\n self.scrollArea.setFixedWidth(1400)\n self.scrollArea.setFixedHeight(800)\n self.layout.addWidget(self.scrollArea)\n\n self.setLayout(self.layout)\n\n def updateData(self, data):\n self.data = data","repo_name":"justneedham/AlexandriaInterface","sub_path":"CustomersWindow.py","file_name":"CustomersWindow.py","file_ext":"py","file_size_in_byte":2641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"71280584757","text":"import re\n\nfrom src import constants\nfrom src.query_parse_exception import QueryParseException\n\n\nclass Query:\n \"\"\"\n Query format:\n IF CONDITION\n FOR TIME_WINDOW\n THEN INSTRUCTION\n EVERY DELAY SECONDS\n\n CONDITION can be:\n a python-style condition, which can use predefined values as well as numbers / booleans / operators etc.:\n - ball.x (the ball's position on the X axis);\n - ball.y (the ball's position on the Y axis);\n - player.1.x;\n - player.1.y;\n - player.2.x;\n - ...\n - player.6.x;\n - player.6.y;\n - midfield.x (the position at the middle of the field on the X axis: 0);\n\n TIME_WINDOW can be:\n LAST x SECONDS (where 'x' is a number)\n LAST x ENTRIES (where 'x' is a number)\n\n INSTRUCTION can be:\n PRINT(\"string\")\n\n DELAY can be:\n a number (2, 10, 0.5, etc)\n\n Examples of well-defined queries:\n IF ball.x < midfield.x\n FOR LAST 20 SECONDS\n THEN PRINT(\"Left team too defensive\")\n EVERY 0.5 SECONDS\n \"\"\"\n\n QUERY_IF = \"if\"\n QUERY_FOR = \"for\"\n QUERY_LAST = \"last\"\n QUERY_THEN = \"then\"\n QUERY_EVERY = \"every\"\n TIME_WINDOW_ENTRIES = \"entries\"\n TIME_WINDOW_SECONDS = \"seconds\"\n INSTRUCTION_PRINT = \"print\"\n PARSED_OPERAND_VALUES = [\n \"ball.x\",\n \"ball.y\",\n \"player.1.x\",\n \"player.1.y\",\n \"player.2.x\",\n \"player.2.y\",\n \"player.3.x\",\n \"player.3.y\",\n \"player.4.x\",\n \"player.4.y\",\n \"player.5.x\",\n \"player.5.y\",\n \"player.6.x\",\n \"player.6.y\",\n ]\n STATIC_OPERAND_VALUES = {\n \"midfield.x\": 0,\n }\n PLACEHOLDER = \"PLACEHOLDER\"\n\n CONDITION_CORRECT = \"Correct\"\n CONDITION_INCORRECT = \"Incorrect\"\n CONDITION_INCOMPLETE = \"Incomplete\"\n CONDITION_ERROR = \"Error\"\n\n TUTORIAL_TEXT = \"QUERY FORMAT:\\n\" \\\n \" IF condition\\n\" \\\n \" FOR LAST x 
time_window\\n\" \\\n \" THEN PRINT(\\\"message\\\")\\n\" \\\n \" EVERY delay SECONDS\\n\" \\\n \"\\n\" \\\n \"TERMINOLOGY:\\n\" \\\n \"- condition: a python-like condition which can use predefined operands, as well as \" \\\n \"numbers / booleans / operators etc;\\n\" \\\n \"- predefined operands:\\n\" \\\n \" - ball.x;\\n\" \\\n \" - ball.y;\\n\" \\\n \" - player.1/2/3/4/5/6.x;\\n\" \\\n \" - player.1/2/3/4/5/6.y;\\n\" \\\n \" - midfield.x (0);\\n\" \\\n \"- x: number\\n\" \\\n \"- time_window: 'SECONDS' or 'ENTRIES'\\n\" \\\n \"- message: a string printed when the condition is true 'FOR the LAST x SECONDS/ENTRIES'\\n\" \\\n \"- delay: the message will be printed AT MOST every 'delay' seconds\\n\" \\\n \"\\n\" \\\n \"You can start multiple queries at once; separate them by a blank line.\\n\"\n\n INITIAL_QUERIES = \"IF ball.x < midfield.x\\n\" \\\n \"FOR LAST 2 SECONDS\\n\" \\\n \"THEN PRINT(\\\"Orange team defending\\\")\\n\" \\\n \"EVERY 1 SECONDS\\n\" \\\n \"\\n\" \\\n \"IF ball.x > midfield.x\\n\" \\\n \"FOR LAST 2 SECONDS\\n\" \\\n \"THEN PRINT(\\\"Blue team defending\\\")\\n\" \\\n \"EVERY 1 SECONDS\\n\" \\\n \"\\n\" \\\n \"IF player.3.x > midfield.x and player.5.x > midfield.x and player.6.x > midfield.x\\n\" \\\n \"FOR LAST 1 SECONDS\\n\" \\\n \"THEN PRINT(\\\"Entire orange team is offensive\\\")\\n\" \\\n \"EVERY 0.5 SECONDS\"\n\n def __init__(self, query_string):\n self.query_string = query_string\n\n # init the query parameters\n self.condition = \"\"\n self.time_window_value = -1\n self.time_window_type = \"\"\n self.print_string = \"\"\n self.delay = -1\n\n # init the query evaluation variables\n self.fit_entries = 0\n self.first_entry_time = 0\n self.last_entry_time = 0\n self.last_print_time = 0\n\n # parse the query parameters' actual values from the given string\n self.parse_query()\n self.validate_parameters()\n self.invert_coordinates_in_condition()\n\n def parse_query(self):\n # lowercase everything, we don't use any capital letters\n # also trim it, as we don't need any whitespace at the beginning or the end\n self.query_string = self.query_string.lower().strip()\n\n # check if it starts with \"IF \", otherwise raise parsing error\n if not self.query_string.startswith(Query.QUERY_IF + \" \"):\n raise QueryParseException(\"IF clause missing from the query.\")\n\n # remove the beginning \"IF \" string\n self.query_string = self.query_string[len(Query.QUERY_IF + \" \"):]\n\n # extract the condition\n # first we search for the FOR clause\n for_clause_index = self.query_string.find(\" \" + self.QUERY_FOR + \" \")\n if for_clause_index == -1:\n for_clause_index = self.query_string.find(\"\\n\" + self.QUERY_FOR + \" \")\n if for_clause_index == -1:\n raise QueryParseException(\"FOR clause missing from the query.\")\n\n # extract the condition (spans until the FOR clause is reached)\n self.condition = self.query_string[:for_clause_index]\n\n # extract the time window in the FOR clause\n # first we search for the THEN clause\n then_clause_index = self.query_string.find(\" \" + self.QUERY_THEN + \" \")\n if then_clause_index == -1:\n then_clause_index = self.query_string.find(\"\\n\" + self.QUERY_THEN + \" \")\n if then_clause_index == -1:\n raise QueryParseException(\"THEN clause missing from the query.\")\n\n # extract the time window (spans until the THEN clause is reached)\n time_window_string = self.query_string[for_clause_index + 1:then_clause_index]\n if not time_window_string.startswith(Query.QUERY_FOR + \" \" + Query.QUERY_LAST + \" \"):\n raise 
QueryParseException(\"FOR clause must start with 'FOR LAST '.\")\n\n # remove the beginning \"FOR LAST \" string\n time_window_string = time_window_string[len(Query.QUERY_FOR + \" \" + Query.QUERY_LAST + \" \"):]\n\n # extract the time window type\n time_window_type_index = re.search(\"(\" + Query.TIME_WINDOW_ENTRIES + \"|\" + Query.TIME_WINDOW_SECONDS + \")\",\n time_window_string)\n if time_window_type_index is None:\n raise QueryParseException(\"Time window type missing from the FOR clause.\")\n time_window_type_index = time_window_type_index.start()\n self.time_window_type = time_window_string[time_window_type_index:].strip()\n\n # extract the time window value\n self.time_window_value = time_window_string[:time_window_type_index].strip()\n\n # we find the \"EVERY\" clause beginning\n every_clause_index = self.query_string.find(\" \" + self.QUERY_EVERY + \" \")\n if every_clause_index == -1:\n every_clause_index = self.query_string.find(\"\\n\" + self.QUERY_EVERY + \" \")\n if every_clause_index == -1:\n raise QueryParseException(\"EVERY clause missing from the query.\")\n\n # check if the print instruction is present\n then_clause_string = self.query_string[then_clause_index:every_clause_index]\n print_index = then_clause_string.find(\" \" + Query.INSTRUCTION_PRINT + \"(\")\n if print_index == -1:\n raise QueryParseException(\"Print instruction missing from the THEN clause.\")\n\n # extract the string from the print instruction\n self.print_string = then_clause_string[len('THEN PRINT(\"') + 1:-2]\n\n # extract the 'EVERY' clause (spans until the end of the message)\n every_clause_string = self.query_string[every_clause_index + 1:]\n if not every_clause_string.startswith(Query.QUERY_EVERY + \" \"):\n raise QueryParseException(\"EVERY clause must start with 'EVERY '.\")\n\n # remove the beginning \"FOR LAST \" string\n every_clause_string = every_clause_string[len(Query.QUERY_EVERY + \" \"):]\n\n # remove the \" SECONDS\" at the end of the clause\n seconds_index = every_clause_string.find(\"seconds\")\n if seconds_index == -1:\n raise QueryParseException(\"'SECONDS' keyword missing from the EVERY clause.\")\n\n # extract the delay from the 'EVERY' clause\n self.delay = every_clause_string[:seconds_index].strip()\n\n @staticmethod\n def validate_number(value, exception_message):\n try:\n return float(value)\n except ValueError:\n raise QueryParseException(exception_message)\n\n def validate_parameters(self):\n self.time_window_value = Query.validate_number(self.time_window_value,\n \"Time window value (FOR) must be a number.\")\n self.delay = Query.validate_number(self.delay,\n \"DELAY (EVERY) must be a number.\")\n\n def invert_coordinates(self, value):\n self.condition = self.condition.replace(value + \".x\", Query.PLACEHOLDER)\n self.condition = self.condition.replace(value + \".y\", value + \".x\")\n self.condition = self.condition.replace(Query.PLACEHOLDER, value + \".y\")\n\n def invert_coordinates_in_condition(self):\n \"\"\"\n We must invert any coordinates (ball.x with ball.y), and so on,\n because the frames' coordinates are reversed.\n \"\"\"\n self.invert_coordinates(\"ball\")\n self.invert_coordinates(\"player.1\")\n self.invert_coordinates(\"player.2\")\n self.invert_coordinates(\"player.3\")\n self.invert_coordinates(\"player.4\")\n self.invert_coordinates(\"player.5\")\n self.invert_coordinates(\"player.6\")\n\n def evaluate_condition_for_message(self, message: dict):\n enhanced_condition = self.condition\n # replace all the static values in the condition\n for 
static_operand in Query.STATIC_OPERAND_VALUES.items():\n            enhanced_condition = enhanced_condition.replace(static_operand[0], str(static_operand[1]))\n\n        # replace all the parsed values in the condition (values found in the given message)\n        for parsed_operand in Query.PARSED_OPERAND_VALUES:\n            try:\n                parsed_operand_value = message\n                for operand_key in parsed_operand.split(\".\"):\n                    parsed_operand_value = parsed_operand_value[operand_key]\n                enhanced_condition = enhanced_condition.replace(parsed_operand, str(parsed_operand_value))\n            except KeyError:\n                # if the operand is correct, but we can't find it in the message because there is\n                # no update yet, we return it as incomplete\n                if parsed_operand in self.condition:\n                    return Query.CONDITION_INCOMPLETE\n\n        try:\n            # evaluate the condition without any builtins to prevent any injections\n            if eval(enhanced_condition, {'__builtins__': None}):\n                return Query.CONDITION_CORRECT\n            else:\n                return Query.CONDITION_INCORRECT\n        except (SyntaxError, ZeroDivisionError, NameError, TypeError, KeyError):\n            # return CONDITION_ERROR if the eval failed because of any issues; we will print a message\n            # for the user to know that the query's condition is broken for this particular event\n            return Query.CONDITION_ERROR\n\n    def add_message(self, message: dict):\n        # evaluate the query condition, based on the new message\n        condition_result = self.evaluate_condition_for_message(message)\n\n        # check if the condition can be evaluated or if it is syntactically incorrect\n        if condition_result == Query.CONDITION_ERROR:\n            return \"Error evaluating query condition!\"\n        elif condition_result == Query.CONDITION_CORRECT:\n            # if the condition is true, we update the query evaluation parameters and check if we should\n            # print the message in the query's 'THEN' clause\n\n            # update query evaluation parameters\n            if self.fit_entries == 0:\n                self.first_entry_time = message[constants.FRAME_TIME]\n            self.last_entry_time = message[constants.FRAME_TIME]\n            self.fit_entries += 1\n\n            # check if we should print the message, according to the time window type, value and delay\n            if self.time_window_type == Query.TIME_WINDOW_ENTRIES and self.fit_entries >= self.time_window_value or \\\n                    self.time_window_type == Query.TIME_WINDOW_SECONDS and \\\n                    self.last_entry_time - self.first_entry_time >= self.time_window_value and \\\n                    self.last_entry_time - self.last_print_time >= self.delay:\n                self.last_print_time = self.last_entry_time\n                return self.print_string\n        elif condition_result == Query.CONDITION_INCOMPLETE:\n            # if the condition is incomplete, we do not print anything, but don't reset the query parameters;\n            # the query's state remains the same\n            return None\n        else:\n            # if the condition is false, we do not print anything and reset the query evaluation parameters\n            self.fit_entries = 0\n            self.first_entry_time = 0\n            self.last_entry_time = 0\n            return None\n","repo_name":"Alxertion/RocketLeagueReplayAnalyzer","sub_path":"src/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":13691,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"}
+{"seq_id":"6852929241","text":"from datetime import datetime\nimport websocket, json, pprint, talib, numpy as np, math\nimport config\nfrom binance.client import Client\nfrom binance.enums import *\n\nclient = Client(config.API_KEY, config.API_SECRET)\nnow = datetime.now()\n\ntry:\n    print(\"Sending order\")\n    order = client.create_test_order(symbol='ETHUSDT',side='SELL',type=ORDER_TYPE_MARKET,quantity=1)\n    
#print(order)\n    acc = client.get_account()\n    #print(acc[\"balances\"])\n    for shit in acc[\"balances\"]:\n        if shit[\"asset\"] == 'ETH':\n            print(\"ETH: {}\".format(shit['free']))\n        if shit[\"asset\"] == 'USDT':\n            print(\"USDT: {}\".format(shit['free']))\n    \nexcept Exception as e:\n    print(\"Order failed - {}\".format(e))\n    ","repo_name":"andrismrnvszki/trading-bot","sub_path":"Crypto Bot/v1/rsibot/get_acc_data.py","file_name":"get_acc_data.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"24734725848","text":"'''\r\nCreated on 7 Aug 2018\r\n\r\n@author: wvx67826\r\n\r\n@Description:\r\n    This takes the scattering form factor and calculates the scattered intensity\r\n'''\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\ni = complex(0,1)\r\n\r\ndef th2q(lamda, angle):\r\n    return 4.0*np.pi/lamda*np.sin(angle/180*np.pi)\r\ndef q2th(lamda,q):\r\n    return np.arcsin(q*lamda/4.0/np.pi)\r\n\r\ndef fm(z1,z2,z3,lamda,q,f1,f2):\r\n    return i*f1*np.matrix([[0.0, z1*np.cos(q2th(lamda,q))-z3*np.sin(q2th(lamda,q))],\r\n                           [z3*np.sin(q2th(lamda,q)) - z1*np.cos(q2th(lamda,q)), \r\n                            -z2*np.sin(2.0*q2th(lamda,q))]])+f2*np.matrix([[z2**2,\r\n                            -z2*(z1*np.sin(q2th(lamda,q))-z3*np.cos(q2th(lamda,q)))],\r\n                            [z2*(z1*np.sin(q2th(lamda,q))+z3*np.cos(q2th(lamda,q))),\r\n                            np.cos(q2th(lamda,q))**2*(z1**2*np.tan(q2th(lamda,q))**2+z3**2)]])\r\ndef f0 (lamda, q,scale):\r\n    return scale*np.matrix([[1.0, 0.0],\r\n                            [0.0, np.cos(2.0*q2th(lamda,q))]])\r\n\r\ndef set_ipol(pol, angle = 0):\r\n    ipol = {\"Si\" : np.matrix([[1.0],\r\n                              [0.0]]),\r\n            \"Pi\" : np.matrix([[0.0],\r\n                              [1.0]]),\r\n            \"LC\" : np.matrix([[0.707106781186547],\r\n                              [0.707106781186547*i]]),\r\n            \"RC\" : np.matrix([[0.707106781186547],\r\n                              [-0.707106781186547*i]]),\r\n            \"LA\" : np.matrix([[np.cos(np.deg2rad(angle))],\r\n                              [np.sin(np.deg2rad(angle))]]),\r\n            }\r\n\r\n    return ipol[pol]\r\ndef intensity (q,lamda, z,z1,z2,z3,fs,f1,f2):\r\n    temp = np.matrix([[0,0],[0,0]])\r\n    \r\n    for k in range(1):\r\n        for y in z:\r\n            z1rot = z1#*np.sin(2.0*y/20*np.pi+3*np.pi/4.0)\r\n            z2rot = z2#*np.cos(2.0*y/20*np.pi)\r\n            z3rot = z3#*np.cos(2.0*y/20*np.pi)\r\n            temp = temp + np.exp(i*(y)*q)*((f0(lamda,q,fs))+fm(z1rot,z2rot,z3rot,lamda,q,f1,f2))\r\n\r\n\r\n    return temp\r\n\r\n\r\ndef pol_intensity(intensity, inPol, outPol, inAngle = 0, outAngle = 0):\r\n    mIpol = set_ipol(inPol,inAngle)\r\n    mFinI = np.multiply(intensity,mIpol)\r\n    if outPol == \"Si+Pi\":\r\n        finI= np.dot(mFinI[0,0]+mFinI[1,0],np.conj(mFinI[0,0]+mFinI[1,0])) + np.dot(\r\n            mFinI[0,1]+mFinI[1,1],np.conj(mFinI[0,1]+mFinI[1,1]))\r\n        return np.absolute(finI)\r\n    if outPol == \"Si\":\r\n\r\n        finI = np.dot(mFinI[0,0]+mFinI[1,0],np.conj(mFinI[0,0]+mFinI[1,0]))\r\n        return np.absolute(finI)\r\n    if outPol == \"Pi\":\r\n        \r\n        finI = np.dot(mFinI[0,1]+mFinI[1,1],np.conj(mFinI[0,1]+mFinI[1,1]))\r\n        return np.absolute(finI)\r\n    \r\n    \r\n    if outPol == \"LA\":\r\n        finI= np.dot((mFinI[0,0]+mFinI[1,0])*np.cos(np.deg2rad(outAngle)),np.conj(mFinI[0,0]+mFinI[1,0])*np.cos(np.deg2rad(outAngle))) + np.dot(\r\n            (mFinI[0,1]+mFinI[1,1])*np.sin(np.deg2rad(outAngle)),np.conj(mFinI[0,1]+mFinI[1,1])*np.sin(np.deg2rad(outAngle)))\r\n        return np.absolute(finI)\r\n    \r\n    \r\n\r\n\r\n    \r\n\r\nlamda = 1.54\r\nq = np.arange(0.2, 3.2, 0.01)\r\nz = np.arange (0, 200., 4)\r\niMeasure1 = []\r\niMeasure2 = []\r\niMeasure3 = []\r\niMeasure4 = []\r\niMeasure5 = []\r\niMeasure6 = []\r\niMeasure7 = []\r\niMeasure8 = []\r\niMeasure9 = []\r\niMeasure10 = []\r\niMeasure11 = 
[]\r\niMeasure12 = []\r\n\r\n\r\n\r\nfm1 = []\r\nfm2 = []\r\n\r\nz1 = 1.0\r\nz2 = 0.0\r\nz3 = 0.1\r\n\r\nfs = 0.0\r\nf1 = 1.0\r\nf2 = 0\r\n\r\nfor k in q:\r\n    tempI = intensity(k,lamda, z,z1,z2,z3, fs, f1,f2)\r\n    iMeasure1 = np.append(iMeasure1,pol_intensity(tempI,\"Si\",\"Si\"))\r\n    iMeasure2 = np.append(iMeasure2,pol_intensity(tempI,\"Si\",\"Pi\"))\r\n    iMeasure3 = np.append(iMeasure3,pol_intensity(tempI,\"Pi\",\"Si\"))\r\n    iMeasure4 = np.append(iMeasure4,pol_intensity(tempI,\"Pi\",\"Pi\"))\r\n    \r\n    \r\nz1 = 0.0\r\nz2 = 1.0\r\nz3 = 0.1\r\nf2 = 0.0\r\n\r\nfor k in q:\r\n    tempI = intensity(k,lamda, z,z1,z2,z3,fs, f1,f2)\r\n    iMeasure5 = np.append(iMeasure5,pol_intensity(tempI,\"Si\",\"Si\"))\r\n    iMeasure6 = np.append(iMeasure6,pol_intensity(tempI,\"Si\",\"Pi\"))\r\n    iMeasure7 = np.append(iMeasure7,pol_intensity(tempI,\"Pi\",\"Si\"))\r\n    iMeasure8 = np.append(iMeasure8,pol_intensity(tempI,\"Pi\",\"Pi\")) \r\n\r\ncd = iMeasure1-iMeasure2\r\npolIn = []\r\npolOut = []\r\nangle = np.arange(0,1,0.05)\r\n\r\nz1 = 0.0\r\nz2 = 1.0\r\nz3 = 1.0\r\n \r\nfor a,b in enumerate (angle):\r\n\r\n    tempI = intensity(1.568*2, lamda, z,z1,z2,z3,fs, f1,b)\r\n    iMeasure9 = np.append(iMeasure9,pol_intensity(tempI,\"Si\",\"Si\"))\r\n    iMeasure10 = np.append(iMeasure10,pol_intensity(tempI,\"Si\",\"Pi\"))\r\n    iMeasure11 = np.append(iMeasure11,pol_intensity(tempI,\"Pi\",\"Si\"))\r\n    iMeasure12 = np.append(iMeasure12,pol_intensity(tempI,\"Pi\",\"Pi\")) \r\n    \r\n\r\nplt.figure(1)\r\nplt.subplot(221)\r\nplt.plot(q,iMeasure1)\r\nplt.plot(q,iMeasure5)\r\nplt.title(\"Si-Si\")\r\nplt.subplot(222)\r\nplt.plot(q,iMeasure2)\r\nplt.plot(q,iMeasure6)\r\nplt.title(\"Si-pi\")\r\nplt.subplot(223)\r\nplt.plot(q,iMeasure3)\r\nplt.plot(q,iMeasure7)\r\nplt.title(\"pi-si\")\r\nplt.subplot(224)\r\nplt.plot(q,iMeasure4)\r\nplt.plot(q,iMeasure8)\r\nplt.title(\"pi-pi\")\r\n\r\nplt.figure(2)\r\nplt.subplot(221)\r\nplt.plot(q,iMeasure1-iMeasure5)\r\nplt.title(\"Si-Si different\")\r\nplt.subplot(222)\r\nplt.plot(q,iMeasure2-iMeasure6)\r\nplt.title(\"Si-pi different\")\r\n\r\nplt.subplot(223)\r\nplt.plot(q,iMeasure3-iMeasure7)\r\nplt.title(\"pi-Si different\")\r\nplt.subplot(224)\r\nplt.plot(q,iMeasure4-iMeasure8)\r\nplt.title(\"Pi-Pi different\")\r\n\r\nplt.figure(3)\r\nplt.subplot(221)\r\nplt.plot(angle,iMeasure9)\r\n\r\nplt.title(\"Si-Si\")\r\nplt.subplot(222)\r\nplt.plot(angle,iMeasure10)\r\n\r\nplt.title(\"Si-pi\")\r\nplt.subplot(223)\r\nplt.plot(angle,iMeasure11)\r\n\r\nplt.title(\"pi-si\")\r\nplt.subplot(224)\r\nplt.plot(angle,iMeasure12)\r\n\r\nplt.title(\"pi-pi\")\r\n\r\nplt.show()","repo_name":"Relm-Arrowny/dataAnalysis","sub_path":"modelling/Diffraction/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":5785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"27823165965","text":"# Approach: every time we reach 15, try swapping 3*5 for 5*3\n# Classify every integer as 3n+0, 3n+5 or 3n+10\n\n# Start\nAmt = int(input())\n\n\nif Amt%3==0: #case 1 (final +0)\n    Answer = Amt // 15 # conversion unit (3*5 to 5*3)\n    Amt = Amt - Answer * 15\n    Answer = (3 * Answer) + (Amt // 3)\nelif Amt%3==2: #case 2 (final +1)\n    Amt = Amt - 5\n    if Amt <0:\n        Answer = -1\n    else:\n        Answer = Amt // 15\n        Amt = Amt - Answer * 15\n        Answer = (3 * Answer) + (Amt // 3)\n        Answer = Answer + 1\nelse: #case 3 (final +2)\n    Amt = Amt - 10\n    if Amt <0:\n        Answer = -1\n    else:\n        Answer = Amt // 15\n        Amt = Amt - Answer * 15\n        Answer = (3 * Answer) + (Amt // 3)\n        Answer = Answer + 2\n\nprint(Answer) \n\n","repo_name":"k-min9/TIL","sub_path":"00. 
Daily Algorithm/BOJ/BOJ2839_Bz1_설탕배달.py","file_name":"BOJ2839_Bz1_설탕배달.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"}
+{"seq_id":"20493642170","text":"\nimport os\nimport re\nimport sys\n\nimport matplotlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nimport annotation_utils as anno_utils\n\nUSRNAME ='u148188' # 'carina'\n\ndef load_df(fpath):\n    return pd.read_json(fpath, compression='gzip', orient='split')\n\n# Stanford PoS Tagger\ndef add_pos_tags(refdf, out_fpath=None):\n    pos_tagger = anno_utils.load_pos_tagger()\n    refdf['tagged_stnf'] = anno_utils.tag_refExp(refdf[['refexp']].values.flatten(order='K').tolist(), pos_tagger=pos_tagger)\n    if out_fpath:\n        refdf.to_json(out_fpath, compression='gzip', orient='split')\n    return refdf\n\n# Stanford Neural Dependency Parser\ndef add_dep_parses(refdf, out_fpath=None):\n    dep_parser = anno_utils.load_dep_parser() \n    parses = anno_utils.parse_refEpx(\n        refdf[['refexp']].values.flatten(order='K').tolist(), dep_parser)\n    refdf['depparse_stnf'] = parses\n    if out_fpath:\n        refdf.to_json(out_fpath, compression='gzip', orient='split')\n    return refdf\n\ndef add_dep_parses_from_json(json_fpath, out_fpath=None):\n    # TODO 1: probably this can be done much better, still new to pandas ...\n    # TODO 2: double-check it still also works with refcoco\n    parses = pd.read_json(json_fpath, #compression='gzip', \n        orient='columns')\n    indices_fpath = \"{0}.idx\".format(\n        re.sub(\"(.+?)(\\.txt)?(\\.json)?(\\.gz)?\", r\"\\1\", json_fpath))\n    indices = pd.read_csv(indices_fpath, sep=\",\", header=None)\n    indices.drop(columns=0, inplace=True)\n    indices.rename({1: \"rex_id\", 2: \"image_id\", 3: \"region_id\"}, axis=1, inplace=True)\n\n    sents = pd.DataFrame(parses[\"sentences\"], index=parses.index)\n    const_parses = pd.DataFrame(sents.applymap(lambda x: x[\"parse\"]), index=parses.index)\n    const_parses.rename({\"sentences\": \"parse\"}, axis=1, inplace=True)\n    dep_parses = pd.DataFrame(sents.applymap(lambda x: x[\"basicDependencies\"]), index=parses.index)\n    dep_parses.rename({\"sentences\": \"basicDependencies\"}, axis=1, inplace=True)\n    const_dep_parses = dep_parses.join(const_parses, how='left')\n    \n    parse_df = indices.join(const_dep_parses, how='left')\n    if out_fpath:\n        parse_df.to_json(out_fpath, compression='gzip', orient='split')\n    return parse_df\n\ndef add_root_from_dep_parse(json_fpath, out_fpath=None):\n    # TODO: probably this can be done much better, still new to pandas ...\n    parses = pd.read_json(json_fpath, compression='gzip', orient='columns')\n    indices_fpath = \"{0}.idx\".format(\n        re.sub(\"(.+?)(\\.txt)?(\\.json)?(\\.gz)?\", r\"\\1\", json_fpath))\n    indices = pd.read_csv(indices_fpath, sep=\",\", header=None, index_col=0)\n    indices.rename({0: \"rex_id\", 1: \"image_id\", 2: \"region_id\"}, axis=1, inplace=True)\n\n    sents = pd.DataFrame(parses[\"sentences\"], index=parses.index)\n    dep_roots = pd.DataFrame(sents.applymap(lambda x: x[\"basicDependencies\"][0]), index=parses.index)\n    dep_roots.rename({\"sentences\": \"deproot\"}, axis=1, inplace=True)\n    parse_df = indices.join(dep_roots, how='left')\n    if out_fpath:\n        parse_df.to_json(out_fpath, compression='gzip', orient='split')\n    return parse_df\n    \n\n# Attributes and Names\ndef add_attrs_names(refdf, out_fpath=None):\n    if 'tagged' in refdf:\n        refdf['attr_name'] = refdf['tagged'].apply(lambda x: anno_utils.get_refanno(x))\n    if 'tagged_stnf' in refdf:\n        refdf['attr_name_stnf'] = 
refdf['tagged_stnf'].apply(lambda x: anno_utils.get_refanno(x))\n    if out_fpath:\n        refdf.to_json(out_fpath, compression='gzip', orient='split')\n    return refdf\n    \n\n# WordNet\ndef add_synsets(refdf, out_fpath=None):\n    if 'tagged' in refdf:\n        refdf['wn_anno'] = refdf['tagged'].apply(lambda x: get_wn_anno(x))\n    if 'tagged_stnf' in refdf:\n        refdf['wn_anno_stnf'] = refdf['tagged_stnf'].apply(lambda x: get_wn_anno(x))\n    if out_fpath:\n        refdf.to_json(out_fpath, compression='gzip', orient='split')\n    return refdf\n    \ndef get_wn_anno(refdf_tagged):\n    wn_annos = []\n    for (word, tag) in refdf_tagged:\n        pos = anno_utils.tag2pos(tag)\n        if pos:\n            synset = anno_utils.get_synset_first(word, pos=pos)\n            lexfile_info = anno_utils.get_ss_lexfile_info(synset)\n            wn_annos.append((anno_utils.get_synset_name(synset), lexfile_info))\n        else:\n            wn_annos.append((None, None))\n    return wn_annos\n\nif __name__==\"__main__\":\n    json_fpath = \"/media/%s/Carina_2017/UdS/data/flickr30k_refdf.json.gz\" % (USRNAME)\n    json_foutpath = \"/media/%s/Carina_2017/UdS/data/flickr30k_refdf_wn.json.gz\" % (USRNAME)\n\n    if len(sys.argv) > 1:\n        json_fpath = sys.argv[1]\n    if len(sys.argv) > 2:\n        json_foutpath = sys.argv[2]\n\n    #refdf = load_df(json_fpath)\n    #refdf = add_pos_tags(refdf) # assignment not really necessary\n    #refdf = add_synsets(refdf)\n    #refdf = add_attrs_names(refdf, json_foutpath)\n    #refdf = add_dep_parses(refdf)\n    refdf = add_dep_parses_from_json(json_fpath,json_foutpath)\n    \n    \n    ","repo_name":"carinasilberer/names_in_context","sub_path":"utils/df_annotation.py","file_name":"df_annotation.py","file_ext":"py","file_size_in_byte":4978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"19534054549","text":"from keras.models import Sequential\nfrom keras.layers import Dense, Activation, Dropout\nimport keras.utils\nimport keras.optimizers\nfrom keras.preprocessing.text import text_to_word_sequence\nfrom keras.backend import argmax\n\n# define the document\nimport pandas as pd\nfrom preprocess import get_data, error_rate, vectorizer, tokenize, stem_tokens\nimport operator\nimport matplotlib.pyplot as plt\nfrom sklearn.utils import shuffle\nimport numpy as np\n\nfrom itertools import product\n\nclass KerasNeuralNetwork():\n    def __init__(self, hidden_layer_sizes, nonlin_functions, dropout_coef):\n        if (len(hidden_layer_sizes) != len(dropout_coef)) or (len(hidden_layer_sizes) != len(nonlin_functions)):\n            print(\"LENGTH OF hidden_layer_sizes PARAMETER MUST EQUAL LENGTH OF dropout_coef AND LENGTH OF nonlin_functions\")\n            raise ValueError\n        self.hidden_layers = hidden_layer_sizes\n        self.nonlin_functions = nonlin_functions\n        self.dropout_coef = dropout_coef\n\n    #hidden_layers = property(operator.attrgetter('_hidden_layers'))\n    #@hidden_layers.setter\n    #def value(self, hidden_layer_sizes):\n    #    if not all(layer_size in [\"relu\", \"tanh\", \"softmax\"] for layer_size in hidden_layer_sizes):\n    #        raise Exception(\"'hidden_layer_sizes' parameter must be a list that contains 'relu', 'tanh' or 'softmax' elements\")\n    #    self._hidden_layers = hidden_layer_sizes\n\n    def text_process_data(self, X, Y=np.array([])):\n        X = X.astype(np.float32)\n        if not isinstance(X, np.ndarray):\n            X = X.toarray()\n        #X = X.astype(np.float32)\n        if Y.any():\n            Y = keras.utils.to_categorical(Y, num_classes=2).astype(np.float32)\n            X, Y = shuffle(X, Y)\n            Y_flat = np.argmax(Y, axis=1)\n            #print(X.shape, Y.shape)\n            return X, Y, Y_flat\n        return X\n\n\n    def init_model(self, D, K, 
loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'], custom_layers_sequence=None):\n print('Compiling Model ... ')\n\n # the model will be a sequence of layers\n model = Sequential()\n\n if custom_layers_sequence == None:\n for i in range(len(self.hidden_layers)):\n if i == 0:\n model.add(Dense(units=self.hidden_layers[0], input_dim=D))\n else:\n model.add(Dense(units=self.hidden_layers[i]))\n model.add(Activation(self.nonlin_functions[i]))\n model.add(Dropout(self.dropout_coef[i]))\n model.add(Dense(units=K))\n model.add(Activation('softmax'))\n else:\n '''later here may be implemented custom layer adding mechanism'''\n pass\n\n ''' list of losses: https://keras.io/losses/ \n list of optimizers: https://keras.io/optimizers/ \n list of metrics: https://keras.io/metrics/'''\n\n model.compile(loss=loss, optimizer=optimizer, metrics=metrics)\n return model\n\n def fit_network(self, X, Y, model, epochs=20, batch=256, show=True):\n #X, Y, Y_flat = self.text_process_data(X, Y)\n try:\n print('Training model...')\n #print(type(X), type(Y))\n r = model.fit(X, Y, validation_split=0.25, epochs=epochs, batch_size=batch, verbose=2)\n\n if show:\n plt.figure(figsize=(12, 9)) # make separate figure\n plt.subplot(2, 1, 1)\n plt.plot(r.history['loss'], label='loss')\n plt.plot(r.history['val_loss'], label='val_loss')\n plt.legend()\n plt.grid()\n\n plt.subplot(2, 1, 2)\n plt.plot(r.history['acc'], label='acc')\n plt.plot(r.history['val_acc'], label='val_acc')\n plt.legend()\n plt.grid()\n plt.show()\n\n except KeyboardInterrupt:\n print('KeyboardInterrupt')\n return r, r.history['loss']\n\n return r\n\n def make_prediction(self, X, model, batch=32, verbose=0):\n X = self.text_process_data(X)\n return np.argmax(model.predict(X, batch_size=batch, verbose=verbose), axis=1)\n\n\ndef main_search(iter):\n lst = [1768, 1555, 894, 674, 65, 54, 137, 327, 354, 492, 553, 634, 844, 928, 1054, 1118, 1228, 1449, 1474, 1483,\n 1504, 1529, 1559, 1733, 1881, 1917]\n data = get_data(\"products_sentiment_train.tsv\", \"products_sentiment_test_copy.tsv\", balance=True, drop_lst=lst)\n X_train, Y_train = vectorizer(np.array(data[0][\"text\"]), \n\t\t\t\t tokenizer=tokenize, \n\t\t\t\t ngram_range=(1, 4), \n\t\t\t\t max_df=0.85, \n\t\t\t\t min_df=1, \n\t\t\t\t max_features=None), \\\n\t\t np.array(data[0][\"label\"])\n\n #network_class = KerasNeuralNetwork(hidden_layer_sizes=(400, 100, 20, 10), \n #\t\t\t\t\tnonlin_functions=(\"tanh\", \"relu\", \"relu\", \"relu\"), \n #\t\t\t\t\tdropout_coef=(0.9, 0.8, 0.8, 0.8))\n network_class = KerasNeuralNetwork(hidden_layer_sizes=(400, 200, 10), \n\t\t\t\t nonlin_functions=(\"tanh\", \"tanh\", \"tanh\"), \n \t\t\t\t dropout_coef=(0.7, 0.7, 0.7))\n X, Y, Y_flat = network_class.text_process_data(X_train, Y=Y_train)\n\n\n result = []\n for i in range(iter):\n neural_network = network_class.init_model(D=X.shape[1], \n\t\t\t\t\t\t K=Y.shape[1], \n\t\t\t\t\t\t loss='categorical_crossentropy',\n optimizer=keras.optimizers.Adam(lr=0.0001, \n\t\t\t\t\t\t\t\t\t\t beta_1=0.9, \n\t\t\t\t\t\t\t\t\t\t beta_2=0.99, \n\t\t\t\t\t\t\t\t\t\t epsilon=1e-08, \n\t\t\t\t\t\t\t\t\t\t decay=0.0), \n\t\t\t\t\t\t metrics=['accuracy'])\n r = network_class.fit_network(X, Y, neural_network, epochs=30, batch=130, show=False)\n result.append(r)\n \n plt.figure(figsize=(12, 9)) # make separate figure\n plt.subplot(2, 1, 1)\n plt.plot(np.sum(np.array([r.history['loss'] for r in result]), axis=0)/iter, label='loss')\n plt.plot(np.sum(np.array([r.history['val_loss'] for r in result]), axis=0)/iter, 
label='val_loss')\n plt.legend()\n plt.grid()\n\n plt.subplot(2, 1, 2)\n plt.plot(np.sum(np.array([r.history['acc'] for r in result]), axis=0)/iter, label='acc')\n plt.plot(np.sum(np.array([r.history['val_acc'] for r in result]), axis=0)/iter, label='val_acc')\n plt.legend()\n plt.grid()\n plt.show()\n\ndef main_predict():\n lst = [1768, 1555, 894, 674, 65, 54, 137, 327, 354, 492, 553, 634, 844, 928, 1054, 1118, 1228, 1449, 1474, 1483,\n 1504, 1529, 1559, 1733, 1881, 1917]\n data = get_data(\"products_sentiment_train.tsv\", \"products_sentiment_test_copy.tsv\", balance=True, drop_lst=lst)\n train_size = (np.array(data[0][\"text\"])).shape[0]\n \n X_full, Y_full = vectorizer(np.append(np.array(data[0][\"text\"]), np.array(data[1][\"text\"])), \n\t\t\t\ttokenizer=tokenize, \n\t\t\t\tngram_range=(1, 4), \n\t\t\t\tmax_df=0.85, \n\t\t\t\tmin_df=1, \n\t\t\t\tmax_features=None), \\\n\t\t np.array(data[0][\"label\"])\n\n network_class = KerasNeuralNetwork(hidden_layer_sizes=(400, 200, 10), \n\t\t\t\t nonlin_functions=(\"tanh\", \"tanh\", \"tanh\"), \n\t\t\t\t dropout_coef=(0.7, 0.7, 0.7))\n \n X_train, Y_train, Y_flat_train = network_class.text_process_data(X_full[:train_size, :], Y=Y_full)\n neural_network = network_class.init_model(D=X_train.shape[1], \n\t\t\t\t\t K=Y_train.shape[1], \n \t\t\t\t\t loss='categorical_crossentropy',\n optimizer=keras.optimizers.Adam(lr=0.0001, \n\t\t\t\t\t\t\t\t\t beta_1=0.9, \n\t\t\t\t\t\t\t\t\t beta_2=0.99, \n\t\t\t\t\t\t\t\t\t epsilon=1e-08, \n\t\t\t\t\t\t\t\t\t decay=0.0), \n\t\t\t\t\t metrics=['accuracy'])\n\n r = network_class.fit_network(X_train, Y_train, neural_network, epochs=30, batch=130, show=False)\n\n X_predict = network_class.text_process_data(X_full[train_size:, :])\n result = network_class.make_prediction(X_predict, neural_network, batch=130)\n with open(\"keras_adam.csv\", 'w') as f_out:\n f_out.write(pd.DataFrame(pd.Series(map(str, range(0, 500))).str.cat(list(map(str, result)), sep=','),\n columns=[\"Id,y\"]).to_csv(sep=\" \", index=False))\n\n#main_search(10)\nmain_predict()","repo_name":"Raccoon987/Neural-Networks","sub_path":"3) sentiment analysis tensorflow keras/keras_example.py","file_name":"keras_example.py","file_ext":"py","file_size_in_byte":8046,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"} +{"seq_id":"74024553716","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('MultisenseHotelSystem', '0015_auto_20151206_1015'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='order',\n name='status',\n field=models.CharField(default='receive', max_length=30),\n preserve_default=False,\n ),\n ]\n","repo_name":"georgejinme/MultisenseHotelSystem","sub_path":"MultisenseHotelSystem/migrations/0016_order_status.py","file_name":"0016_order_status.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"} +{"seq_id":"5873903678","text":"\"\"\"\nSST runner for local runs only with no writing to s3\n\"\"\"\nimport json\nimport logging\nimport os\nimport sys\nfrom datetime import datetime\nfrom itertools import product\nfrom multiprocessing import Pool\n\nimport numpy as np\nfrom boto3 import Session\n\nfrom common.logger import set_up_logger\nfrom storms.cluster import (\n Cluster,\n Clusterer,\n adjust_cluster_size,\n cells_to_geometry,\n get_atlas14,\n get_xr_dataset,\n 
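# cell-counting, cluster-ranking, geometry-reading, and DSS-writing helpers\n    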
number_of_cells,\n    rank_by_max,\n    rank_by_mean,\n    rank_by_norm,\n    s3_geometry_reader,\n    write_dss,\n)\nfrom storms.utils import plotter\n\nsession = Session(os.environ[\"AWS_ACCESS_KEY_ID\"], os.environ[\"AWS_SECRET_ACCESS_KEY\"])\n\n\ndef main(\n    start: str,\n    duration: int,\n    domain_name: str,\n    domain_uri: str,\n    watershed_uri: str,\n    minimum_threshold: float,\n    dss_dir: str,\n    png_dir: str,\n    scale_max: int,\n):\n    \"\"\"\n    Main function to extract clusters from hourly AORC precipitation grids.\n    AORC data is read and aggregated in an xarray dataset.\n    A combination of thresholding and clustering is used to identify contiguous clusters\n    containing the greatest accumulated precipitation.\n\n    Multiprocessing is used for cluster size adjustments where cells are iteratively added and\n    removed from a cluster until the desired target number of cells is reached. In some instances,\n    removing a cell can make a cluster non-contiguous (i.e., two separate clusters). In this case,\n    those disconnected clusters are added back into the processing list (`args` variable). Additionally,\n    any cluster that has obtained the desired number of cells is removed from the processing list.\n    The process then restarts for any clusters remaining in the processing list, until that list is\n    empty, meaning that all clusters are at the desired size.\n\n    Once all clusters have finished processing, statistics and ranks are gathered to determine which\n    cluster has the greatest average, maximum, and normalized average accumulated precipitation.\n\n\n    Parameters\n    start: str\n        String format of date (%Y-%m-%d)\n    duration: int\n        interval or duration in hours\n    domain_name: str\n        name to include in the DSS paths\n    domain_uri: str\n        S3 URI for the transposition domain geometry\n    watershed_uri: str\n        S3 URI for the watershed geometry\n    minimum_threshold: float\n        lowest value to potentially include in clustering\n    dss_dir: str\n        file location to write DSS file to\n    png_dir: str\n        file location to write PNG files to\n    scale_max: int\n        value at the top of the scale in plotting\n\n\n    example usage: python extract_storms.py 1979-02-01 2\n    \"\"\"\n\n    data_type = \"precipitation\"\n\n    # convert str to datetime\n    start = datetime.strptime(start, \"%Y-%m-%d\")\n    start_as_str = start.strftime(\"%Y%m%d\") # for use in file naming\n\n    # read in watershed geometry and transposition domain geometry (shapely polygons)\n    transposition_geom = s3_geometry_reader(session, domain_uri)\n    watershed_geom = s3_geometry_reader(session, watershed_uri)\n\n    # read AORC data into xarray (time series)\n    # this will be used later to write to dss\n    try:\n        xdata = get_xr_dataset(data_type, start, duration, mask=transposition_geom)\n        logging.info(\n            json.dumps(\n                {\n                    \"job\": get_xr_dataset.__name__,\n                    \"status\": \"success\",\n                    \"params\": {\n                        \"data_type\": data_type,\n                        \"start\": start.strftime(\"%Y-%m-%d\"),\n                        \"duration\": duration,\n                        \"aggregate_method\": \"\",\n                        \"mask\": \"\",\n                    }, # should have some identifier for the transposition geom (mask)\n                }\n            )\n        )\n    except Exception as e:\n        logging.error(\n            json.dumps(\n                {\n                    \"job\": get_xr_dataset.__name__,\n                    \"status\": \"failed\",\n                    \"params\": {\n                        \"data_type\": data_type,\n                        \"start\": start.strftime(\"%Y-%m-%d\"),\n                        \"duration\": duration,\n                        \"aggregate_method\": \"\",\n                        \"mask\": \"\",\n                    }, # should have some identifier for the transposition geom (mask)\n                    \"error\": str(e),\n                }\n            )\n        )\n\n    # read AORC data into xarray (aggregate)\n    # this will be used for clustering/identifying storms\n    
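# aggregating over the full duration lets clusters be ranked on storm-total precipitation\n    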
aggregate_method = \"sum\"\n try:\n xsum = get_xr_dataset(\n data_type,\n start,\n duration,\n aggregate_method=aggregate_method,\n mask=transposition_geom,\n )\n logging.info(\n json.dumps(\n {\n \"job\": get_xr_dataset.__name__,\n \"status\": \"success\",\n \"params\": {\n \"data_type\": data_type,\n \"start\": start.strftime(\"%Y-%m-%d\"),\n \"duration\": duration,\n \"aggregate_method\": aggregate_method,\n \"mask\": \"\",\n }, # should have some identifier for the transposition geom (mask)\n }\n )\n )\n except Exception as e:\n logging.error(\n json.dumps(\n {\n \"job\": get_xr_dataset.__name__,\n \"status\": \"failed\",\n \"params\": {\n \"data_type\": data_type,\n \"start\": start.strftime(\"%Y-%m-%d\"),\n \"duration\": duration,\n \"aggregate_method\": aggregate_method,\n \"mask\": \"\",\n }, # should have some identifier for the transposition geom (mask)\n \"error\": str(e),\n }\n )\n )\n\n # determine target number of cells\n target_n_cells = number_of_cells(xsum, watershed_geom)\n\n # get precipitation numpy array\n data = xsum.APCP_surface.to_numpy()\n\n # run clustering algorithm\n clusterer = Clusterer(data, target_n_cells, minimum_threshold)\n cluster_labels = clusterer.db_cluster()\n\n # adjust clusters' sizes (multi-processing)\n args = list(\n product(\n [\n clusterer.get_cluster(cluster_labels, label)\n for label in np.unique(cluster_labels)\n if label > -1\n ],\n [target_n_cells],\n )\n )\n\n # will hold the final clusters\n final_clusters = []\n\n while args:\n with Pool(4) as p:\n results = p.starmap(adjust_cluster_size, args)\n\n # flatten results (potentially mixed returns of Clusters and lists)\n results = [\n *[cluster for cluster in results if isinstance(cluster, Cluster)],\n *[\n cluster\n for split_clusters in results\n if isinstance(split_clusters, list)\n for cluster in split_clusters\n ],\n ]\n\n # overwrite args with \"unfinished\" (split) clusters\n args = [cluster for cluster in results if cluster.size != target_n_cells]\n\n # add \"finished\" clusters to the final list\n final_clusters.extend(\n [cluster for cluster in results if cluster.size == target_n_cells]\n )\n\n # gather statistics on clusters (how to handle ties?)\n mean_ranks = rank_by_mean(final_clusters)\n mean_cluster = final_clusters[np.argmax(mean_ranks)]\n\n max_ranks = rank_by_max(final_clusters)\n max_cluster = final_clusters[np.argmax(max_ranks)]\n\n if duration <= 24:\n atlas_14_uri = f\"s3://tempest/transforms/atlas14/2yr{duration:02d}ha/2yr{duration:02d}ha.vrt\"\n else:\n # add check here that duration divisible by 24\n atlas_14_uri = f\"s3://tempest/transforms/atlas14/2yr{int(duration/24):02d}da/2yr{int(duration/24):02d}da.vrt\"\n\n xnorm = get_atlas14(atlas_14_uri, xsum.APCP_surface)\n inch_to_mm = 25.4\n norm_ranks = rank_by_norm(final_clusters, xnorm.to_numpy(), inch_to_mm)\n norm_cluster = final_clusters[np.argmax(norm_ranks)]\n\n # store cluster data (png, nosql)\n transform = xsum.rio.transform()\n cellsize_x = abs(transform[0])\n cellsize_y = abs(transform[4])\n\n # pngs - add mm to inch conversion\n\n # mean cluster\n clust_geom = cells_to_geometry(\n xsum.longitude.to_numpy(),\n xsum.latitude.to_numpy(),\n cellsize_x,\n cellsize_y,\n mean_cluster.cells,\n )\n plotter.cluster_plot(\n xsum,\n clust_geom,\n 0,\n scale_max,\n \"Accumulation (MM)\",\n png=os.path.join(png_dir, f\"{start_as_str}-mean.png\"),\n )\n\n # max cluster\n clust_geom = cells_to_geometry(\n xsum.longitude.to_numpy(),\n xsum.latitude.to_numpy(),\n cellsize_x,\n cellsize_y,\n max_cluster.cells,\n )\n 
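# plot the footprint of the top-ranked max cluster over the accumulation grid\n    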
plotter.cluster_plot(\n        xsum,\n        clust_geom,\n        0,\n        scale_max,\n        \"Accumulation (MM)\",\n        png=os.path.join(png_dir, f\"{start_as_str}-max.png\"),\n    )\n\n    # norm cluster\n    clust_geom = cells_to_geometry(\n        xsum.longitude.to_numpy(),\n        xsum.latitude.to_numpy(),\n        cellsize_x,\n        cellsize_y,\n        norm_cluster.cells,\n    )\n    plotter.cluster_plot(\n        xsum,\n        clust_geom,\n        0,\n        scale_max,\n        \"Accumulation (MM)\",\n        png=os.path.join(png_dir, f\"{start_as_str}-norm.png\"),\n    )\n\n    # write grid to dss\n\n    write_dss(\n        xdata,\n        os.path.join(dss_dir, f\"{start_as_str}.dss\"),\n        \"SHG4K\",\n        domain_name.upper(),\n        \"PRECIPITATION\",\n        \"AORC\",\n        resolution=4000,\n    )\n\n\nif __name__ == \"__main__\":\n    execution_time = datetime.now().strftime(\"%Y%m%d_%H%M\")\n    logfile = f\"extract-storms-{execution_time}.log\"\n\n    logger = set_up_logger()\n    logger.setLevel(logging.DEBUG)\n\n    args = sys.argv\n\n    start = args[1]\n    # sys.argv values are strings; cast the numeric arguments before use\n    duration = int(args[2])\n    domain_name = args[3]\n    domain_uri = args[4]\n    watershed_uri = args[5]\n    minimum_threshold = float(args[6])\n    dss_dir = args[7]\n    png_dir = args[8]\n    scale_max = int(args[9])\n\n    main(\n        start,\n        duration,\n        domain_name,\n        domain_uri,\n        watershed_uri,\n        minimum_threshold,\n        dss_dir,\n        png_dir,\n        scale_max,\n    )\n","repo_name":"Dewberry/stormcloud","sub_path":"stormcloud/etl/extract_storms.py","file_name":"extract_storms.py","file_ext":"py","file_size_in_byte":10483,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"17417759177","text":"# https://leetcode.com/problems/text-justification/\nimport math\nfrom typing import List\n\nfrom decorators import measure_execution_time\n\n\nclass Solution:\n    maxWidth: int\n\n    def append_to_output(self, temp_arr, temp_len, out):\n        if len(temp_arr) > 1:\n            spaces = math.floor((self.maxWidth - temp_len) / (len(temp_arr) - 1))\n            extra_spaces = (self.maxWidth - temp_len) % (len(temp_arr) - 1)\n        else:\n            spaces = self.maxWidth - len(temp_arr[0])\n            extra_spaces = 0\n\n        temp_str = ''\n        for i, temp_word in enumerate(temp_arr):\n            spaces_len = spaces\n            if extra_spaces:\n                spaces_len += 1\n                extra_spaces -= 1\n\n            elif i >= len(temp_arr) - 1:\n                spaces_len = 0\n\n            if len(temp_arr) == 1:\n                spaces_len = spaces\n\n            temp_str += temp_word\n            temp_str += ' ' * spaces_len\n        out.append(\n            temp_str,\n        )\n\n    def append_last_word_to_output(self, temp_arr, out, *args, **kwargs):\n        temp_str = ' '.join(temp_arr)\n        temp_str += ' ' * (self.maxWidth-len(temp_str))\n        out.append(\n            temp_str,\n        )\n\n    def fullJustify(self, words: List[str], maxWidth: int = 16) -> List[str]:\n        out = []\n        temp_arr = []\n        temp_len = 0\n        self.maxWidth = maxWidth\n        for i, word in enumerate(words):\n\n            if len(word) + temp_len + len(temp_arr) <= self.maxWidth:\n                temp_arr.append(word)\n                temp_len += len(word)\n\n            else:\n                self.append_to_output(temp_arr, temp_len, out)\n                temp_arr = [word]\n                temp_len = len(word)\n\n        if temp_arr:\n            self.append_last_word_to_output(temp_arr, out, temp_len)\n\n        return out\n\n\ndef print_res(string_arr):\n    print('[')\n    for str in string_arr:\n        print(f'\\t{str}: {len(str)}')\n    print(']')\n\n\n@measure_execution_time\ndef main():\n    in_val = {\n        'words': [\"What\",\"must\",\"be\",\"acknowledgment\",\"shall\",\"be\"],\n        'maxWidth': 16,\n    }\n    out = Solution().fullJustify(**in_val)\n    print_res(out)\n    expected_res = [\"What   must   be\",\"acknowledgment  \",\"shall be        \"]\n\n    print_res(expected_res)\n\n\nif __name__ == '__main__':\n    
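# run the sample case defined in main()\n    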
main()\n","repo_name":"Tauassar/problemsolving","sub_path":"68_text_justification.py","file_name":"68_text_justification.py","file_ext":"py","file_size_in_byte":2316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"73081686550","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\n@Time : 2019-09-14 01:58\n@Author : Jann\n@Contact : l33klin@gmail.com\n@Site : \n@File : websocket_server.py\n@Desc : Description\n\"\"\"\n# WS server example\n\nimport asyncio\nimport websockets\n\n\nasync def hello(websocket, path):\n    name = await websocket.recv()\n    print(f\"< {name}\")\n\n    greeting = f\"Hello {name}!\"\n    # use asyncio.sleep rather than time.sleep so the event loop is not blocked\n    await asyncio.sleep(2)\n\n    await websocket.send(greeting)\n    print(f\"> {greeting}\")\n\nstart_server = websockets.serve(hello, \"localhost\", 8765)\n\nasyncio.get_event_loop().run_until_complete(start_server)\nasyncio.get_event_loop().run_forever()\n","repo_name":"l33klin/python_pratice","sub_path":"websocket/websocket_server.py","file_name":"websocket_server.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"17683353779","text":"#for queries example from the tutorial \nfrom sys import argv\nfrom aiida.orm.querybuilder import QueryBuilder\nfrom aiida.orm.data.remote import RemoteData\npath=\"/home/aiida/Documents/seb352-travail/essais-tuto/res/\"\nStructureData = DataFactory(\"structure\")\nParameterData = DataFactory(\"parameter\")\n\nqb1=QueryBuilder()\nqb2=QueryBuilder()\nqb3=QueryBuilder()\n#qb.append(Node, project=[\"id\"])\n\n#enumerate the results for each query key\n#for node, in qb.iterall():\n#\tprint node\n#print\n#print(\"Number of species \"+str( qb.count()))\n\n#qb.append(StructureData, project=[\"id\", \"uuid\"], \n#\tfilters={\"or\":[\n#\t{\"id\":{\"==\":285}}, {\"id\":{\"==\":3512}} ] })\n\n\n#\tTo establish links between states\nqb1.append(RemoteData, tag=\"remote\", project=[\"*\"])\nqb1.append(Group, group_of=\"remote\")\n\nqb2.append(RemoteData, project=[\"*\"])\n\nqb3.append(Group)\n\n\n#qb.append(ParameterData, project=[\"attributes.energy_smearing\"]) #, filters=)\n#qb.append(ParameterData, project=[\"attributes.element\"])\n\nf1=open(path+\"remoteData_Group\", 'w')\nf2=open(path+\"remoteData\", 'w')\nf3=open(path+\"Group\", 'w')\n\nfor i in qb1.iterall():\n\tf1.write(str(i)+\"\\n\")\n\nfor j in qb2.iterall():\n\tf2.write(str(j)+\"\\n\")\n\nfor k in qb3.iterall():\n\tf3.write(str(k)+\"\\n\")\n\n\n\nf1.close()\nf2.close()\nf3.close()\n\n","repo_name":"sebbienvenue/seb352-travail","sub_path":"essais-tuto/q3.py","file_name":"q3.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"15331782678","text":"import re\nimport urllib.parse\nfrom socketserver import TCPServer\nfrom http.server import SimpleHTTPRequestHandler\nimport requests\nfrom bs4 import BeautifulSoup\n\n\n# example url http://127.0.0.1:8002/item?id=13713480\nWORD_LEN_TO_PROCESSING = 6\nPORT = 8002\nTARGER_DOMAIN = \"news.ycombinator.com\"\nPROXY_DOMAIN = f\"127.0.0.1:{PORT}\"\nTARGER_ADDRESS = f\"https://{TARGER_DOMAIN}\"\n\n\ndef replace_content(content: bytes) -> bytes:\n    result = content\n    soup = BeautifulSoup(content.decode(\"utf-8\"), \"html.parser\")\n    body = soup.body\n    regex = re.compile(r\"(\\W)(\\w{}{}{})(?!{})(\\W)\".format(\n        \"{\", WORD_LEN_TO_PROCESSING, \"}\", \"\\N{TRADE MARK SIGN}\"))\n\n    for txt in body.find_all(string=True):\n        if 
regex.search(txt) and txt.parent.name != \"a\":\n            newtext = regex.sub(\n                \"{}{}{}\".format(r\"\\1\\2\", \"\\N{TRADE MARK SIGN}\", r\"\\3\"),\n                txt,\n            )\n            txt.replace_with(newtext)\n\n    for a in body.find_all(\"a\"):\n        a[\"href\"] = a[\"href\"].replace(TARGER_DOMAIN, PROXY_DOMAIN)\n\n    result = soup.encode(\"utf-8\")\n\n    return result\n\n\nclass MyProxy(SimpleHTTPRequestHandler):\n    def do_GET(self):\n        full_url = urllib.parse.urljoin(TARGER_ADDRESS, self.path)\n        r = requests.get(full_url)\n        self.send_response(r.status_code)\n        self.send_header(\"Content-type\", r.headers[\"Content-Type\"])\n        self.end_headers()\n        content = r.content\n\n        if r.status_code == 200 and r.headers[\"Content-Type\"].startswith(\"text/html\"):\n            content = replace_content(r.content)\n\n        self.wfile.write(content)\n\n\nclass ReuseAddressTCPServer(TCPServer):\n    allow_reuse_address = True\n\n\ndef main():\n    with ReuseAddressTCPServer((\"\", PORT), MyProxy) as httpd:\n        print(\"Now serving at \" + str(PORT))\n        httpd.serve_forever()\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"tyunn/p-proxy","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"49"} +{"seq_id":"3046514946","text":"import uuid\n\nfrom datetime import datetime\nfrom sqlalchemy import Integer, String, Boolean, DateTime\nfrom sqlalchemy import Column, MetaData, Table, ForeignKey\n\nENGINE = 'InnoDB'\nCHARSET = 'utf8'\n\n\ndef _populate_ptp_table(migrate_engine, meta, ptp, i_system):\n    \"\"\"This function inserts the initial PTP data\n    into the ptp table.\n    \"\"\"\n\n    # use isnot(None) for a SQL \"IS NOT NULL\" test; a Python \"is not None\"\n    # on a column object is an identity check and is always true\n    sys = list(i_system.select().where(i_system.c.uuid.isnot(None)).execute())\n    if len(sys) > 0:\n        ptp_insert = ptp.insert()\n        ptp_uuid = str(uuid.uuid4())\n        values = {'created_at': datetime.now(),\n                  'updated_at': None,\n                  'deleted_at': None,\n                  'uuid': ptp_uuid,\n                  'enabled': False,\n                  'mode': 'hardware',\n                  'transport': 'l2',\n                  'mechanism': 'e2e',\n                  'system_id': sys[0].id,\n                  }\n        ptp_insert.execute(values)\n\n\ndef upgrade(migrate_engine):\n\n    meta = MetaData()\n    meta.bind = migrate_engine\n\n    i_system = Table('i_system', meta, autoload=True)\n    ptp = Table(\n        'ptp',\n        meta,\n        Column('created_at', DateTime),\n        Column('updated_at', DateTime),\n        Column('deleted_at', DateTime),\n\n        Column('id', Integer, primary_key=True, nullable=False),\n        Column('uuid', String(36), unique=True),\n\n        Column('enabled', Boolean, default=False),\n        Column('mode', String(16), default='hardware'),\n        Column('transport', String(4), default='l2'),\n        Column('mechanism', String(4), default='e2e'),\n\n        Column('system_id', Integer,\n               ForeignKey('i_system.id', ondelete=\"CASCADE\"),\n               nullable=True),\n\n        mysql_engine=ENGINE,\n        mysql_charset=CHARSET,\n    )\n    ptp.create()\n    # Populate the new ptp table with the initial data\n    _populate_ptp_table(migrate_engine, meta, ptp, i_system)\n\n\ndef downgrade(migrate_engine):\n    meta = MetaData()\n    meta.bind = migrate_engine\n\n    ptp = Table('ptp', meta, autoload=True)\n    ptp.drop()\n","repo_name":"starlingx/config","sub_path":"sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/migrate_repo/versions/075_ptp.py","file_name":"075_ptp.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"49"} +{"seq_id":"39930896559","text":"from django.contrib.auth.base_user import AbstractBaseUser\nfrom django.contrib.auth.models import PermissionsMixin\nfrom django.utils.translation import ugettext_lazy 
as _\nfrom django.db import models\n\nfrom .managers import ChannelManager\n\n\nclass Channel(AbstractBaseUser, PermissionsMixin):\n    channel_id = models.CharField(\n        _('username'),\n        max_length=50,\n        unique=True,\n        error_messages={\n            'unique': _(\"A user with that channel_id already exists.\"),\n        },\n    )\n\n    is_staff = models.BooleanField(\n        _('staff status'),\n        default=False,\n        help_text=_('Designates whether the user can log into this admin site.'),\n    )\n    is_active = models.BooleanField(\n        _('active'),\n        default=True,\n        help_text=_(\n            'Designates whether this user should be treated as active. '\n            'Unselect this instead of deleting accounts.'\n        ),\n    )\n\n    USERNAME_FIELD = 'channel_id'\n\n    objects = ChannelManager()\n\n    class Meta:\n        verbose_name = _('user')\n        verbose_name_plural = _('users')\n\n    def __str__(self):\n        return self.channel_id\n\n    def get_short_name(self):\n        return self.channel_id\n\n    def get_full_name(self):\n        return self.channel_id\n","repo_name":"MattBroach/TheRundown","sub_path":"backend/channels/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"14564053844","text":"def ListIndexFunction():\n\n    \"\"\"\n    Function that stores strings in a list using the \"append\" function and, once it holds 4 elements, asks you to look up the position of one of them\n    using the \"index\" function.\n\n    list = variable where the strings are stored\n    \"\"\"\n\n    list = []\n\n    # infinite loop.\n    while True:\n\n        list.append(input(\"type a word \"))\n\n        # loop that starts once the list has more than 3 elements.\n        while len(list) > 3:\n\n            try:\n\n                print(list.index(input(\"search an entered word \")))\n\n            except ValueError:\n                # if the user enters a word that is not in the list, print this error message.\n                print(\"the word is not registered in the list\")","repo_name":"angelopol/ListIndexFunction","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"49914274727","text":"from __future__ import absolute_import, print_function, unicode_literals\nfrom builtins import dict, str\nimport os\nfrom copy import deepcopy\nfrom indra.preassembler.hierarchy_manager import hierarchies, HierarchyManager\nfrom indra.statements import get_valid_location, InvalidLocationError, Agent\nfrom indra.util import unicode_strs\n\nent_hierarchy = hierarchies['entity']\nmod_hierarchy = hierarchies['modification']\nact_hierarchy = hierarchies['activity']\ncomp_hierarchy = hierarchies['cellular_component']\n\ndef test_hierarchy_unicode():\n    # Test all the hierarchies except the comp_hierarchy, which is an\n    # RDF graph\n    assert unicode_strs((ent_hierarchy.isa_closure,\n                         ent_hierarchy.partof_closure))\n    assert unicode_strs((mod_hierarchy.isa_closure,\n                         mod_hierarchy.partof_closure))\n    assert unicode_strs((act_hierarchy.isa_closure,\n                         act_hierarchy.partof_closure))\n\n\ndef test_isa_entity():\n    assert(ent_hierarchy.isa('HGNC', 'BRAF', 'FPLX', 'RAF'))\n\n\ndef test_isa_entity2():\n    assert(not ent_hierarchy.isa('HGNC', 'BRAF', 'HGNC', 'ARAF'))\n\n\ndef test_isa_entity3():\n    assert(not ent_hierarchy.isa('FPLX', 'RAF', 'HGNC', 'BRAF'))\n\n\ndef test_partof_entity():\n    assert ent_hierarchy.partof('FPLX', 'HIF_alpha', 'FPLX', 'HIF')\n\n\ndef test_isa_or_partof_entity():\n    assert ent_hierarchy.isa_or_partof('HGNC', 'PRKAG1', 'FPLX', 'AMPK')\n\n\ndef test_partof_entity_not():\n    assert 
not ent_hierarchy.partof('FPLX', 'HIF1', 'FPLX', 'HIF_alpha')\n\n\ndef test_isa_mod():\n assert(mod_hierarchy.isa('INDRA_MODS', 'phosphorylation',\n 'INDRA_MODS', 'modification'))\n\ndef test_isa_mod_not():\n assert(not mod_hierarchy.isa('INDRA_MODS', 'phosphorylation',\n 'INDRA_MODS', 'ubiquitination'))\n\ndef test_isa_activity():\n assert act_hierarchy.isa('INDRA_ACTIVITIES', 'kinase',\n 'INDRA_ACTIVITIES', 'activity')\n\ndef test_isa_activity_not():\n assert not act_hierarchy.isa('INDRA_ACTIVITIES', 'kinase',\n 'INDRA_ACTIVITIES', 'phosphatase')\n\ndef test_partof_comp():\n assert comp_hierarchy.partof('INDRA_LOCATIONS', 'cytoplasm',\n 'INDRA_LOCATIONS', 'cell')\n\ndef test_partof_comp_not():\n assert not comp_hierarchy.partof('INDRA_LOCATIONS', 'cell',\n 'INDRA_LOCATIONS', 'cytoplasm')\n\ndef test_partof_comp_none():\n assert comp_hierarchy.partof('INDRA_LOCATIONS', 'cytoplasm',\n 'INDRA_LOCATIONS', None)\n\ndef test_partof_comp_none_none():\n assert comp_hierarchy.partof('INDRA_LOCATIONS', None,\n 'INDRA_LOCATIONS', None)\n\ndef test_partof_comp_none_not():\n assert not comp_hierarchy.partof('INDRA_LOCATIONS', None,\n 'INDRA_LOCATIONS', 'cytoplasm')\n\ndef test_get_children():\n raf = 'http://identifiers.org/fplx/RAF'\n braf = 'http://identifiers.org/hgnc.symbol/BRAF'\n mapk = 'http://identifiers.org/fplx/MAPK'\n ampk = 'http://identifiers.org/fplx/AMPK'\n # Look up RAF\n rafs = ent_hierarchy.get_children(raf)\n # Should get three family members\n assert isinstance(rafs, list)\n assert len(rafs) == 3\n assert unicode_strs(rafs)\n # The lookup of a gene-level entity should not return any additional\n # entities\n brafs = ent_hierarchy.get_children(braf)\n assert isinstance(brafs, list)\n assert len(brafs) == 0\n assert unicode_strs(brafs)\n mapks = ent_hierarchy.get_children(mapk)\n assert len(mapks) == 12\n assert unicode_strs(mapks)\n # Make sure we can also do this in a case involving both family and complex\n # relationships\n ampks = ent_hierarchy.get_children(ampk)\n assert len(ampks) == 22\n ag_none = ''\n none_children = ent_hierarchy.get_children('')\n assert isinstance(none_children, list)\n assert len(none_children) == 0\n\ndef test_mtorc_children():\n mtorc1 = 'http://identifiers.org/fplx/mTORC1'\n mtorc2 = 'http://identifiers.org/fplx/mTORC2'\n ch1 = ent_hierarchy.get_children(mtorc1)\n ch2 = ent_hierarchy.get_children(mtorc2)\n assert('http://identifiers.org/hgnc.symbol/RICTOR' not in ch1)\n assert('http://identifiers.org/hgnc.symbol/RPTOR' not in ch2)\n\ndef test_mtorc_get_parents():\n rictor = 'http://identifiers.org/hgnc.symbol/RICTOR'\n p = ent_hierarchy.get_parents(rictor, 'all')\n assert(len(p) == 1)\n assert(list(p)[0] == 'http://identifiers.org/fplx/mTORC2')\n\ndef test_mtorc_transitive_closure():\n rictor = 'http://identifiers.org/hgnc.symbol/RICTOR'\n p = ent_hierarchy.partof_closure.get(rictor)\n assert(len(p) == 1)\n assert(p[0] == 'http://identifiers.org/fplx/mTORC2')\n\ndef test_mtorc_partof_no_tc():\n ent_hierarchy_no_tc = deepcopy(ent_hierarchy)\n ent_hierarchy_no_tc.isa_closure = {}\n ent_hierarchy_no_tc.partof_closure = {}\n assert(ent_hierarchy_no_tc.partof('HGNC', 'RPTOR', 'FPLX', 'mTORC1'))\n assert(not ent_hierarchy_no_tc.partof('HGNC', 'RPTOR', 'FPLX', 'mTORC2'))\n\ndef test_erk_isa_no_tc():\n ent_hierarchy_no_tc = deepcopy(ent_hierarchy)\n ent_hierarchy_no_tc.isa_closure = {}\n ent_hierarchy_no_tc.partof_closure = {}\n assert(ent_hierarchy_no_tc.isa('HGNC', 'MAPK1', 'FPLX', 'MAPK'))\n assert(not ent_hierarchy_no_tc.isa('HGNC', 'MAPK1', 'FPLX', 
'JNK'))\n\ndef test_get_parents():\n prkaa1 = 'http://identifiers.org/hgnc.symbol/PRKAA1'\n ampk = 'http://identifiers.org/fplx/AMPK'\n p1 = ent_hierarchy.get_parents(prkaa1, 'all')\n assert(len(p1) == 8)\n assert(ampk in p1)\n p2 = ent_hierarchy.get_parents(prkaa1, 'immediate')\n assert(len(p2) == 7)\n assert (ampk not in p2)\n p3 = ent_hierarchy.get_parents(prkaa1, 'top')\n assert(len(p3) == 1)\n assert (ampk in p3)\n\n\ndef test_load_eidos_hierarchy():\n eidos_ont = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n '../sources/eidos/eidos_ontology.rdf')\n eidos_ns = 'https://github.com/clulab/eidos/wiki/JSON-LD/Grounding#'\n hm = HierarchyManager(eidos_ont, True, True)\n assert hm.isa_closure\n eidos_isa = lambda a, b: hm.isa('UN', a, 'UN', b)\n assert eidos_isa('UN/events/human/conflict',\n 'UN/events/human')\n assert not eidos_isa('UN/events/human/conflict',\n 'UN/events/human/human_migration')\n assert eidos_isa('UN/entities/human/infrastructure',\n 'UN/entities')\n assert eidos_isa('UN/events/natural_disaster/storm',\n 'UN/events')\n assert not eidos_isa('UN/events',\n 'UN/events/natural/weather/storm')\n\n\ndef test_load_trips_hierarchy():\n trips_ont = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n '../sources/cwms/trips_ontology.rdf')\n hm = HierarchyManager(trips_ont, True, True)\n assert hm.isa_closure\n trips_isa = lambda a, b: hm.isa('CWMS', a, 'CWMS', b)\n assert trips_isa('ONT::TRUCK', 'ONT::VEHICLE')\n assert not trips_isa('ONT::VEHICLE', 'ONT::TRUCK')\n assert trips_isa('ONT::MONEY', 'ONT::PHYS-OBJECT')\n assert trips_isa('ONT::TABLE', 'ONT::MANUFACTURED-OBJECT')\n\n\ndef test_load_hume_hierarchy():\n hume_ont = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n '../sources/hume/hume_ontology.rdf')\n hm = HierarchyManager(hume_ont, True, True)\n assert hm.isa_closure\n hume_isa = lambda a, b: hm.isa('HUME', a, 'HUME', b)\n assert hume_isa('entity/academic_discipline', 'entity')\n assert not hume_isa('entity', 'entity/academic_discipline')\n assert hume_isa('event/healthcare/human_disease',\n 'event/healthcare')\n\ndef test_same_components():\n uri_prkag1 = ent_hierarchy.get_uri('HGNC', 'PRKAG1')\n uri_ampk = ent_hierarchy.get_uri('FPLX', 'AMPK')\n\n c1 = ent_hierarchy.components[uri_prkag1]\n c2 = ent_hierarchy.components[uri_ampk]\n assert(c1 == c2)\n","repo_name":"budakn/INDRA","sub_path":"indra/tests/test_hierarchy_manager.py","file_name":"test_hierarchy_manager.py","file_ext":"py","file_size_in_byte":7986,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"49"} +{"seq_id":"1135248355","text":"from django.urls import path\n\nfrom movies import views\n\nurlpatterns = [\n path('list/', views.movie_list_view, name='list'),\n path('image/', views.image_view, name=\"image\"),\n path(\n 'upload-poster/',\n views.upload_poster,\n name=\"upload-poster\"\n ),\n path('add/', views.add_movie, name='add')\n]\n","repo_name":"shashank-k-y/PosterManagement","sub_path":"movies/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"22852473845","text":"from .exceptions import (ItemNotFoundError, NoPathToItem)\n\n\ndef path_to_location(modulestore, usage_key):\n '''\n Try to find a course_id/chapter/section[/position] path to location in\n modulestore. 
The courseware insists that the first level in the course is\n chapter, but any kind of module can be a \"section\".\n\n Args:\n modulestore: which store holds the relevant objects\n usage_key: :class:`UsageKey` the id of the location to which to generate the path\n\n Raises\n ItemNotFoundError if the location doesn't exist.\n NoPathToItem if the location exists, but isn't accessible via\n a chapter/section path in the course(s) being searched.\n\n Returns:\n a tuple (course_id, chapter, section, position) suitable for the\n courseware index view.\n\n If the section is a sequential or vertical, position will be the children index\n of this location under that sequence.\n '''\n\n def flatten(xs):\n '''Convert lisp-style (a, (b, (c, ()))) list into a python list.\n Not a general flatten function. '''\n p = []\n while xs != ():\n p.append(xs[0])\n xs = xs[1]\n return p\n\n def find_path_to_course():\n '''Find a path up the location graph to a node with the\n specified category.\n\n If no path exists, return None.\n\n If a path exists, return it as a list with target location first, and\n the starting location last.\n '''\n # Standard DFS\n\n # To keep track of where we came from, the work queue has\n # tuples (location, path-so-far). To avoid lots of\n # copying, the path-so-far is stored as a lisp-style\n # list--nested hd::tl tuples, and flattened at the end.\n queue = [(usage_key, ())]\n while len(queue) > 0:\n (next_usage, path) = queue.pop() # Takes from the end\n\n # get_parent_location raises ItemNotFoundError if location isn't found\n parent = modulestore.get_parent_location(next_usage)\n\n # print 'Processing loc={0}, path={1}'.format(next_usage, path)\n if next_usage.block_type == \"course\":\n # Found it!\n path = (next_usage, path)\n return flatten(path)\n elif parent is None:\n # Orphaned item.\n return None\n\n # otherwise, add parent locations at the end\n newpath = (next_usage, path)\n queue.append((parent, newpath))\n\n with modulestore.bulk_operations(usage_key.course_key):\n if not modulestore.has_item(usage_key):\n raise ItemNotFoundError(usage_key)\n\n path = find_path_to_course()\n if path is None:\n raise NoPathToItem(usage_key)\n\n n = len(path)\n course_id = path[0].course_key\n # pull out the location names\n chapter = path[1].name if n > 1 else None\n section = path[2].name if n > 2 else None\n # Figure out the position\n position = None\n\n # This block of code will find the position of a module within a nested tree\n # of modules. If a problem is on tab 2 of a sequence that's on tab 3 of a\n # sequence, the resulting position is 3_2. However, no positional modules\n # (e.g. sequential and videosequence) currently deal with this form of\n # representing nested positions. 
This needs to happen before jumping to a\n # module nested in more than one positional module will work.\n if n > 3:\n position_list = []\n for path_index in range(2, n - 1):\n category = path[path_index].block_type\n if category == 'sequential' or category == 'videosequence':\n section_desc = modulestore.get_item(path[path_index])\n # this calls get_children rather than just children b/c old mongo includes private children\n # in children but not in get_children\n child_locs = [c.location for c in section_desc.get_children()]\n # positions are 1-indexed, and should be strings to be consistent with\n # url parsing.\n position_list.append(str(child_locs.index(path[path_index + 1]) + 1))\n position = \"_\".join(position_list)\n\n return (course_id, chapter, section, position)\n","repo_name":"jruiperezv/ANALYSE","sub_path":"common/lib/xmodule/xmodule/modulestore/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":4382,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"49"} +{"seq_id":"17955882914","text":"\"\"\"\nAuthor: Michael Moore\nDate: 10/25/20\nThis program scrapes url for data and returns it in a pretty form.\n\"\"\"\n\nimport requests, bs4\nurl = 'https://www.dmacc.edu/programs/Pages/welcome.aspx'\nresponse = requests.get(url)\nhtml = response.content\nf = open(\"request_result.txt\",\"w+\")\nf.writelines(str(html))\nsoup = bs4.BeautifulSoup(open(\"request_result.txt\"), 'html.parser')\nprint(soup.prettify())\n\n","repo_name":"mrmoore6/Module-9","sub_path":"file scraper.py","file_name":"file scraper.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"12801643855","text":"import torch\nfrom torch import nn\nimport torch.nn.functional as F\n\n# module\nclass MultiLinear(nn.Module):\n \"\"\" linear layer network with given depth and hidden units\n input (Cin,) -> (Cout,)\"\"\"\n def __init__(self, \n input_length: int, \n output_length: int,\n linearlayer_connection: tuple,\n activation: nn.Module = None\n ):\n super().__init__()\n\n self.output_length = output_length\n self.input_length = input_length\n prev_connection = self.input_length\n\n self.activation = activation or nn.ReLU()\n\n linear_sequence = []\n for nconnection in linearlayer_connection:\n linear_sequence.append(nn.Linear(prev_connection, nconnection))\n linear_sequence.append(self.activation)\n prev_connection = nconnection\n linear_sequence.append(nn.Linear(prev_connection, output_length)) \n \n self.linearlayer = nn.Sequential(*linear_sequence)\n\n @property\n def input_shape(self):\n return self.input_length\n\n @property\n def output_shape(self):\n return self.output_length\n\n def forward(self, x):\n out = self.linearlayer(x)\n return out\n\n\nclass ConvNet(nn.Module):\n \"\"\"hyper parameters:\n number of convolution layer [1, 2, 3, 4]\n dropout1: (0 - 0.7) \n dropout2: \n channel_increase [8, 16, 32, 64]\n linear unit: [16, 32, 64, 128, 256]\n \"\"\"\n input_pixel = 28\n input_channel = 1\n output_size = 10\n\n def __init__(self, \n nconvolution: int = 2,\n initial_channel: int = 32,\n dropout1: float = 0.25,\n dropout2: float = 0.5,\n n_linear: int = 128\n ):\n super().__init__()\n\n convs = []\n input_channel = self.input_channel\n for i in range(nconvolution):\n if i == 0:\n output_channel = initial_channel\n else:\n output_channel = input_channel * 2\n convs.append(nn.Sequential(\n nn.Conv2d(input_channel, output_channel, 3, 1, padding=1),\n 
nn.ReLU(),\n nn.Dropout(dropout1)\n ))\n input_channel = output_channel\n\n \n self.convolutions = torch.nn.ModuleList(convs)\n # image is output_channel * 28 * 28\n self.pooling = nn.MaxPool2d(2)\n\n self.dropout = nn.Dropout(dropout2)\n self.fc1 = nn.Linear(int(output_channel * self.input_pixel ** 2 / 4), n_linear)\n self.fc2 = nn.Linear(n_linear, self.output_size)\n\n self.c1 = nn.Conv2d(1, 32, 3, 1, padding= 1)\n self.c2 = nn.ReLU()\n self.c3 = nn.Dropout(dropout1)\n\n def forward(self, x):\n for conv in self.convolutions:\n x = conv(x)\n x = self.pooling(x)\n\n x = torch.flatten(x, 1)\n x = self.fc1(x)\n x = F.relu(x)\n x = self.dropout(x)\n x = self.fc2(x)\n output = F.log_softmax(x, dim=1)\n return output\n","repo_name":"whzhangg/torch_examples","sub_path":"mlp.py","file_name":"mlp.py","file_ext":"py","file_size_in_byte":2974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"42536551613","text":"import sys\nfrom typing import cast\nfrom unittest import mock\n\nif sys.version_info >= (3, 9):\n import zoneinfo\nelse:\n from backports import zoneinfo\n\nimport pytest\nfrom forestadmin.agent_toolkit.utils.context import User\nfrom forestadmin.datasource_toolkit.collections import Collection\nfrom forestadmin.datasource_toolkit.interfaces.fields import FieldType, ManyToMany, Operator\nfrom forestadmin.datasource_toolkit.interfaces.query.condition_tree.nodes.branch import Aggregator, ConditionTreeBranch\nfrom forestadmin.datasource_toolkit.interfaces.query.condition_tree.nodes.leaf import ConditionTreeLeaf\nfrom forestadmin.datasource_toolkit.interfaces.query.filter.factory import FilterFactory, FilterFactoryException\nfrom forestadmin.datasource_toolkit.interfaces.query.filter.paginated import PaginatedFilter\nfrom forestadmin.datasource_toolkit.interfaces.query.filter.unpaginated import Filter\nfrom forestadmin.datasource_toolkit.interfaces.query.projections import Projection\n\nmocked_caller = User(\n rendering_id=1,\n user_id=1,\n tags={},\n email=\"dummy@user.fr\",\n first_name=\"dummy\",\n last_name=\"user\",\n team=\"operational\",\n timezone=zoneinfo.ZoneInfo(\"Europe/Paris\"),\n)\n\n\n@mock.patch(\"forestadmin.datasource_toolkit.interfaces.query.filter.factory.time_transforms\")\ndef test_shift_period_filter(mock_time_transform: mock.MagicMock):\n shift_period_filter_replacer = FilterFactory._shift_period_filter(\"UTC\") # type: ignore\n leaf = ConditionTreeLeaf(field=\"test\", operator=Operator.PREVIOUS_YEAR)\n mock_replacer = mock.MagicMock(return_value=\"fake_replacer\")\n mock_time_transform.return_value = {Operator.PREVIOUS_YEAR: [{\"replacer\": mock_replacer}]}\n with mock.patch(\n \"forestadmin.datasource_toolkit.interfaces.query.filter.factory.SHIFTED_OPERATORS\", {Operator.PREVIOUS_YEAR}\n ):\n assert shift_period_filter_replacer(leaf) == \"fake_replacer\"\n mock_time_transform.assert_called_once_with(1)\n mock_replacer.assert_called_once_with(leaf, \"UTC\")\n\n with mock.patch(\"forestadmin.datasource_toolkit.interfaces.query.filter.factory.SHIFTED_OPERATORS\", {}):\n with pytest.raises(FilterFactoryException):\n shift_period_filter_replacer(leaf)\n\n\n@mock.patch(\"forestadmin.datasource_toolkit.interfaces.query.filter.factory.FilterFactory._shift_period_filter\")\ndef test_get_previous_period_filter(mock_shifted_period: mock.MagicMock):\n leaf = ConditionTreeLeaf(field=\"test\", operator=Operator.PREVIOUS_MONTH)\n filter = Filter({\"condition_tree\": leaf, \"timezone\": 
zoneinfo.ZoneInfo(\"Europe/Paris\")})\n with mock.patch.object(filter, \"override\") as override_mock:\n with mock.patch.object(leaf, \"replace\") as replace_override:\n override_mock.return_value = \"fake_override\"\n mock_shifted_period.return_value = \"fake_shift_period\"\n replace_override.return_value = \"fake_replace\"\n assert FilterFactory.get_previous_period_filter(filter) == \"fake_override\"\n override_mock.assert_called_once_with({\"condition_tree\": \"fake_replace\"})\n replace_override.assert_called_once_with(\"fake_shift_period\")\n mock_shifted_period.assert_called_once_with(filter.timezone)\n\n filter = Filter({\"timezone\": zoneinfo.ZoneInfo(\"UTC\")})\n with pytest.raises(FilterFactoryException):\n FilterFactory.get_previous_period_filter(filter)\n\n\n@pytest.mark.asyncio\nasync def test_make_through_filter():\n with mock.patch(\n \"forestadmin.datasource_toolkit.interfaces.query.filter.factory.CollectionUtils.get_value\",\n new_callable=mock.AsyncMock,\n ) as mock_get_value:\n with mock.patch(\n \"forestadmin.datasource_toolkit.interfaces.query.filter.factory.FilterFactory.make_foreign_filter\",\n new_callable=mock.AsyncMock,\n ) as mock_make_foreign_filter:\n with mock.patch.object(Collection, \"__abstractmethods__\", new_callable=set):\n collection = Collection(name=\"test\", datasource=mock.MagicMock()) # type: ignore\n collection.schema[\"fields\"] = {\n \"fake_relation\": {\n \"type\": FieldType.MANY_TO_ONE,\n \"foreign_collection\": \"fake\",\n \"foreign_key\": \"test_id\",\n \"foreign_key_target\": \"id\",\n }\n }\n\n # test with nestable PaginatedFilter\n mock_get_value.return_value = \"fake_value\"\n collection.schema[\"fields\"] = {\n \"parent\": {\n \"type\": FieldType.MANY_TO_MANY,\n \"through_collection\": \"association\",\n \"foreign_collection\": \"parent\",\n \"foreign_key\": \"parent_id\",\n \"foreign_key_target\": \"id\",\n \"origin_key\": \"child_id\",\n \"origin_key_target\": \"id\",\n \"foreign_relation\": \"parent\",\n }\n }\n\n mock_collection = mock.MagicMock()\n mock_collection.list = mock.AsyncMock(return_value=[{\"id\": 1}])\n with mock.patch.object(collection.datasource, \"get_collection\", return_value=mock_collection):\n res = await FilterFactory.make_through_filter(\n mocked_caller,\n collection,\n [1],\n \"parent\",\n PaginatedFilter(\n {\n \"timezone\": zoneinfo.ZoneInfo(\"UTC\"),\n }\n ),\n )\n\n assert res.condition_tree.aggregator == Aggregator.AND\n assert ConditionTreeLeaf(\"child_id\", Operator.EQUAL, \"fake_value\") in res.condition_tree.conditions\n assert ConditionTreeLeaf(\"parent_id\", Operator.IN, [1]) in res.condition_tree.conditions\n\n mock_get_value.assert_called_once_with(mocked_caller, collection, [1], \"id\")\n mock_get_value.reset_mock()\n\n # test with unnestable PaginatedFilter\n fake_collection = mock.Mock(name=\"fake_collection\", spec=Collection)\n fake_collection.list = mock.AsyncMock(\n return_value=[\n {\"id\": \"fake_record_1\"},\n {\"id\": \"fake_record_2\"},\n ]\n )\n\n fake_datasource = mock.MagicMock()\n fake_datasource.get_collection = mock.MagicMock(return_value=fake_collection)\n collection._datasource = fake_datasource # type: ignore\n\n mock_make_foreign_filter.reset_mock()\n mock_make_foreign_filter.return_value = \"fake_filter\"\n mock_get_value.return_value = \"fake_value\"\n\n with mock.patch(\n \"forestadmin.datasource_toolkit.interfaces.query.filter.factory.CollectionUtils.get_through_target\",\n return_value=\"association\",\n ):\n res = await FilterFactory.make_through_filter(\n 
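# module-level caller fixture defined at the top of this file\n                        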
mocked_caller,\n                        collection,\n                        [1],\n                        \"parent\",\n                        PaginatedFilter(\n                            {\n                                \"search\": \"a\",\n                                \"timezone\": zoneinfo.ZoneInfo(\"UTC\"),\n                            }\n                        ),\n                    )\n                    mock_get_value.assert_called_once_with(mocked_caller, collection, [1], \"id\")\n                    fake_datasource.get_collection.assert_called_once_with(\"parent\") # type: ignore\n                    mock_make_foreign_filter.assert_called_once_with(\n                        mocked_caller,\n                        collection,\n                        [1],\n                        cast(ManyToMany, collection.schema[\"fields\"][\"parent\"]),\n                        PaginatedFilter(\n                            {\n                                \"search\": \"a\",\n                                \"timezone\": zoneinfo.ZoneInfo(\"UTC\"),\n                            }\n                        ),\n                    )\n                    fake_collection.list.assert_called_once_with(\n                        mocked_caller, \"fake_filter\", Projection(\"id\")\n                    ) # type: ignore\n                    assert res == PaginatedFilter(\n                        {\n                            \"condition_tree\": ConditionTreeBranch(\n                                Aggregator.AND,\n                                conditions=[\n                                    ConditionTreeLeaf(\"child_id\", Operator.EQUAL, \"fake_value\"),\n                                    ConditionTreeLeaf(\"parent_id\", Operator.IN, [\"fake_record_1\", \"fake_record_2\"]),\n                                ],\n                            )\n                        }\n                    )\n","repo_name":"ForestAdmin/agent-python","sub_path":"src/datasource_toolkit/tests/interfaces/query/filter/test_factory.py","file_name":"test_factory.py","file_ext":"py","file_size_in_byte":8956,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"49"} +{"seq_id":"27460161388","text":"import random\nfrom datetime import date\nfrom faker import Faker\nfrom typing import Type\n\nclass Worker():\n    def __init__(self) -> None:\n        self.name = ''\n        self.__registration = ''\n        self.__admission = ''\n        self.__salary = 'N/A'\n        self._dept = 'N/A'\n        self._level = 'N/A2'\n        self._office = 'N/A'\n\n    @staticmethod\n    def select_randon() -> dict:\n        worker_data = {}\n        new_dept = random.choice([\"Finance\", \"Mkt\", \"Commercial\", \"TI\"])\n        worker_data['dept'] = new_dept\n        new_office = random.choice([\"main\", \"coworking\", \"remote\"])\n        worker_data['office'] = new_office\n        new_level = random.choice([\"Analyst Jr\", \"Analyst Mid\", \"Analyst Sr\"])\n        worker_data['level'] = new_level\n        return worker_data\n\n\n# Data - Workers\n# [{\n# \"name\": \"Indiv_str\",\n# \"registration\": \"Indiv_int\",\n# \"admission\": \"MM/AAAA\",\n# \"dept\": [\"Finance\", \"Mkt\", \"Commercial\", \"TI\"],\n# \"level\": [\"Analyst\", \"Coord\", \"Manager\"],\n# \"office\": [\"main\", \"cowork\", \"remote\"]\n# \"salary\": int(),\n# \"costcenter\": \"C+first_letter_dept+first_letter_level\",\n# \"perf_indicator\": [\"decreasing\", \"regular\", \"evolving\"]\n# }]\n\nif __name__ == '__main__':\n    data = date(2002, 4, 28)\n    data3 = data.strftime('%d/%m/%Y') # 28/04/2002\n\n    # GENERATING A FAKE NAME - OK\n    fake = Faker()\n    test_name = fake.name()\n    print(test_name)\n","repo_name":"afcoelhodev/management_app","sub_path":"StaffData/src/service/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"36501221737","text":"\"\"\"\r\nThe following program calculates the spectral radiance emitted by a black body, for a user-defined wavelength\r\nand temperature.\r\n\r\nTo run this code, download the Anaconda Python distribution, and copy this code in the Spyder editor. 
No external
No external\r\nlibraries need to be imported (as of July 2018).\r\n\"\"\"\r\n\r\nimport math\r\n\r\n#Parameters to calculate spectral radiance\r\nc1 = 3.74e-16 # Wm^2 (first radiation constant)\r\nc2 = 1.44e-2 # m-K (second radiation constant)\r\nn = 1.0028 # index of refraction, assumes air\r\n\r\n# User inputs\r\nwavelength = float(input('Enter the wavelength at which you want to calculate the spectral radiance (in microns):'))\r\ntemperature = float(input('Enter the temperature at which you want to calculate the spectral radiance (in Kelvin):'))\r\n\r\n#Spectral radiance calculation\r\nwavelengthMeters = wavelength*1e-6 #Covert wavelength from microns to meters\r\nspectralRadiance = (c1/(math.pi*math.pow(n,2)*math.pow(wavelengthMeters,5)))*(1/((math.exp(c2/(n*wavelengthMeters*temperature)))-1))\r\n\r\n#Print output\r\nprint(\"The Spectral Radiance emmited by the black body at a temperature of \",temperature,\" K and wavelength of \",wavelength,\" microns is \",spectralRadiance,\" W/(m^2-sr-m)\")","repo_name":"richienagi/Physics","sub_path":"spectral radiance calculator.py","file_name":"spectral radiance calculator.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"14642529214","text":"\"\"\"\r\nAmy Butler\r\nProblem Set 1\r\n\"\"\"\r\nimport numpy as np\r\n\r\n#%% Question 0\r\n\r\na1 = np.array([1,2,3,1])\r\na2 = np.asmatrix(a1)\r\n\r\nb1 = np.array(([1],[0],[1],[5]))\r\nb2 = np.asmatrix(b1)\r\n\r\nA1 = np.array([[1,3,5],[2,4,6],[7,9,11]])\r\nA2 = np.asmatrix(A1)\r\n\r\nB1 = np.array([[1,0,0],[0,1,0],[0,0,1]])\r\nB2 = np.asmatrix(B1)\r\n\r\n#%% Question 1\r\n\r\nx = np.array([[1,3],[2,4]])\r\ny = np.array([[1,0],[0,1]])\r\nz = np.array([[x,x],[y,y]])\r\n\r\nz[0:1,0:1]\r\n\r\n#%% Question 2\r\n\r\na2_transpose = np.transpose(a2)\r\nA2_transpose = np.transpose(A2)\r\n\r\n(a2)@(a2_transpose)\r\n\r\n(b2)+(a2_transpose)\r\n\r\n(A2)@(A2_transpose)\r\n\r\n(A2)**3\r\n\r\n(A2)@(B2)\r\n\r\n#%% Question 3\r\n\r\nA = np.random.normal(size=(10,5))\r\nB = np.random.normal(size=(5,10))\r\nC = A@B\r\nnp.fill_diagonal(C,1)\r\n\r\n#%% Question 4\r\n\r\nD = np.random.normal(size=(20,15))\r\nE = np.random.normal(size=(15,20))\r\n\r\nF=D@E\r\nF[F<=0]=0.5\r\n\r\n#%% Question 5\r\n\r\nnp.reshape(D,(3,10,10))\r\nnp.reshape(E,(3,10,10))\r\n\r\n#%% Question 6\r\n\r\nx = np.arange(100.0)\r\n\r\nnp.reshape(x,(10,10))\r\nnp.reshape(x,(20,5))\r\nnp.reshape(x,(10,10,1))\r\n\r\n#%% Question 7\r\n\r\nx=np.reshape(np.arange(100.0),(5,20))\r\n\r\nnp.ravel(x)[1:102:2]\r\nnp.ndarray.flatten(x)[1:102:2]\r\nx.flat[1:102:2]\r\n\r\n#%% Question 8\r\n\r\nx=np.array([[16,10],[13,11]])\r\ny=5\r\nz=np.array([[16,10],[13,11],[12,33]])\r\nz_transpose=np.transpose(z)\r\ny_array1=np.tile(y,(2,3))\r\ny_array2=np.tile(y,(1,3))\r\n\r\n#Top of the chart:\r\nnp.hstack((x,y_array1))\r\n#Bottom of the chart:\r\nnp.hstack((z,(np.vstack((z_transpose,y_array2)))))\r\n#The entire chart:\r\nm=np.vstack(((np.hstack((x,y_array1))),(np.hstack((z,(np.vstack((z_transpose,y_array2))))))))\r\nprint(m)\r\n\r\nnp.shape(m) #Shape is (5,5)\r\nnp.ndim(m) #Dimesion is 2\r\nm[2:4, 2:5] #Extracts z_transpose\r\n\r\n#%% Question 
9\r\n\r\ndiagonal=np.diag(m)\r\nzero_array=np.zeros((5,5))\r\nnp.fill_diagonal(zero_array,diagonal)\r\nprint(zero_array)\r\n\r\nnp.linalg.eig(m)\r\nnp.linalg.matrix_rank(m)\r\nnp.linalg.det(m)\r\nnp.linalg.inv(m)\r\nnp.matrix.trace(m)\r\n","repo_name":"amybutler880/QC_Econometrics_387","sub_path":"Problem_Set_1.py","file_name":"Problem_Set_1.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"73827427029","text":"\n# Total size of the dataset\nTOTAL_TRAIN = 86317\nTOTAL_VAL = 10778\n\n# Sample size of dataset for training purpose\nSIZE= 0.1 # modify this only\nSIZE_TRAIN = int(TOTAL_TRAIN*SIZE)\nSIZE_VAL = int(TOTAL_VAL)\n\n# Parameters of the data (do not change)\nIMG_DIM = 65\nNUM_CLASSES = 4\n","repo_name":"claramartiny/droughtwatch","sub_path":"droughtwatch/params.py","file_name":"params.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"49"} +{"seq_id":"5032458484","text":"import re\nimport tvm\nfrom tvm import autotvm\nfrom tvm.autotvm.task import get_config\nfrom tvm.autotvm.task.topi_integration import deserialize_args\nfrom ..nn.conv2d import _get_workload as _get_conv2d_workload\nfrom .. import generic, tag\nfrom ..generic import conv2d as conv2d_generic\nfrom ..util import get_const_tuple\nfrom ..nn.conv2d import conv2d_NCHWc_int8\nfrom .. import nn\nfrom . import conv2d_avx_1x1, conv2d_avx_common\n\ndef _get_default_config_int8(cfg, data, kernel, strides, padding, out_dtype, is_depthwise=False,\n layout='NCHW'):\n \"\"\"\n Get default schedule config for the workload\n \"\"\"\n assert not is_depthwise, \"Depthwise Int8 not supported\"\n wkl = _get_conv2d_workload(data, kernel, strides, padding, out_dtype, layout)\n is_kernel_1x1 = wkl.hkernel == 1 and wkl.wkernel == 1\n if is_kernel_1x1:\n conv2d_generic.fallback_schedule_cpu_1x1_int8(\n cfg, wkl, int32_lanes=16, num_int8_elements=4)\n else:\n conv2d_generic.fallback_schedule_cpu_common_int8(\n cfg, wkl, int32_lanes=16, num_int8_elements=4)\n\n\ndef _is_int8_hw_support(data_dtype, kernel_dtype):\n \"\"\"\n Checks to ensure that we can use Intel DLBoost instructions\n 1) The datatypes are correct.\n 2) LLVM version has support for the instructions.\n 3) Target is skylake and above.\n \"\"\"\n # 1) Check datatypes\n is_dtype_support = data_dtype == 'uint8' and kernel_dtype == 'int8'\n\n # 2) Check LLVM support\n llvm_version = tvm.codegen.llvm_version_major()\n is_llvm_support = llvm_version >= 8\n\n # 3) Check target\n mcpu = tvm.target.current_target().mcpu\n is_target_support = False\n if mcpu == 'skylake-avx512' or mcpu == 'cascadelake':\n is_target_support = True\n\n return is_dtype_support and is_llvm_support and is_target_support\n\n\ndef _create_tuning_space_int8(cfg, data, kernel, strides, padding, dilation, layout):\n \"\"\"Create schedule configuration from input arguments\"\"\"\n dshape = get_const_tuple(data.shape)\n kshape = get_const_tuple(kernel.shape)\n pat = re.compile(r'NCHW.+(\\d+)c')\n if layout == 'NCHW':\n n, ic, h, w = dshape\n oc, _, kh, kw = kshape\n elif layout == 'NHWC':\n n, h, w, ic = dshape\n kh, kw, oc, _ = kshape\n elif pat.match(layout) is not None:\n n, ic_chunk, h, w, ic_bn = dshape\n target = tvm.target.current_target(allow_none=False)\n oc_chunk, k_ic, kh, kw, k_ic_f, oc_bn, k_ic_s = kshape\n ic = ic_chunk * ic_bn\n assert ic == k_ic * k_ic_f * k_ic_s\n oc = oc_chunk*oc_bn\n else:\n raise ValueError(\"Not 
support this layout {} with \"\n \"schedule template.\".format(layout))\n\n is_kernel_1x1 = kh == 1 and kw == 1\n ph, pw = padding if isinstance(padding, (tuple, list)) else (padding, padding)\n sh, sw = strides if isinstance(strides, (tuple, list)) else (strides, strides)\n oh = (h - kh + 2 * ph) // sh + 1\n ow = (w - kw + 2 * pw) // sw + 1\n\n # Create schedule config\n cfg.define_split('tile_ic', ic, num_outputs=2, filter=lambda y: y.size[-1] % 4 == 0)\n cfg.define_split('tile_oc', oc, num_outputs=2, filter=lambda y: y.size[-1] % 16 == 0)\n cfg.define_split(\"tile_ow\", ow, num_outputs=2, filter=lambda y: y.size[-1] <= 64)\n if is_kernel_1x1:\n cfg.define_knob(\"tile_oh\", [1, 2] if oh > 1 else [1])\n else:\n cfg.define_knob(\"unroll_kw\", [True, False])\n\n\n# Define template function for autotvm task\n# We define schedule template in this function instead of\n# declaration function since actual input arguments need\n# to be altered by the schedule selected.\n@autotvm.task.register(\"topi_x86_conv2d_NCHWc_int8\")\ndef _topi_nn_conv2d_NCHWc_int8(*args, **kwargs):\n assert not kwargs, \"Do not support kwargs in template function call\"\n args = deserialize_args(args)\n\n if len(args) == 7:\n data, kernel, strides, padding, dilation, origin_layout, dtype = args\n else:\n assert len(args) == 8\n data, kernel, strides, padding, dilation, origin_layout, out_layout, dtype = args\n\n raw_data_shape = get_const_tuple(data.shape)\n raw_kernel_shape = get_const_tuple(kernel.shape)\n\n # get config here\n cfg = get_config()\n _create_tuning_space_int8(cfg, data, kernel, strides, padding, dilation, origin_layout)\n\n # change shape with the value in config\n ic_bn, oc_bn, ow_bn = (cfg[\"tile_ic\"].size[-1], cfg[\"tile_oc\"].size[-1],\n cfg[\"tile_ow\"].size[-1])\n\n data_layout = \"NCHW%dc\" % ic_bn\n out_layout = \"NCHW%dc\" % oc_bn\n\n # Set up the new shape for data and kernel\n new_data_shape = (raw_data_shape[0], raw_data_shape[1] // ic_bn,\n raw_data_shape[2], raw_data_shape[3], ic_bn)\n n_elems = 4\n new_kernel_shape = (raw_kernel_shape[0] // oc_bn,\n raw_kernel_shape[1] // ic_bn,\n raw_kernel_shape[2],\n raw_kernel_shape[3],\n ic_bn // n_elems,\n oc_bn,\n n_elems)\n\n new_data = tvm.placeholder(new_data_shape, data.dtype)\n new_kernel = tvm.placeholder(new_kernel_shape, kernel.dtype)\n\n C = _declaration_conv_NCHWc_int8(cfg, new_data, new_kernel, strides, padding, dilation,\n data_layout, out_layout, dtype)\n s = _schedule_conv2d_NCHWc_int8(cfg, [C])\n return s, [new_data, new_kernel, C]\n\n\n@autotvm.register_topi_compute(conv2d_NCHWc_int8, 'cpu', 'direct')\ndef _declaration_conv_NCHWc_int8(cfg, data, kernel, strides,\n padding, dilation, layout, out_layout, out_dtype):\n return nn.conv2d_NCHWc_int8_compute(data,\n kernel,\n strides,\n padding,\n dilation,\n layout,\n out_layout,\n out_dtype)\n\n\n@autotvm.register_topi_schedule(generic.schedule_conv2d_NCHWc_int8, 'cpu', ['direct'])\ndef _schedule_conv2d_NCHWc_int8(cfg, outs):\n \"\"\"Create schedule for tensors\"\"\"\n s = tvm.create_schedule([x.op for x in outs])\n scheduled_ops = []\n\n def traverse(op):\n \"\"\"Traverse operators from computation graph\"\"\"\n # inline all one-to-one-mapping operators except the last stage (output)\n if tag.is_broadcast(op.tag):\n if op not in s.outputs:\n s[op].compute_inline()\n for tensor in op.input_tensors:\n if isinstance(tensor.op, tvm.tensor.ComputeOp) and tensor.op not in scheduled_ops:\n traverse(tensor.op)\n\n if 'conv2d_NCHWc_int8' in op.tag:\n conv_out = op.output(0)\n kernel = 
conv_out.op.input_tensors[1]\n data_vec = conv_out.op.input_tensors[0]\n data = data_vec.op.input_tensors[0] \\\n if isinstance(data_vec.op, tvm.tensor.ComputeOp) and \"pad\" not in data_vec.op.tag \\\n else data_vec\n if isinstance(data.op, tvm.tensor.ComputeOp) and \"pad\" in data.op.tag:\n data_pad = data\n data = data_pad.op.input_tensors[0]\n\n args = [s, cfg, data_vec, conv_out, outs[0]]\n target = tvm.target.current_target(allow_none=False)\n # int8 conv kernel is 7-dim\n _, _, kh, kw, _, _, _ = get_const_tuple(kernel.shape)\n if kh == 1 and kw == 1:\n conv2d_avx_1x1._schedule_conv_NCHWc_int8(*args)\n else:\n conv2d_avx_common._schedule_conv_NCHWc_int8(*args)\n\n scheduled_ops.append(op)\n\n traverse(outs[0].op)\n return s\n\n@autotvm.register_topi_schedule(generic.schedule_conv2d_nhwc_pack, 'cpu', ['direct'])\ndef schedule_conv2d_nhwc_pack(cfg, outs):\n \"\"\"Create schedule for tensors\"\"\"\n s = tvm.create_schedule([x.op for x in outs])\n output_op = outs[0].op\n scheduled_ops = []\n\n def traverse(op):\n \"\"\"Traverse operators from computation graph\"\"\"\n # inline all one-to-one-mapping operators except the last stage (output)\n if tag.is_broadcast(op.tag):\n if op not in s.outputs:\n s[op].compute_inline()\n else: # inject custom schedule\n if len(op.axis) == 4: # schedule bias + bn + relu\n n, h, w, c = op.axis\n fused = s[op].fuse(n, h, w)\n s[op].parallel(fused)\n s[op].vectorize(c)\n for tensor in op.input_tensors:\n if isinstance(tensor.op, tvm.tensor.ComputeOp) and tensor.op not in scheduled_ops:\n traverse(tensor.op)\n\n if 'conv2d_nhwc_pack_int8' in op.tag:\n conv_out = op.output(0)\n kernel = conv_out.op.input_tensors[1]\n data_vec = conv_out.op.input_tensors[0]\n data = data_vec.op.input_tensors[0] \\\n if isinstance(data_vec.op, tvm.tensor.ComputeOp) and \"pad\" not in data_vec.op.tag \\\n else data_vec\n if isinstance(data.op, tvm.tensor.ComputeOp) and \"pad\" in data.op.tag:\n data_pad = data\n data = data_pad.op.input_tensors[0]\n\n args = [s, cfg, data_vec, conv_out, outs[0]]\n if data.dtype == 'uint8':\n kh, kw, _, _, _ = get_const_tuple(kernel.shape)\n if kh == 1 and kw == 1:\n conv2d_avx_1x1._schedule_conv_nhwc_pack_int8(*args)\n else:\n raise ValueError(\"Only support 1x1 kernel with \"\n \"schedule_conv2d_nhwc_pack.\")\n else:\n raise ValueError(\"Not support this data type {} with \"\n \"schedule_conv2d_nhwc_pack. 
Only support int8\".format(data.dtype))\n\n scheduled_ops.append(op)\n traverse(output_op)\n return s\n","repo_name":"mindspore-ai/akg","sub_path":"third_party/incubator-tvm/topi/python/topi/x86/conv2d_int8.py","file_name":"conv2d_int8.py","file_ext":"py","file_size_in_byte":9990,"program_lang":"python","lang":"en","doc_type":"code","stars":196,"dataset":"github-code","pt":"49"} +{"seq_id":"23295834342","text":"import random\nimport copy\nimport pygame\nimport sys\n\ndef runTest(player):\n m = create_maze()\n maze = m[0]\n tiles = m[1]\n player.maze = maze\n player.tiles = tiles\n player.maze[player._col][player._row]=tiles['player']\n player.ai(player)\n print(player.calculateScore())\ndef create_maze():\n\n tiles = {'wall': 'X',\n 'weal': '+',\n 'woe': '-',\n 'blank': ' ',\n 'player': 'P'}\n\n # maze config\n\n config = createRandomMapConfig()\n rows = config[0]\n cols = config[1]\n openSpaces = config[2]\n numWeals = config[3]\n numWoes = config[4]\n\n\n maze = createWalls(cols, rows, openSpaces, tiles)\n if not maze == 'invalid':\n maze = fillOtherCrap(numWeals, numWoes, maze, tiles)\n\n return maze,tiles\ndef createRandomMapConfig():\n rows = random.randint(4,100)\n cols = random.randint(4,100)\n openSpaces = random.randint(2,rows-1)\n numWeals = int(abs(random.gauss(0,rows*cols*.1)))\n numWoes = int(abs(random.gauss(0,rows*cols*.1)))\n return rows,cols,openSpaces,numWeals,numWoes\n\ndef createWalls(cols, rows, openSpaces, tiles):\n if openSpaces < rows:\n maze = []\n i = 2\n if cols % 2 == 0:\n i = 1\n for col in range(cols + i):\n maze.append([])\n for row in range(rows + 2):\n if row == 0:\n maze[col].append(tiles['wall'])\n if row == rows + 1:\n maze[col].append(tiles['wall'])\n else:\n if col % 2 == 0:\n maze[col].append(tiles['wall'])\n else:\n maze[col].append(tiles['blank'])\n for col in range(cols):\n\n if col % 2 == 0 and col > 0:\n openSpacesThisCol = openSpaces\n while openSpacesThisCol > 0:\n r = random.randint(1, rows - 1)\n if maze[col][r] == tiles['wall']:\n openSpacesThisCol -= 1\n maze[col][r] = tiles['blank']\n\n else:\n print(\"Unacceptable wall parameters\")\n return \"invalid\"\n return maze\n\n\ndef fillOtherCrap(numWeals, numWoes, maze, tiles):\n rows = len(maze[0])\n cols = len(maze)\n\n if (numWeals + numWoes) > .5 * rows * cols:\n print(\"Unacceptable maze parameters in fillOtherCrap\")\n return 'invalid'\n while numWeals > 0:\n c, r = random.randint(0, cols - 1), random.randint(0, rows - 1)\n if maze[c][r] == tiles['blank']:\n maze[c][r] = tiles['weal']\n numWeals -= 1\n while numWoes > 0:\n c, r = random.randint(0, cols - 1), random.randint(0, rows - 1)\n if maze[c][r] == tiles['blank']:\n maze[c][r] = tiles['woe']\n numWoes -= 1\n return maze\n\n\nclass Player():\n wealPoints = 10\n woePoints = -100\n tiles = []\n maze = []\n debug = True\n def __init__(self,ai):\n self._col = 1\n self._row = 1\n self.weals = 0\n self.woes = 0\n self.moves = 0\n self.score = 0\n self.ai = ai\n def _move(self, dCol, dRow):\n self.moves+=1\n newCol = self._col + dCol\n newRow = self._row + dRow\n destination = self.maze[newCol][newRow]\n if not destination == self.tiles['wall']:\n self.maze[self._col][self._row]=self.tiles['blank']\n self._col = newCol\n self._row = newRow\n self.maze[self._col][self._row]=self.tiles['player']\n self.sayMaze()\n if destination == self.tiles['weal']:\n self.weals+=1\n if destination == self.tiles['woe']:\n self.woes+=1\n else:\n print(\"move failed\")\n self.score-=1000\n\n def moveUp(self):\n self._move(0, -1)\n\n def moveDown(self):\n 
self._move(0, 1)\n\n    def moveRight(self):\n        self._move(1, 0)\n\n    def moveLeft(self):\n        self._move(-1, 0)\n\n    def sayMaze(self):\n        rows = len(self.maze[0])\n        cols = len(self.maze)\n\n        for row in range(rows):\n            newRow = []\n            message = ''\n            for col in range(cols):\n                newRow.append(self.maze[col][row])\n            for c in newRow:\n                message += c\n                message += ' '\n            self.dSay(message)\n        self.dSay(\"\")\n\n\n    def dSay(self,message):\n        if self.debug:\n            print(message)\n\n    def calculateScore(self):\n        score = self.wealPoints*self.weals+self.woePoints*self.woes-self.moves\n        return score\n\n# example AI\ndef basicAI(self):\n    movingDown = True\n    while not self._col == len(self.maze)-2:\n        if not self.maze[self._col+1][self._row] == self.tiles['wall']:\n            self.moveRight()\n        if not self.maze[self._col][self._row+1]==self.tiles['wall'] and movingDown:\n            self.moveDown()\n        if self.maze[self._col][self._row+1]==self.tiles['wall']:\n            movingDown = False\n        if not self.maze[self._col][self._row-1]==self.tiles['wall'] and not movingDown:\n            self.moveUp()\n        if self.maze[self._col][self._row-1]==self.tiles['wall']:\n            movingDown = True\np1 = Player(basicAI)\n\nrunTest(p1)\n","repo_name":"rckohler/DungeonCrawler","sub_path":"MazeWanderer.py","file_name":"MazeWanderer.py","file_ext":"py","file_size_in_byte":5253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"7686245773","text":"f = open('./input.txt', 'r')\ninput = f.read().splitlines()\nf.close()\n\nSIZE = 10\n\ndef make_grid(input):\n    grid = []\n    for row in input:\n        octos = []\n        for oct in row:\n            octos.append(int(oct))\n        grid.append(octos)\n    return grid\n\ngrid = make_grid(input)\n\ndef run_step(grid):\n    for i in range(SIZE):\n        for j in range(SIZE):\n            grid[i][j] += 1\n    flash = True\n    flashed = []\n    while flash:\n        flash = False\n        for i in range(SIZE):\n            for j in range(SIZE):\n                v = grid[i][j]\n                if v > 9 and (i,j) not in flashed:\n                    flash = True\n                    flashed.append((i,j))\n                    if i > 0:\n                        grid[i-1][j] += 1\n                    if j > 0:\n                        grid[i][j-1] += 1\n                    if j < SIZE -1:\n                        grid[i][j+1] += 1\n                    if i < SIZE -1:\n                        grid[i+1][j] += 1\n\n                    # diagonal\n                    if i > 0 and j > 0:\n                        grid[i-1][j-1] += 1\n                    if i > 0 and j < SIZE -1:\n                        grid[i-1][j+1] += 1\n                    if i < SIZE -1 and j < SIZE -1:\n                        grid[i+1][j+1] += 1\n                    if i < SIZE -1 and j > 0:\n                        grid[i+1][j-1] += 1\n    for i,j in flashed:\n        grid[i][j] = 0\n\n    return grid, len(flashed)\n\ncount = 0\nfor step in range(100):\n    grid, flashed = run_step(grid)\n    count += flashed\n\nprint(f'Part one result: {count}')\n\ngrid = make_grid(input)\n\ndef has_all_flashed(grid):\n    vsetky = True\n    for i in range(SIZE):\n        for j in range(SIZE):\n            if grid[i][j] != 0:\n                vsetky = False\n    return vsetky\n\nstep = 1\nwhile True:\n    grid, _ = run_step(grid)\n    if has_all_flashed(grid):\n        break\n    step += 1\n\nprint(f'Part two result: {step}')\n","repo_name":"QuarKUS7/aoc2021","sub_path":"11/11.py","file_name":"11.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"16267047182","text":"# library -> python socket communication\nimport socket\n\n\n# public function:\n# input: none\n# return: the IP address of the server, similar to typing in a terminal: hostname -I\n# Note: this function uses the socket library to get the server IP address, which is further used to\n# gain automatic access in roslib.js to the rosbridge_server (bridge between server - client)\ndef get_ip():\n    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n    try:\n        # doesn't even have to be reachable\n
        s.connect((\"10.255.255.255\", 1))\n        IP = s.getsockname()[0]\n\n    except Exception:\n        IP = \"127.0.0.1\"\n\n    finally:\n        s.close()\n\n    return IP\n","repo_name":"Steigner/RM1_Server","sub_path":"app/flask_server/app/self_ipadress.py","file_name":"self_ipadress.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"23885124495","text":"def read_dictionary(filename, length):\r\n    \r\n    # your code here\r\n    words = []\r\n    f = open(filename)\r\n    lines = f.read().strip()\r\n    li = lines.split()\r\n\r\n    # returns a list of the words in the dictionary which comprise only lower case letters and are of the correct length\r\n\r\n    for w in li:\r\n        if len(w) == length and w.islower() and w not in words and \"'\" not in w:\r\n            words.append(w)\r\n    return words","repo_name":"chloeward00/CA318-Advanced-Algorithms-and-AI-Search","sub_path":"w2-Search/LoveHateDictionary.py","file_name":"LoveHateDictionary.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"12470397402","text":"import os\nimport random\nfrom abc import abstractmethod\nfrom pathlib import Path\n\nimport torch as th\nfrom torch import nn\nimport torchvision.transforms as T\n\nfrom ml import logging\nfrom ...datasets import coco\n\nCOLORS91 = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(coco.COCO91_CLASSES))]\nCOLORS80 = [COLORS91[coco.COCO80_TO_91[i]] for i in range(len(coco.COCO80_CLASSES))]\n\n## Model factory methods to create detectors with consistent APIs\n\"\"\"\nDETR\n\"\"\"\ndef detr(pretrained=False, pooling=False, deformable=False, backbone='resnet50', num_classes=91, model_dir=None, force_reload=False, **kwargs):\n    from .detr.model import detr\n    model = detr(pretrained, deformable=deformable, backbone=backbone, num_classes=num_classes, model_dir=model_dir, force_reload=force_reload, **kwargs)\n    return DETRDetector(model, pooling=pooling, **kwargs)\n\n\"\"\"\nYOLOX\n\"\"\"\ndef yolox_x(pretrained=True, pooling=False, num_classes=80, device='cpu', force_reload=False, unload_after=True, **kwargs):\n    from .yolox.model import yolox\n    model, exp = yolox(arch='yolox_x', pretrained=pretrained, num_classes=num_classes, device=device, force_reload=force_reload, unload_after=unload_after)\n    return YOLOXDetector(model, pooling=pooling, size=exp.test_size, pad_value=114, num_classes=num_classes, **kwargs)\n\ndef yolox_l(pretrained=True, pooling=False, num_classes=80, device='cpu', force_reload=False, unload_after=True, **kwargs):\n    from .yolox.model import yolox\n    model, exp = yolox(arch='yolox_l', pretrained=pretrained, num_classes=num_classes, device=device, force_reload=force_reload, unload_after=unload_after)\n    return YOLOXDetector(model, pooling=pooling, size=exp.test_size, pad_value=114, num_classes=num_classes, **kwargs)\n\ndef yolox_m(pretrained=True, pooling=False, num_classes=80, device='cpu', force_reload=False, unload_after=True, **kwargs):\n    from .yolox.model import yolox\n    model, exp = yolox(arch='yolox_m', pretrained=pretrained, num_classes=num_classes, device=device, force_reload=force_reload, unload_after=unload_after)\n    return YOLOXDetector(model, pooling=pooling, size=exp.test_size, pad_value=114, num_classes=num_classes, **kwargs)\n\ndef yolox_s(pretrained=True, pooling=False, num_classes=80, device='cpu', force_reload=False, unload_after=True, **kwargs):\n    from .yolox.model import yolox\n    model, exp = 
yolox(arch='yolox_s', pretrained=pretrained, num_classes=num_classes, device=device, force_reload=force_reload, unload_after=unload_after)\n return YOLOXDetector(model, pooling=pooling, size=exp.test_size, pad_value=114, num_classes=num_classes, **kwargs)\n\ndef yolox_nano(pretrained=True, pooling=False, num_classes=80, device='cpu', force_reload=False, unload_after=True, **kwargs):\n from .yolox.model import yolox\n model, exp = yolox(arch='yolox_nano', pretrained=pretrained, num_classes=num_classes, device=device, force_reload=force_reload, unload_after=unload_after)\n return YOLOXDetector(model, pooling=pooling, size=exp.test_size, pad_value=114, num_classes=num_classes, **kwargs)\n\ndef yolox_tiny(pretrained=True, pooling=False, num_classes=80, device='cpu', force_reload=False, unload_after=True, **kwargs):\n from .yolox.model import yolox\n model, exp = yolox(arch='yolox_tiny', pretrained=pretrained, num_classes=num_classes, device=device, force_reload=force_reload, unload_after=unload_after)\n return YOLOXDetector(model, pooling=pooling, size=exp.test_size, pad_value=114, num_classes=num_classes, **kwargs)\n \n\n## ML Detector APIs\n\nclass Detector(nn.Module):\n def __init__(self, model, classes=coco.COCO80_CLASSES, rewrites=None, **kwargs):\n '''\n Args:\n model(nn.Module): pre-defined model to make inferences\n classes(List[str]): list of class labels\n rewrites(dict): target to rewrite one or more source labels into an aggregated one\n '''\n super(Detector, self).__init__()\n self.module = model\n self.classes = classes\n\n def __getattr__(self, name):\n try:\n return super(Detector, self).__getattr__(name)\n except AttributeError:\n return getattr(self.module, name)\n\n @property\n def __class__(self):\n return self.module.__class__\n\n @property\n def with_rpn(self):\n return False\n\n @property\n def with_mask(self):\n return False\n \n @property\n def with_keypts(self):\n return False\n\n @abstractmethod\n def backbone(self, images, **kwargs):\n pass\n\n @abstractmethod\n def rpn(self, images, **kwargs):\n pass\n\n @abstractmethod\n def detect(self, images, **kwargs):\n pass\n \n @abstractmethod\n def forward(self, *args, **kwargs):\n return self.module(*args, **kwargs)\n\n\nclass DETRDetector(Detector):\n def __init__(self, model, pooling=False, **kwargs):\n super(DETRDetector, self).__init__(model, **kwargs)\n self.engine = None\n\n mean = kwargs.get('mean', [0.485, 0.456, 0.406])\n std = kwargs.get('std', [0.229, 0.224, 0.225])\n resize = kwargs.get('resize', (800, 800))\n self.transform = T.Compose([\n T.Resize(resize, antialias=True),\n T.Lambda(lambda x: x.float().div(255.0)),\n T.Normalize(mean, std)\n ])\n\n self.pooling = pooling\n\n def forward(self, *args, **kwargs):\n outputs = self.module(*args, **kwargs)\n return outputs\n\n def deploy(self):\n r\"\"\"Deploy optimized runtime backend.\n \"\"\"\n raise NotImplementedError('Deployment for DETR is not supported yet')\n \n def detect(self, images, **kwargs):\n \"\"\"Perform object detection. 
\n \"\"\"\n param = next(self.parameters())\n\n from ml.vision.models.detection import detr\n conf_thres = kwargs.get('cls_thres', 0.5)\n \n batch, sizes = detr.preprocess(images, transform=self.transform)\n with th.inference_mode():\n if self.engine is None:\n outputs, recordings = self(batch.to(param.device))\n else:\n raise NotImplementedError('DETR engine is not supported yet')\n\n decoder_out = recordings[0][-1][0][-1] # transformer decoder memory last layer\n dets, feats = detr.postprocess(outputs, decoder_out, sizes, conf=conf_thres)\n\n if self.pooling:\n return dets, feats\n else:\n return dets\n \n\nclass YOLOXDetector(Detector):\n def __init__(self, model, pooling=False, **kwargs):\n super().__init__(model, **kwargs)\n self.engine = None\n\n self.input_size = kwargs.get('size', (640, 640))\n self.pad_vaue = kwargs.get('pad_value', 114)\n self.num_classes = kwargs.get('num_classes', 80)\n\n def forward(self, *args, **kwargs):\n outputs = self.module(*args, **kwargs)\n return outputs\n\n def deploy(self, name='yolox', batch_size=10, spec=(3, 640, 640), fp16=True, backend='trt', reload=False, **kwargs):\n r\"\"\"Deploy optimized runtime backend.\n \"\"\"\n from ml import deploy\n module = self.module\n\n int8 = kwargs.get('int8', False)\n strict = kwargs.get('strict', False)\n if int8:\n from ml import hub\n from ml.vision.datasets.coco import download\n\n def preprocessor(size=(640, 640)):\n from PIL import Image\n from torchvision import transforms\n trans = transforms.Compose([transforms.Resize(size),\n transforms.ToTensor()])\n\n H, W = size\n def preprocess(image_path, *shape):\n r'''Preprocessing for TensorRT calibration\n Args:\n image_path(str): path to image\n channels(int):\n '''\n image = Image.open(image_path)\n logging.debug(f\"image.size={image.size}, mode={image.mode}\")\n image = image.convert('RGB')\n C = len(image.mode)\n im = trans(image)\n assert im.shape == (C, H, W)\n return im\n\n return preprocess\n\n int8_calib_max = kwargs.get('int8_calib_max', 5000)\n int8_calib_batch_size = kwargs.get('int8_calib_batch_size', max(batch_size, 64)) \n cache = f'{name}-COCO2017-val-{int8_calib_max}-{int8_calib_batch_size}.cache'\n cache_path = Path(os.path.join(hub.get_dir(), cache))\n kwargs['int8_calib_cache'] = str(cache_path)\n kwargs['int8_calib_data'] = download(split='val2017', reload=False)\n kwargs['int8_calib_preprocess_func'] = preprocessor(spec[1:])\n kwargs['int8_calib_max'] = int8_calib_max\n kwargs['int8_calib_batch_size'] = int8_calib_batch_size\n\n device = next(self.module.parameters()).device\n self.to('cpu') \n self.engine = deploy.build(f\"{name}-bs{batch_size}_{spec[-2]}x{spec[-1]}{fp16 and '_fp16' or ''}{int8 and '_int8' or ''}{strict and '_strict' or ''}\",\n self,\n [spec],\n backend=backend, \n reload=reload,\n batch_size=batch_size,\n fp16=fp16,\n strict_type_constraints=strict,\n **kwargs)\n self.to(device)\n # TODO: avoid storing dummy modules to keep track of module device\n self.module = module.head.obj_preds[-1]\n # del self.module\n \n def detect(self, images, **kwargs):\n \"\"\"Perform object detection. 
\n        \"\"\"\n        device = next(self.parameters()).device\n\n        from ml.vision.models.detection import yolox\n        nms_thresh = kwargs.get('nms_thresh', 0.65)\n        cls_thresh = kwargs.get('cls_thresh', 0.45)\n        batch_preprocess = kwargs.get('batch_preprocess', False)\n\n        images = images.to(device) if batch_preprocess else images\n        batch, ratio = yolox.preprocess(images, input_size=self.input_size, pad_value=self.pad_vaue)\n        with th.inference_mode():\n            if self.engine is None:\n                predictions, feats = self(batch.to(device))\n            else:\n                predictions, *feats = self.engine.predict(batch.to(device))\n        \n\n        dets = yolox.postprocess(predictions, ratio, num_classes=self.num_classes, nms_thre=nms_thresh, conf_thre=cls_thresh)\n\n        return dets","repo_name":"necla-ml/ML-Vision","sub_path":"ml/vision/models/detection/detector.py","file_name":"detector.py","file_ext":"py","file_size_in_byte":10624,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"49"} +{"seq_id":"7639708706","text":"# The third argument of re functions selects matching modes; several can be combined with | and all take effect together\n# re.I ignores case\n# re.S makes . match any character, including \\n\n\nimport re\n\nlanguage = 'PythonC#\\nJavaPHP'\n\nr = re.findall('c#.{1}', language, re.I | re.S)\nprint(r)\n","repo_name":"hazel-new/Hazel_python","sub_path":"视频入门练习代码/10/c13.py","file_name":"c13.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"44645247991","text":"import tensorflow as tf\r\nphysical_devices = tf.config.experimental.list_physical_devices(\"GPU\")\r\nif len(physical_devices) > 0:\r\n    tf.config.experimental.set_memory_growth(physical_devices[0],True)\r\n    logical_devices = tf.config.list_logical_devices(\"CPU\")\r\nfrom model import model\r\nimport numpy as np\r\nimport hashlib\r\nfrom tensorflow.keras import losses, optimizers,initializers,metrics\r\nfrom setting import modelpath,subjects_train,subjects_test,architecture,data_2d_path,data_3d_path,output,loss_l,batch_size,epoch\r\nfrom amsgrad import AMSGrad\r\nfrom loss import MPJPE,p_mpjpe\r\nfrom process.h36m_dataset import Human36mDataset\r\nfrom process.camera import world_to_camera,normalize_screen_coordinates\r\nfrom data import data_process, deterministic_random,data_process1\r\nimport datetime\r\nimport os\r\nfrom tqdm import tqdm\r\nprint(tf.executing_eagerly())\r\n#optimizer=AMSGrad(learning_rate=0.01, beta1=0.9, beta2=0.99, epsilon=1e-8)\r\noptimizer=tf.keras.optimizers.Adam(learning_rate=0.001)\r\nloss=MPJPE\r\nif loss_l==\"p_mpjpe\":\r\n    loss=p_mpjpe\r\n\r\nacc_meter=tf.metrics.CategoricalAccuracy()\r\n@tf.function\r\ndef train_one_step(x,y):\r\n    \"\"\"\r\n    One training iteration\r\n    \"\"\"\r\n    # compute the loss\r\n    with tf.GradientTape() as tape:\r\n        predictions = model(x)\r\n        acc_meter.update_state(y_true=y, y_pred=predictions)\r\n        loss1 = loss(y,predictions)\r\n    # compute the gradients and apply them\r\n    grad = tape.gradient(loss1, model.trainable_variables)\r\n    optimizer.apply_gradients(zip(grad, model.trainable_variables))\r\n    return loss1\r\n\r\n# cpus = tf.config.experimental.list_physical_devices(device_type='CPU')\r\n# print(gpus)\r\n# if gpus:\r\n#     gpu0 = gpus[0] # if there are multiple GPUs, use only GPU 0\r\n#     tf.config.set_visible_devices([gpu0], \"GPU\")\r\n#     tf.config.experimental.set_memory_growth(gpu0, True) # let GPU memory grow on demand\r\n#     # alternatively, cap GPU memory at a fixed amount (e.g. 4G)\r\n#     #tf.config.experimental.set_virtual_device_configuration(gpu0,\r\n#     #    [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=4096)])\r\n\r\n# @tf.function\r\n# def test_acc():\r\n#     for x1, y1 in test_dataset:\r\n#         pred = model(x1) # forward pass\r\n
#         acc_meter1.update_state(y_true=y1, y_pred=pred) # update the accuracy statistics\r\n#     print(\"Test set accuracy:\",acc_meter1.result())\r\n#     acc_meter1.reset_states()\r\nif __name__ == '__main__':\r\n\r\n    model.compile(\r\n        optimizer=optimizer,\r\n        loss=loss,\r\n        metrics=['accuracy']\r\n    )\r\n    print(\"load model\")\r\n    dataset = Human36mDataset(data_2d_path)\r\n    print('Preparing data...')\r\n    for subject in dataset.subjects():\r\n        for action in dataset[subject].keys():\r\n            anim = dataset[subject][action]\r\n\r\n            if 'positions' in anim:\r\n                positions_3d = []\r\n                for cam in anim['cameras']:\r\n                    pos_3d = world_to_camera(anim['positions'], R=cam['orientation'], t=cam['translation'])\r\n                    pos_3d=pos_3d.numpy()\r\n                    # print(pos_3d[:, :1])\r\n                    # print( pos_3d[:, 1:])\r\n                    aa=tf.tile(pos_3d[:, :1],multiples=[1,16,1])\r\n                    # print(aa)\r\n                    pos_3d[:, 1:] -=aa # Remove global offset, but keep trajectory in first position\r\n                    positions_3d.append(tf.convert_to_tensor(pos_3d,dtype=float))\r\n                anim['positions_3d'] = positions_3d\r\n    print('Loading 2D detections...')\r\n    keypoints = np.load(data_3d_path, allow_pickle=True)\r\n    keypoints_metadata = keypoints['metadata'].item()\r\n    keypoints_symmetry = keypoints_metadata['keypoints_symmetry']\r\n    kps_left, kps_right = list(keypoints_symmetry[0]), list(keypoints_symmetry[1])\r\n    joints_left, joints_right = list(dataset.skeleton().joints_left()), list(dataset.skeleton().joints_right())\r\n    keypoints = keypoints['positions_2d'].item()\r\n    for subject in dataset.subjects():\r\n        assert subject in keypoints, 'Subject {} is missing from the 2D detections dataset'.format(subject)\r\n        for action in dataset[subject].keys():\r\n            assert action in keypoints[\r\n                subject], 'Action {} of subject {} is missing from the 2D detections dataset'.format(action, subject)\r\n            if 'positions_3d' not in dataset[subject][action]:\r\n                continue\r\n\r\n            for cam_idx in range(len(keypoints[subject][action])):\r\n\r\n                # We check for >= instead of == because some videos in H3.6M contain extra frames\r\n                mocap_length = dataset[subject][action]['positions_3d'][cam_idx].shape[0]\r\n                assert keypoints[subject][action][cam_idx].shape[0] >= mocap_length\r\n\r\n                if keypoints[subject][action][cam_idx].shape[0] > mocap_length:\r\n                    # Shorten sequence\r\n                    keypoints[subject][action][cam_idx] = keypoints[subject][action][cam_idx][:mocap_length]\r\n\r\n            assert len(keypoints[subject][action]) == len(dataset[subject][action]['positions_3d'])\r\n\r\n    for subject in keypoints.keys():\r\n        for action in keypoints[subject]:\r\n            for cam_idx, kps in enumerate(keypoints[subject][action]):\r\n                # Normalize camera frame\r\n                cam = dataset.cameras()[subject][cam_idx]\r\n                kps[..., :2] = normalize_screen_coordinates(kps[..., :2], w=cam['res_w'], h=cam['res_h'])\r\n                keypoints[subject][action][cam_idx] = kps\r\n\r\n    subjects_train = subjects_train.split(',')\r\n    subjects_test = subjects_test.split(',')\r\n\r\n\r\n    def fetch(subjects, action_filter=None, subset=1, parse_3d_poses=True):\r\n        out_poses_3d = []\r\n        out_poses_2d = []\r\n        out_camera_params = []\r\n        for subject in subjects:\r\n            for action in keypoints[subject].keys():\r\n                if action_filter is not None:\r\n                    found = False\r\n                    for a in action_filter:\r\n                        if action.startswith(a):\r\n                            found = True\r\n                            break\r\n                    if not found:\r\n                        continue\r\n\r\n                poses_2d = keypoints[subject][action]\r\n                for i in range(len(poses_2d)): # Iterate across cameras\r\n                    out_poses_2d.append(poses_2d[i])\r\n\r\n                if subject in dataset.cameras():\r\n                    cams = dataset.cameras()[subject]\r\n                    assert len(cams) == len(poses_2d), 'Camera count mismatch'\r\n
                    for cam in cams:\r\n                        if 'intrinsic' in cam:\r\n                            out_camera_params.append(cam['intrinsic'])\r\n\r\n                if parse_3d_poses and 'positions_3d' in dataset[subject][action]:\r\n                    poses_3d = dataset[subject][action]['positions_3d']\r\n                    assert len(poses_3d) == len(poses_2d), 'Camera count mismatch'\r\n                    for i in range(len(poses_3d)): # Iterate across cameras\r\n                        out_poses_3d.append(poses_3d[i])\r\n\r\n        if len(out_camera_params) == 0:\r\n            out_camera_params = None\r\n        if len(out_poses_3d) == 0:\r\n            out_poses_3d = None\r\n\r\n        stride = 1\r\n        if subset < 1:\r\n            for i in range(len(out_poses_2d)):\r\n                n_frames = int(round(len(out_poses_2d[i]) // stride * subset) * stride)\r\n                start = deterministic_random(0, len(out_poses_2d[i]) - n_frames + 1, str(len(out_poses_2d[i])))\r\n                out_poses_2d[i] = out_poses_2d[i][start:start + n_frames:stride]\r\n                if out_poses_3d is not None:\r\n                    out_poses_3d[i] = out_poses_3d[i][start:start + n_frames:stride]\r\n        elif stride > 1:\r\n            # Downsample as requested\r\n            for i in range(len(out_poses_2d)):\r\n                out_poses_2d[i] = out_poses_2d[i][::stride]\r\n                if out_poses_3d is not None:\r\n                    out_poses_3d[i] = out_poses_3d[i][::stride]\r\n\r\n        return out_camera_params, out_poses_3d, out_poses_2d\r\n\r\n\r\n    cameras_valid, poses_valid, poses_valid_2d = fetch(subjects_test, None)\r\n    filter_widths = [int(x) for x in architecture.split(',')]\r\n    # print(np.array(poses_valid).shape)\r\n    # print(np.array(poses_valid_2d).shape)\r\n    poses_valid_2d=data_process(poses_valid_2d)\r\n    poses_valid = data_process1(poses_valid)\r\n    poses_valid=tf.convert_to_tensor(poses_valid)\r\n    poses_valid_2d=tf.convert_to_tensor(poses_valid_2d)\r\n    dataest = tf.data.Dataset.from_tensor_slices((poses_valid_2d, poses_valid))\r\n    dataest = dataest.shuffle(buffer_size=10000).prefetch(tf.data.experimental.AUTOTUNE).repeat(\r\n        1).batch(batch_size)\r\n    # for epochs in range(epoch):\r\n    #     # use tqdm to show training progress\r\n    #     with tqdm(total=2345/batch_size,desc='Epoch {}/{}'.format(epochs, epoch)) as pbar:\r\n    #         # train settings.STEPS_PER_EPOCH steps per epoch\r\n    #         for x, y in dataest:\r\n    #             print(x,y)\r\n    #             loss2 = train_one_step(x, y)\r\n    #             pbar.set_postfix(loss='%.4f' % float(loss2), acc=float(acc_meter.result()))\r\n    #             pbar.update(1)\r\n    model.fit(poses_valid_2d,\r\n              poses_valid,\r\n              batch_size=batch_size,\r\n              epochs=epoch)\r\n    # save the model\r\n    if output:\r\n        if not os.path.exists(output):\r\n            os.makedirs(output)\r\n        model.save(output+str(datetime.datetime.now())+\".h5\")\r\n        model.save(output+str(datetime.datetime.now()), save_format='tf')\r\n","repo_name":"walnut-mzy/Videopose3d-s-of-tensorflow2","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":9541,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"49"} +{"seq_id":"44497892480","text":"import re\n\nlines = open('input.txt').readlines()\noutput_lines = []\n\ndirect_mapping = [ (r'BOOL', r'bool'),\n                   (r'TRUE', r'true'),\n                   (r'FALSE', r'false'),\n                   (r'import', r'include'),\n                   (r'CGRect', r'CCRect'),\n                   (r'CGSize', r'CCSize'),\n                   (r'CGPoint', r'CCPoint'),\n                   (r'CGFloat', r'float'),\n                   (r'UITouch', r'CCTouch'),\n                   (r'UIEvent', r'CCEvent'),\n                   (r'NSSet', r'CCSet'),\n                   (r'NSArray', r'CCArray'),\n                   (r'.contentSize', r'->getContentSize()'),\n                   (r'.boundingBox', r'->boundingBox()'),\n                   (r'%@', r'%s'),\n                   (r'@', r''),\n                   (r'NSLog', r'CCLog'),\n                   (r'.position.x', r'->getPositionX()'),\n                   (r'.position.y', r'->getPositionY()'),\n                   (r'nil', r'NULL'),\n                   (r'NSMutableArray', r'CCArray'),\n                   (r'NSMutableDictionary', r'CCDictionary'),\n                   
(r'NSString', r'CCString')\n ]\n\nproperties = [ 'position',\n 'scale',\n 'opacity',\n 'color',\n 'contentSize',\n 'tag',\n 'rotation',\n 'anchorPoint',\n 'zOrder'\n ]\n\nactions = [ 'removeFromParentAndCleanup',\n 'removeAllChildrenWithCleanup',\n 'addChild',\n 'removeObjectAtIndex',\n 'addObject',\n 'runAction'\n ]\n\nfor i in range(len(lines)):\n one_line = lines[i]\n\n # calculate the leading spaces\n ls = ''\n for j in one_line:\n if j == ' ' or j == '\\t':\n ls += j\n else:\n break\n\n # addChild\n ss = re.split(r'\\W+', one_line)\n ss = [x for x in ss if len(x) != 0]\n for j in actions:\n if len(ss) == 3 and ss[1] == j:\n one_line = ls + ss[0] + '->' + ss[1] + '(' + ss[2] + ');\\n'\n\n # property settings: change sprite.position = ccp(0, 0); to sprite->setPosition(ccp(0, 0));\n # property change should be modified before the direct mapping\n for j in properties:\n sp = '.' + j + ' = '\n ss = re.split(sp, one_line)\n if len(ss) == 2:\n ss[1] = re.sub(';', ');', ss[1])\n one_line = ss[0] + '->set' + j.capitalize() + '(' + ss[1]\n break\n\n # direct mapping\n for dm in direct_mapping:\n one_line = re.sub(dm[0], dm[1], one_line)\n\n # the order matters e.g. .position.x vs .position\n for j in properties:\n sp = '.' + j\n spnew = '->get' + j.capitalize() + '()'\n one_line = re.sub(sp, spnew, one_line)\n\n ss = re.split(' = ', one_line)\n if len(ss) == 2:\n sss = re.split('objectAtIndex:', ss[1])\n if len(sss) == 2:\n one_line = ss[0] + ' = ' + re.sub('\\[', '', sss[0]) + '->objectAtIndex(' + re.sub(';', ');', re.sub('\\]', '', sss[1]))\n\n output_lines.append(one_line)\n\n# write back to this file\nf = open('output.txt', 'w')\nf.writelines(output_lines)\nf.close()\n","repo_name":"guanqun/cocos2d-convert-to-x","sub_path":"convert2ccx.py","file_name":"convert2ccx.py","file_ext":"py","file_size_in_byte":3404,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"49"} +{"seq_id":"9325171311","text":"import unittest\nfrom two_sum_ii_input_array_is_sorted import Solution\n\n\nclass TestSolution(unittest.TestCase):\n def test_Calculate_Solution(self):\n sol = Solution()\n\n self.assertEqual([1, 2], sol.twoSum([2, 7, 11, 15], 9))\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"SinCatGit/leetcode","sub_path":"00167/test_two_sum_ii_input_array_is_sorted.py","file_name":"test_two_sum_ii_input_array_is_sorted.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"49"} +{"seq_id":"30311397240","text":"import calendar\nNAME=input('ENTER NAME: ')\ndays=['Monday', 'Tuesday','Wednesday', 'Thursday',\n 'Friday', 'Saturday', 'Sunday']\nmonths=['January','February','March','April','May','June','July',\n 'August','September','October','November','December']\nendings=['st','nd','rd']+17*['th']+['st','nd','rd']+7*['th']+['st']\nprint('Hello! 
'+NAME+', are you interested in the day \\\nof your Birthday?')\nD=input('Enter Day(1-31): ')\nM=input('Enter Month(1-12): ')\nY=input('Enter Year: ')\nd=int(D)\nm=int(M)\ny=int(Y)\nday_of_date =calendar.weekday(month=m, day=d, year=y)\nprint(NAME+', you were born on '+ days[day_of_date]+' '+months[m-1]+' '+D+endings[d-1]+'/'+M+'/'+Y)\ninput('Press to Exit')\n","repo_name":"Nicolas-Lakes/YOUR-BIRTHDAY","sub_path":"Birthday.py","file_name":"Birthday.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"42099999580","text":"\"\"\"\nVarious helper functions that are more general and thus not contained\nin one of the named modules.\n\"\"\"\n\nfrom typing import Union, Dict, Optional, Tuple\nimport numpy as np\nimport quantities as pq\nfrom dlens_vx_v3 import hal, sta, halco\n\nimport pyccalix\nfrom calix.common import base\n\n\ndef wait(builder: base.WriteRecordingPlaybackProgramBuilder,\n waiting_time: pq.quantity.Quantity) \\\n -> base.WriteRecordingPlaybackProgramBuilder:\n \"\"\"\n Waits for a given amount of time.\n\n This function appends instructions to the given builder which\n first reset the timer and then wait until the given time is reached.\n\n :param builder: Builder to add wait instruction to.\n :param waiting_time: Time to wait for.\n\n :return: Builder with wait instruction added to.\n \"\"\"\n\n # Returning the modified builder is regarded as bad style.\n # This returning should be removed, also elsewhere, cf. issue 3952\n builder.write(halco.TimerOnDLS(), hal.Timer())\n builder.block_until(halco.TimerOnDLS(), hal.Timer.Value(int(\n waiting_time.rescale(pq.us)\n * int(hal.Timer.Value.fpga_clock_cycles_per_us))))\n return builder\n\n\ndef capmem_noise(start: int = -5, end: int = 6,\n size: Optional[Union[int, Tuple[int]]] = None\n ) -> Union[int, np.ndarray]:\n \"\"\"\n Creates random integers between start and end-1.\n Used mainly to vary CapMem settings in order to avoid\n setting many cells to the same value.\n\n :param start: Lower end of the random range.\n :param end: One above the upper end of the random range.\n :param size: Number/shape of values to draw. 
If None, a single integer\n is returned.\n\n :return: Array with integer noise, or single integer.\n \"\"\"\n\n return np.random.randint(start, end, size)\n\n\ndef capmem_set_quadrant_cells(\n builder: base.WriteRecordingPlaybackProgramBuilder,\n config: Dict[halco.CapMemCellOnCapMemBlock, Union[int, np.ndarray]]\n) -> base.WriteRecordingPlaybackProgramBuilder:\n \"\"\"\n Set multiple CapMem cells that are global per quadrant to the same\n provided values.\n\n :param builder: Builder to append configuration to.\n :param config: Dict with the desired configuration.\n\n :return: Builder with configuration appended.\n \"\"\"\n\n for capmem_block_id, capmem_block in enumerate(\n halco.iter_all(halco.CapMemBlockOnDLS)):\n for cell, value in config.items():\n coord = halco.CapMemCellOnDLS(cell, capmem_block)\n\n if isinstance(value, np.ndarray):\n builder.write(coord, hal.CapMemCell(\n hal.CapMemCell.Value(value[capmem_block_id])))\n else:\n builder.write(coord, hal.CapMemCell(\n hal.CapMemCell.Value(value)))\n\n return builder\n\n\ndef capmem_set_neuron_cells(\n builder: base.WriteRecordingPlaybackProgramBuilder,\n config: Dict[halco.CapMemRowOnCapMemBlock, Union[int, np.ndarray]]\n) -> base.WriteRecordingPlaybackProgramBuilder:\n \"\"\"\n Set single CapMem rows on the neurons to the desired values.\n Expects parameters to be configured along with the\n desired row coordinates.\n The parameters can either be a numpy array of integer values, which\n are written to the neurons directly, or a single value, which is\n written only after adding some noise of +/- 5, if the range allows.\n Values of zero are not changed, turning something off is always possible.\n\n :param builder: Builder to append configuration to.\n :param config: Dict which contains pairs of CapMemRowOnCapMemBlock\n coordinates and either a single CapMemCell value or\n an array of CapMemCell values.\n In case a single non-zero value is given, it is changed to an\n array with noise and this array is written to hardware. 
This aims to\n reduce the crosstalk between CapMem cells.\n\n :return: Builder with configuration appended.\n \"\"\"\n\n noise_amplitude = 5\n\n dumper = sta.PlaybackProgramBuilderDumper()\n for capmem_row, parameters in config.items():\n # Add noise if single, non-zero value is given\n if not isinstance(parameters, np.ndarray):\n value = parameters\n parameters = np.ones(halco.NeuronConfigOnDLS.size, dtype=int) \\\n * value\n if value != 0:\n parameters += capmem_noise(\n max(hal.CapMemCell.Value.min - value, -noise_amplitude),\n min(hal.CapMemCell.Value.max - value, noise_amplitude) + 1,\n size=halco.NeuronConfigOnDLS.size)\n config[capmem_row] = parameters\n\n # Append write instructions to builder\n pyccalix.helpers.write_capmem_row(dumper,\n capmem_row,\n config[capmem_row])\n\n builder.dumper.copy_back(dumper)\n builder.builder.merge_back(sta.convert_to_builder(dumper))\n return builder\n","repo_name":"electronicvisions/calix","sub_path":"src/py/calix/common/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":4981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"40275243313","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 8 11:04:26 2020\n\n@author: Nitin\n\"\"\"\n\nif __name__ == '__main__':\n l = []\n m = []\n for i in range(int(input())):\n name = input()\n m.append(name)\n score = float(input())\n m.append(score)\n l.append(m)\n m = [] \n for i in range(len(l)):\n m.append(l[i][1])\n m = set(m)\n m = list(m)\n m.sort()\n n = []\n for i in range(len(l)):\n if m[1] == l[i][1]:\n n.append(l[i][0])\n n.sort()\n for i in n:\n print(i)","repo_name":"NitinR2510/Python3_learning","sub_path":"HackerRank/nestedlist.py","file_name":"nestedlist.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"6059705068","text":"#!/usr/bin/env python3\r\nimport copy\r\n\r\nfrom Models import *\r\n\r\n\r\nclass PetriNet:\r\n # markings that have been found\r\n markings = list()\r\n # these two variables are used to make the display look better\r\n mark_numbers = 0\r\n tabs = 0\r\n\r\n def __init__(self, transitions):\r\n self.transitions = transitions\r\n\r\n def __repr__(self):\r\n string = ''\r\n for t in self.transitions:\r\n string += f'{t}\\n'\r\n return string\r\n\r\n '''\r\n displays the initial marking if it has not been found before \r\n and also children markings that are already found before\r\n '''\r\n def marking_tree(self, initial_marking, tabs=0, transition=0):\r\n for i in range(len(self.transitions)):\r\n # if the initial marking has not been found yet, then add it to the markings list and print it\r\n if not self.already_exists(initial_marking):\r\n PetriNet.markings.append(initial_marking)\r\n for t in range(tabs):\r\n print('\\t', end='')\r\n if transition:\r\n print(f'{transition}-->', end='')\r\n print(f'M{PetriNet.mark_numbers}', initial_marking)\r\n PetriNet.mark_numbers += 1\r\n\r\n # if a transition is executable then save this Petri Net, make a copy of it and execute the transition\r\n if self.transitions[i].is_executable():\r\n for t in range(tabs):\r\n print('\\t', end='')\r\n petri_copy = copy.deepcopy(self)\r\n petri_copy.transitions[i].execute()\r\n\r\n # if it gives a new marking then pass it the new marking_tree() call.\r\n if not self.already_exists(petri_copy.get_marking()):\r\n PetriNet.tabs += 1\r\n petri_copy.marking_tree(petri_copy.get_marking(), PetriNet.tabs, 
self.transitions[i].name)\r\n PetriNet.tabs -= 1\r\n\r\n # if it's an old marking then just print it\r\n else:\r\n for t in range(tabs + 1):\r\n print('\\t', end='')\r\n print(f'{self.transitions[i].name}-->M{PetriNet.markings.index(petri_copy.get_marking())}',\r\n petri_copy.get_marking())\r\n\r\n '''\r\n tests if a marking is already defined\r\n '''\r\n def already_exists(self, marking):\r\n for mark in PetriNet.markings:\r\n # print(f'comparing {mark} and {m}')\r\n if mark == marking:\r\n return True\r\n\r\n '''\r\n browse the all the net's places and get their tokens,\r\n returns a marking object\r\n '''\r\n def get_marking(self):\r\n places = list()\r\n for t in self.transitions:\r\n for i in t.input_arcs:\r\n if not places.__contains__(i.start):\r\n places.append(i.start)\r\n for o in t.output_arcs:\r\n if not places.__contains__(o.end):\r\n places.append(o.end)\r\n places = sorted(places, key=lambda x: x.name)\r\n places = self.places_to_ints(places)\r\n m = Marking(places)\r\n return m\r\n\r\n '''\r\n converts a list of places to a list of integers\r\n '''\r\n def places_to_ints(self, places):\r\n ints = list()\r\n for p in places:\r\n ints.append(p.tokens)\r\n return ints\r\n\r\n\r\ndef main():\r\n # a Place constructor assign a name and tokens to the place\r\n p1 = Place(\"p1\", 3)\r\n p2 = Place(\"p2\", 0)\r\n p3 = Place(\"p3\", 0)\r\n\r\n # we cannot assign start and end points to arcs now because they could be transitions\r\n # and transitions themselves need arcs to be instantiated\r\n # an Arc constructor assign a cost to the arc(1 per default)\r\n a1 = Arc(3)\r\n a2 = Arc(2)\r\n a3 = Arc()\r\n a4 = Arc()\r\n a5 = Arc()\r\n a6 = Arc()\r\n a7 = Arc()\r\n a8 = Arc()\r\n a9 = Arc()\r\n\r\n # a Transition constructor assign a name and two lists of arcs as inputs and outputs to the transition\r\n t1 = Transition(\"t1\", [a2], [a3])\r\n t2 = Transition(\"t2\", [a4], [a5])\r\n t3 = Transition(\"t3\", [a6], [a7])\r\n t4 = Transition(\"t4\", [a8, a9], [a1])\r\n\r\n # now that the transitions are created we can assign start and end points to the arcs\r\n a1.assign_start_end(t4, p1)\r\n a2.assign_start_end(p1, t1)\r\n a3.assign_start_end(t1, p2)\r\n a4.assign_start_end(p2, t2)\r\n a5.assign_start_end(t2, p3)\r\n a6.assign_start_end(p3, t3)\r\n a7.assign_start_end(t3, p2)\r\n a8.assign_start_end(p3, t4)\r\n a9.assign_start_end(p1, t4)\r\n\r\n # the transitions contain arcs and arcs contain places so the transitions are all that is needed to initialize a Petri Net\r\n petri = PetriNet([t1, t2, t3, t4])\r\n\r\n petri.marking_tree(petri.get_marking())\r\n\r\n\r\nif __name__ == '__main__': main()\r\n","repo_name":"Badredine-Kheddaoui/Petri-net-marking-tree","sub_path":"PetriNet.py","file_name":"PetriNet.py","file_ext":"py","file_size_in_byte":4809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"41676792500","text":"# (Filament_z_axis.py)\n'''\n Computing E and B modes for density filament with a magnetic field wrapped around it. See Sect. 3.2 in Bracco et al. 
2018.\n andrea.bracco@su.se\n'''\n\nimport numpy as np\nimport pylab as plt\nimport matplotlib\nfrom matplotlib import cm\ncm_my=cm.viridis\nfrom matplotlib import rc\nimport astropy\nfrom astropy.io import fits\nimport sys\nimport math\nimport scipy\nfrom scipy import ndimage as nd\nfrom scipy import stats as st\n\nfont = {'size' : 18}\n\n# Import our module with all the routine.\nexec(open('Routines_supply_full.py').read())\n\n# Define some constants.\nnx = 101\nny = 101\nnz = 101 \n\n# Define the box size.\nx = np.linspace(-1, 1, nx)\ny = np.linspace(-1, 1, ny)\nz = np.linspace(-1, 1, nz)\nxx, yy, zz = np.meshgrid(x, y, z, indexing='ij')\ndx=(x[1]-x[0])\ndy=(y[1]-y[0])\ndz=(z[1]-z[0])\n\ntheta0=0*math.pi/180. # around the y-axis\nphi0=0*math.pi/180. # around the z-axis\n\n# Create the magnetic field with the vector potential. kappa [-inf,inf] beta [0,inf]\nafield, bfield = twist(xx, yy, zz, kappa=0., beta=0, bkg=1, theta = theta0, phi = phi0, shift=[0,0,0])\n\n# Create the density distribution.\nrho = column(xx, yy, zz, rho_i=2, rho_e=1, r=0.2, shift=[0,0,0], theta = theta0, phi = phi0)\n\n# Compute the column density.\nrho_column = np.sum(rho, axis=1)*dy\n\n# Compute Q and U.\np0=0.26\nii, qq, uu = find_qu(bfield, rho=rho, p0=p0, dy=y[1]-y[0], north=[1, 0])\n#ii=rho_column\n\n# Compute E and B modes\nee1, bb1 = polpy(qq,uu,nx,nz)\n\n# Compute the line-of-sight integrated magnetic helicity.\nhh1 = np.sum(afield*bfield, axis=(0, 2))*(y[1]-y[0])\nh_total1 = np.sum(afield*bfield)*(x[1]-x[0])*(y[1]-y[0])*(z[1]-z[0])\n# Compute the power spectra.\nk_shells, ee_power1 = powspec2D(ee1,ee1,nx,nz)\nk_shells, bb_power1 = powspec2D(bb1,bb1,nx,nz)\nk_shells, eb_power1 = powspec2D(ee1,bb1,nx,nz)\nk_shells, te_power1 = powspec2D(ii,ee1,nx,nz)\nk_shells, tb_power1 = powspec2D(ii,bb1,nx,nz)\nk_shells, tt_power1 = powspec2D(ii,ii,nx,nz)\n\nel0=np.isfinite(ee_power1)\nkg=k_shells[el0]\n\nr_tb1 = tb_power1[el0]/np.sqrt(bb_power1[el0]*tt_power1[el0])\nr_eb1 = eb_power1[el0]/np.sqrt(bb_power1[el0]*ee_power1[el0])\nr_te1 = te_power1[el0]/np.sqrt(tt_power1[el0]*ee_power1[el0])\n\n","repo_name":"abracco/cosmicodes","sub_path":"HBEB/Filament_z_axis.py","file_name":"Filament_z_axis.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"49"} +{"seq_id":"37970109691","text":"from turtle import *\nimport random\nimport time\n\nt=Turtle()\nt.up()\nt.setx(200)\nt.sety(-250)\nt.down()\nt.pencolor(\"grey\")\nt.write(\"By WJQ\", font=(\"normal\", 12, \"italic\")) #font-type有normal, bold, italic, underline\n\nn = 80.0\n \nspeed(\"fastest\")\nscreensize(bg='seashell')\ntime.sleep(7)\nleft(90)\nforward(3*n)\ncolor(\"orange\", \"red\")\nbegin_fill()\n\nleft(126)\n\nfor i in range(5):\n forward(n/5)\n right(144)\n forward(n/5)\n left(72)\n'''\nright(90)\nforward(n/6)\nleft(120)\nforward(n/3)\nleft(120)\nforward(n/3)\nleft(120)\nforward(n/6)\n'''\nend_fill()\n#left(90)\nright(126)\n\ncolor(\"dark green\")\nbackward(n*4.8)\ndef tree(d, s):\n if d <= 0: return\n forward(s)\n tree(d-1, s*.8)\n right(120)\n tree(d-3, s*.5)\n right(120)\n tree(d-3, s*.5)\n right(120)\n backward(s)\ntree(15, n)\nbackward(n/2)\n\nfor i in range(200):\n a = 200 - 400 * random.random()\n b = 10 - 20 * random.random()\n up()\n forward(b)\n left(90)\n forward(a)\n down()\n if random.randint(0, 1) == 0:\n color('tomato')\n else:\n color('wheat')\n circle(2)\n up()\n backward(a)\n right(90)\n 
backward(b)\n\ntime.sleep(5)","repo_name":"jiaqiangwjq/python_workhouse","sub_path":"tiny_scripts/turtle画圣诞树/tree_three.py","file_name":"tree_three.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"11181163643","text":"# Example - passing of different kinds of parameters to Mitsuba and PBRT\n\n# Discussion about passing of different types of parameter values\n# to Mitsuba and PBRT. Rendered images only illustrate that there\n# was no issue with handling of parameters.\n\nconfiguration = {\"webpage_generate\": True, \"webpage_display\": True}\n\nscenes = [\"pool_classic\"]\n\nrenderers = {\n \"mitsubaRenderer_0_5\": {\n \"type\": \"mitsuba_0_5\",\n \"path\": \"data/renderers/mitsuba_0_5/mitsuba.exe\",\n },\n \"pbrt3Renderer\": {\n \"type\": \"pbrt_3\",\n \"path\": \"data/renderers/pbrt_3/pbrt.exe\",\n },\n}\n\nparameter_sets = {\n \"mitsuba\": {\n \"integrator\": [\n [\"type\", \"\", \"pssmlt\"],\n [\"bidirectional\", \"boolean\", True], # Boolean\n [\"maxDepth\", \"integer\", 10], # Integer\n [\"twoStage\", \"boolean\", \"true\"], # Alternative for boolean\n [\"pLarge\", \"float\", 0.25], # Float\n # String would be printed as is:\n # [\"stringParamName\", \"string\", \"stringValue\"]\n # Result: \n # List of values \"x, y, z\" can be inputted as Python list:\n # [\"listParamName\", \"rgb\", [x, y, z]]\n # or as an equivalent Mitsuba string (will be copied as is):\n # [\"listParamName\", \"rgb\", \"x, y, z\"]\n # Result: \n ],\n \"sampler\": [\n [\"type\", \"\", \"independent\"],\n [\"sampleCount\", \"integer\", 1],\n ],\n },\n \"pbrt\": {\n \"Integrator\": [\n [\"type\", \"\", \"bdpt\"],\n [\"maxdepth\", \"integer\", 9], # Integer\n [\"pixelbounds\", \"integer\", [128, 384, 128, 384]], # List\n [\"lightsamplingstrategy\", \"string\", \"uniform\"], # String\n # Float is the same as integer\n # Boolean\n # [\"booleanParamName\", \"bool\", True]\n # or as an equivalent PBRT string:\n # [\"booleanParamName\", \"bool\", \"true\"]\n # Result: \"bool booleanParamName\" \"true\"\n ],\n \"Sampler\": [[\"type\", \"\", \"random\"], [\"pixelsamples\", \"integer\", 1]],\n },\n}\n\ntest_cases = [\n {\n \"name\": \"mitsubaCase\",\n \"description\": \"Mitsuba rendering\",\n \"renderer\": \"mitsubaRenderer_0_5\",\n \"params\": {\"base\": [\"mitsuba\"]},\n },\n {\n \"name\": \"pbrtCase\",\n \"description\": \"PBRT rendering\",\n \"renderer\": \"pbrt3Renderer\",\n \"params\": {\"base\": [\"pbrt\"]},\n },\n]\n","repo_name":"tazlarv/lteval","sub_path":"configurations/example_param_types.py","file_name":"example_param_types.py","file_ext":"py","file_size_in_byte":2515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"12310148720","text":"from typing import Optional, Dict\n\nfrom selenium.webdriver import Chrome, ChromeOptions\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\n\nfrom .constants import CHROME_DRIVER\nfrom .pages.car_table import CarTablePage, TableBodyRow, TableHeadRow\n\n\ndef map_th_tr(thr: TableHeadRow, tbr: TableBodyRow):\n ret = {}\n for th, td in zip(thr.get_tds(), tbr.get_tds()):\n ret[th.value] = td.value\n\n return ret\n\n\nclass Extractor:\n def __init__(self, proxy: Optional[str] = None):\n options = ChromeOptions()\n options.add_argument('--disable-notifications')\n options.add_argument(\"--headless\")\n options.add_argument(\"--no-sandbox\")\n 
options.add_argument(\"--disable-dev-shm-usage\")\n options.add_argument(\"--disable-gpu\")\n options.add_argument(\"--window-size=1920,1080\")\n\n if proxy is not None:\n options.add_argument(f\"--proxy-server={proxy}\")\n\n self.driver: Chrome = Chrome(executable_path=str(CHROME_DRIVER), options=options)\n\n def extract_table_info(self) -> Dict[str, str]:\n car_details_page = CarTablePage(self.driver)\n\n table = car_details_page.get_table()\n\n if table is None:\n return {}\n\n thead = table.get_thead()\n tbody = table.get_tbody()\n\n thead_row = thead.get_rows()[0]\n\n tbody_rows = tbody.get_rows()\n needed_row = tbody_rows[0]\n\n mapped = map_th_tr(thead_row, needed_row)\n\n car_page = needed_row.get_car_page()\n model = mapped.get('Наименование')\n mark_model = car_page.get_mark_model()\n\n mark = mark_model.rstrip(model).strip()\n\n date = mapped.get('date') or mapped.get('Модификация выпускается с')\n option = mapped.get('Опции')\n\n ret = {\n 'url': car_page.get_url(),\n 'many_url': str(int(len(tbody_rows) > 1)),\n 'model': model,\n 'mark': mark,\n 'engine': mapped.get('Модификация') or mapped.get('Двигатель'),\n }\n\n if date:\n ret['date'] = date\n\n if option:\n ret['option'] = option\n\n return ret\n\n def extract_by_vin(self, vin: str) -> Dict[str, str]:\n self.driver.get(f\"https://emex.ru/catalogs/original/?screen=modifications&vin={vin}\")\n\n return self.extract_table_info()\n\n def extract_by_framenum(self, frame: str, framenum: str) -> Dict[str, str]:\n self.driver.get(f\"https://emex.ru/catalogs/original/?screen=modifications&frame={frame}&framenum={framenum}\")\n\n return self.extract_table_info()\n\n def close(self):\n try:\n self.driver.close()\n\n except:\n pass\n\n def __del__(self):\n self.close()\n","repo_name":"e92git/emex_selenium_parser","sub_path":"app/extractor.py","file_name":"extractor.py","file_ext":"py","file_size_in_byte":2811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"14622792683","text":"from validator import Validator\nimport re\nfrom datetime import datetime, timedelta\nimport ntplib\nimport logging\n\n\nclass ValidHost(Validator):\n\n def __init__(self):\n self.err_message = \"Not a valid host\"\n self.not_message = \"\"\n self.compiled_ip = re.compile('\\d+.\\d+.\\d+.\\d+')\n\n def __call__(self, value):\n return self.compiled_ip.match(value) or self.is_valid_dns(value)\n\n def is_valid_dns(self, value):\n\n if len(value) > 255:\n self.err_message = \"Url is greater than the 255 byte limit\"\n return False\n\n return self.segments_are_valid(value)\n\n def segments_are_valid(self, value):\n for s in value.split(\".\"):\n if len(s) > 63:\n self.err_message = \"URL segment too long: {0}\".format(s)\n return False\n\n return True\n\n\nclass ValidHostWithPort(ValidHost):\n\n def __init__(self):\n super().__init__()\n self.compiled_ip = re.compile('\\d+.\\d+.\\d+.\\d+:\\d+')\n\n def is_valid_dns(self, value):\n \"\"\"Overrides base class definition\"\"\"\n host, port = value.split(\":\")[-2:]\n if not self.check_port(port):\n return False\n\n if len(host) > 255:\n self.err_message = \"Url is greater than the 255 byte limit\"\n return False\n\n return self.segments_are_valid(host)\n\n def check_port(self, port):\n p = -1\n if not port:\n self.err_message = \"Host has no port associated\"\n return False\n\n try:\n p = int(port)\n except ValueError:\n self.err_message = \"Host has no port associated\"\n return False\n\n if 0 < p <= 65535:\n return True\n\n self.err_message = \"Host has 
invalid port number\"\n return False\n\n def segments_are_valid(self, value):\n for s in value.split(\".\"):\n if len(s) > 63:\n self.err_message = \"URL segment too long: {0}\".format(s)\n return False\n\n return True\n\n\nclass NTP(object):\n _ntp_client = ntplib.NTPClient()\n _last_updated = None\n _current_offset = 0\n\n @classmethod\n def get_server_time(cls):\n now = datetime.now()\n if not cls._last_updated or (now > (cls._last_updated + timedelta(seconds=300))):\n cls._last_updated = now\n try:\n cls._current_offset = cls._ntp_client.request('pool.ntp.org').offset\n except Exception as e:\n logging.exception(e)\n return datetime.now() - timedelta(milliseconds=cls._current_offset)\n","repo_name":"k0nserv/flyby","sub_path":"flyby/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"15033106454","text":"import dataiku\nimport pandas as pd\nfrom flask import request\n\n\n# Example:\n# As the Python webapp backend is a Flask app, refer to the Flask\n# documentation for more information about how to adapt this\n# example to your needs.\n# From JavaScript, you can access the defined endpoints using\n# getWebAppBackendUrl('first_api_call')\n\n@app.route('/first_api_call')\ndef first_call():\n max_rows = request.args.get('max_rows') if 'max_rows' in request.args else 500\n\n mydataset = dataiku.Dataset(\"REPLACE_WITH_YOUR_DATASET_NAME\")\n mydataset_df = mydataset.get_dataframe(sampling='head', limit=max_rows)\n\n # Pandas dataFrames are not directly JSON serializable, use to_json()\n data = mydataset_df.to_json()\n return json.dumps({\"status\": \"ok\", \"data\": data})\n","repo_name":"AdamJelley/automated-pdf-processor","sub_path":"web_apps/Qd2fGuq/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"49"} +{"seq_id":"72812488149","text":"from typing import List\n\nclass Solution:\n def repeatedNTimes(self, A: List[int]) -> int:\n for i in range(2, len(A)):\n if A[i] == A[i - 1] or A[i] == A[i - 2]:\n return A[i]\n return A[-1]\n\ntests = [\n [[1, 2, 3, 3], 3],\n [[2, 1, 2, 5, 3, 2], 2],\n [[5, 1, 5, 2, 5, 3, 5, 4], 5],\n [[9, 5, 6, 9], 9]\n]\n\nfor t in tests:\n sol = Solution()\n actual = sol.repeatedNTimes(t[0])\n print(\"The element repeated N times in\", t[0], \"->\", actual);\n assert(actual == t[1])\n","repo_name":"l33tdaima/l33tdaima","sub_path":"p961e/repeated_n_times.py","file_name":"repeated_n_times.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"30727994993","text":"\"\"\"\nExample relay script for a UR5 robot and a HoloLens device.\n\nAuthor: Chengyuan Ma\n\nThis script creates a communication bridge between a Universal Robots UR5 robotic arm and a HoloLens device. \nIt allows the HoloLens to send control commands to the UR5 and receive real-time feedback of the robot's joint torques. 
\nThe script uses the rtde_control and rtde_receive modules to interface with the UR5, and asyncio to handle concurrent \ncommunication with the HoloLens.\n\nThe main components of the script are:\n\n comm_robot(): This function establishes a connection with the UR5 and manages its motion based on the received commands.\n comm_hololens(): This function sets up an asyncio server to handle incoming connections and messages from the HoloLens.\n main(): This function combines the robot and HoloLens communication tasks, running them concurrently using asyncio and ThreadPoolExecutor.\n\nThe script continuously listens for messages from the HoloLens and processes the received commands to control the UR5 robot.\n\"\"\"\n\nimport asyncio # Import the asyncio library for asynchronous I/O\nimport socket # Import the socket library for determining IP address\nimport concurrent.futures # Import the concurrent.futures library for thread-based parallelism\nimport struct # Import the struct library for working with C-style data structures\nimport time # Import the time library for sleep-based pacing of the control loop\n\nfrom mock_rtde import MockRTDEControlInterface as RTDEControlInterface\nfrom mock_rtde import MockRTDEReceiveInterface as RTDEReceiveInterface\n\ntorques = [0] * 6 # Initialize a list of 6 zeroes to store torque values for each joint\nmessages = [\"DOWN\"] # Initialize a list with the initial message \"DOWN\"\nexecutor = concurrent.futures.ThreadPoolExecutor(max_workers=1) # Create a ThreadPoolExecutor with a single worker\n\ndef get_ip_address():\n try:\n hostname = socket.gethostname()\n ip_address = socket.gethostbyname(hostname)\n return ip_address\n except:\n return \"Error: Unable to get IP address\"\n\ndef comm_robot():\n rtde_c = RTDEControlInterface(\"169.254.9.43\") # Create a control interface with the robot's IP address\n rtde_r = RTDEReceiveInterface(\"169.254.9.43\") # Create a receive interface with the robot's IP address\n print(\"Connected to UR5\") # Print a message to indicate successful connection to the robot\n up_pose = rtde_r.getActualTCPPose() # Get the current Tool Center Point pose of the robot\n down_pose = up_pose[:] # Create a copy of the current TCP pose for the down pose\n down_pose[2] -= 0.1 # Decrease the z-coordinate of the down pose by 0.1 meters\n while True: \n global torques # Declare the torques variable as global\n torques = rtde_c.getJointTorques() # Update the torques variable with the current joint torques\n while len(messages) > 0: # While there are messages in the list\n message = messages.pop() # Pop the last message from the list\n print(f'Received Message: {message}')\n if message == \"UP\": # If the message is \"UP\"\n rtde_c.stopL(10.0) # Stop the robot's linear motion with a deceleration of 10 m/s^2\n rtde_c.moveL(up_pose, asynchronous=True) # Move the robot to the up_pose asynchronously\n elif message == \"DOWN\": # If the message is \"DOWN\"\n rtde_c.stopL(10.0) # Stop the robot's linear motion with a deceleration of 10 m/s^2\n rtde_c.moveL(down_pose, asynchronous=True) # Move the robot to the down_pose asynchronously\n time.sleep(0.01) # Sleep for 10 milliseconds to reduce CPU usage\n\n\nasync def comm_hololens(): # Define the asynchronous function that communicates with the HoloLens\n async def callback(reader: asyncio.StreamReader, writer: asyncio.StreamWriter): # Define the asynchronous callback function for the HoloLens connection\n print(\"Connected to HoloLens\") \n while True:\n try:\n message = (await
reader.read(1024)).decode() # Read up to 1024 bytes from the reader and decode the message\n if message != \" \": # If the message is not empty\n messages.append(message) # add the message to the messages list to be read by the UR5\n writer.write(struct.pack(\"!\" + \"f\" * 6, *torques)) # Write the torques list to the writer as a packed binary string, using network byte order (\"!\") and 6 floating-point values (\"f\" * 6), one for each torque value in the list. The resulting packed binary string is then written to the writer, which sends the data over the network connection to the HoloLens device.\n await writer.drain() # Wait for the writer to finish sending data\n except:\n break\n\n\n await asyncio.start_server(callback, host=\"0.0.0.0\", port=21200) # Start the server, listening on all available network interfaces and using port 21200, with the callback function to handle connections. Note that the host number is basically just the IP address, and the port number refers to a port of communication to host your application on.\n\nasync def receive_image(reader: asyncio.StreamReader, writer: asyncio.StreamWriter):\n print(\"Connected to Image Sender\")\n while True:\n try:\n # Receive the image size\n image_size_data = await reader.readexactly(4)\n image_size = struct.unpack(\"!I\", image_size_data)[0]\n\n # Receive the image data\n image_data = await reader.readexactly(image_size)\n \n # Save the received image\n with open(\"received_image.png\", \"wb\") as img_file:\n img_file.write(image_data)\n\n # Send an acknowledgment to the sender\n writer.write(b\"ACK\")\n await writer.drain()\n\n except asyncio.IncompleteReadError:\n break\n\nasync def main():\n print(f\"Running on IP: {get_ip_address()}\") # Add this line\n\n loop = asyncio.get_running_loop()\n await asyncio.gather(\n comm_hololens(),\n loop.run_in_executor(executor, comm_robot),\n asyncio.start_server(receive_image, host=\"0.0.0.0\", port=21201),\n )\n\nif __name__ == \"__main__\":\n asyncio.run(main())\n","repo_name":"Yuxiang-Ma/2.12-Hololens2","sub_path":"Comms/Mock/mock_server.py","file_name":"mock_server.py","file_ext":"py","file_size_in_byte":6229,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"49"} +{"seq_id":"23937027980","text":"import numpy as np\r\n\r\nX=np.array([[1,0,1,0],[1,0,1,1],[0,1,0,1]])\r\ny=np.array([[1],[1],[0]])\r\n\r\ndef sigmoid(x):\r\n return 1 / (1 + np.exp(-x))\r\n\r\ndef sigmoid_derivative(x):\r\n return x * (1 - x)\r\n\r\nepochs = 50000\r\nalpha = 0.01\r\ninput_neurons = 4\r\noutput_neuros = 1\r\nhidden_neurons = 3\r\n\r\nwh = np.random.uniform(size=(input_neurons,hidden_neurons))\r\nbh = np.random.uniform(size=(1,hidden_neurons))\r\nwout = np.random.uniform(size=(hidden_neurons,output_neuros))\r\nbout = np.random.uniform(size=(1,output_neuros))\r\n\r\nfor i in range(epochs):\r\n# Feedforward\r\n z1 = np.dot(X,wh) + bh\r\n hidden_layer = sigmoid(z1)\r\n \r\n z2 = np.dot(hidden_layer,wout)\r\n output_layer = sigmoid(z2)\r\n\r\n# Backpropagation \r\n E = y - output_layer\r\n \r\n slope_output_layer = sigmoid_derivative(output_layer)\r\n slope_hidden_layer = sigmoid_derivative(hidden_layer)\r\n \r\n d_output = E * slope_output_layer\r\n error_hidden_layer = d_output.dot(wout.T)\r\n d_hidden_layer = error_hidden_layer * slope_hidden_layer\r\n \r\n wout += hidden_layer.T.dot(d_output) * alpha\r\n bout += np.sum(d_output, axis=0, keepdims=True) * alpha\r\n wh += X.T.dot(d_hidden_layer) * alpha\r\n bh += np.sum(d_hidden_layer) * 
alpha\r\n\r\nprint(output_layer)","repo_name":"ravi4all/DeepLearning_Jan_2020","sub_path":"01-ANN.py","file_name":"01-ANN.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"26305083460","text":"###################双向检索评估效果######################################\nimport numpy as np\nimport random\nfrom torch.utils.data.dataset import Dataset\nfrom scipy.io import loadmat, savemat\nfrom torch.utils.data import DataLoader\nimport numpy \nimport sys\nimport importlib\nimportlib.reload(sys)\nimport torch\nfrom options import TestOptions\nfrom dataset import dataset_single\nfrom model import DRIT\nfrom saver import save_imgs\nimport os\nimport random\ndef rank_i2t(images, captions,npts=None):\n\n if npts is None:\n npts = int(images.shape[0] / 5)\n\n ranks = numpy.zeros(npts)\n top1 = numpy.zeros(npts)\n\t\t\n length=captions.shape[0]\n for index in range(npts):\n \n # Get query image\n im = images[5 * index]\n distance={}\n for i in range(length):\n distance[i]= np.linalg.norm(im - captions[i])\n distance_sorted = sorted(distance.items(), key=lambda x:x[1])\n \n # Score\n rank = 1e20\n for i in range(5 * index, 5 * index + 5, 1):\n tmp = np.where(np.array(distance_sorted) == distance[i])[0][0]\n if tmp < rank:\n rank = tmp#选择五个中离他最近的index\n ranks[index] = rank\n\t\t\n \n # Compute metrics\n r1 = 100.0 * len(numpy.where(ranks < 1)[0]) / len(ranks)\n r5 = 100.0 * len(numpy.where(ranks < 5)[0]) / len(ranks)\n r10 = 100.0 * len(numpy.where(ranks < 10)[0]) / len(ranks)\n medr = numpy.floor(numpy.median(ranks)) + 1\n meanr = ranks.mean() + 1\n return (r1, r5, r10), medr\n\ndef rank_t2i( images, captions,npts=None):\n\n if npts is None:\n npts = int(images.shape[0] / 5)\n ims = numpy.array([images[i] for i in range(0, len(images), 5)])\n\n ranks = numpy.zeros(5 * npts)\n top1 = numpy.zeros(5 * npts)\n for i in range(len(captions)):\n cap = captions[i]\n distance={}\n for index in range(npts):\n distance[index] = np.linalg.norm(cap - ims[index])\n distance_sorted = sorted(distance.items(), key=lambda x:x[1])\n\t\t\n tmp = np.where(np.array(distance_sorted) == distance[int(i/5)])[0][0]\n ranks[i] = tmp\n \n\n # Compute metrics\n r1 = 100.0 * len(numpy.where(ranks < 1)[0]) / len(ranks)\n r5 = 100.0 * len(numpy.where(ranks < 5)[0]) / len(ranks)\n r10 = 100.0 * len(numpy.where(ranks < 10)[0]) / len(ranks)\n medr = numpy.floor(numpy.median(ranks)) + 1\n return (r1, r5, r10), medr\n\n\t\n\t\n","repo_name":"sunyue11/DRIT","sub_path":"f8k_8/recall_evaluate.py","file_name":"recall_evaluate.py","file_ext":"py","file_size_in_byte":2435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"39878331130","text":"from discord.ext import commands \nimport time \nimport random \n\n\n\nclass Leveling(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot \n\n self.xp = {} # a dictionary: user: xp\n\n @commands.Cog.listener()\n async def on_message(self, msg):\n if msg.author.bot:\n return \n \n if msg.guild is None:\n return\n \n increase = random.randint(5, 15)\n\n if msg.author in self.xp:\n self.xp[msg.author] += increase\n else:\n self.xp[msg.author] = increase\n\nasync def setup(bot):\n await bot.add_cog(Leveling(bot))\n \n \n \n ","repo_name":"Stormtorch002/lunabot","sub_path":"sheesh.py","file_name":"sheesh.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} 
+{"seq_id":"7411160996","text":"\"\"\"\n\nImplement next permutation, which rearranges numbers into the lexicographically next greater permutation of numbers.\nIf such arrangement is not possible, it must rearrange it as the lowest possible order (ie, sorted in ascending order).\nThe replacement must be in-place and use only constant extra memory.\n\n\"\"\"\n\nimport time\n\n\nclass Solution:\n def next_per(self, arr):\n prefix = len(arr)-2\n while prefix > -1 and arr[prefix] >= arr[prefix+1]:\n prefix -= 1\n\n if prefix == -1:\n return arr.reverse()\n\n suffix = len(arr)-1\n while suffix > prefix:\n if arr[suffix] > arr[prefix]:\n arr[suffix], arr[prefix] = arr[prefix], arr[suffix]\n suffix -= 1\n\n arr[prefix+1:] = reversed(arr[prefix+1:])\n return arr\n\n\nif __name__ == \"__main__\":\n s = Solution()\n start = time.time()\n print(\"enter a list of integers\")\n arr = input()\n arr = list(map(int, arr.split(',')))\n print(s.next_per(arr))\n print(\"time taken: {} seconds\".format(time.time() - start))\n ","repo_name":"vijaygupta18/Hacktober-2020","sub_path":"Python_programs/next_permutation.py","file_name":"next_permutation.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"49"} +{"seq_id":"44528477698","text":"\"\"\"Given two sets of bounding boxes, compute mean average precision as intersection/union\n\n\"\"\"\n# Usage: compute_mean_average_prediction.py path_to_images.txt_file\n# Accept .txt file like YOLO\n# .txt path has full path to images\n# path to ground truth is path_to_images/../labels\n# path to predictions is path_to_images/../predicted_labels\n#\n\n# Assume python > 3.6\n\nimport os\nfrom pathlib import Path\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport argparse\nimport sys\n\ndef _read_bounding_box(image_path, bb_rel_path=None):\n \"\"\"Read bounding box for an image\"\"\"\n\n # Image path is path to image\n # bb_rel_path is list containing relative path to bb from image dir\n \n if bb_rel_path is None:\n bb_rel_path = \"\"\n elif type(bb_rel_path) == list:\n bb_rel_path = os.path.sep.join(bb_rel_path)\n\n im_dir, fname = os.path.split(image_path)\n fname = os.path.splitext(fname)[0] + \".txt\"\n bb_path = os.path.join(im_dir,bb_rel_path, fname)\n with open(bb_path, \"rt\") as fid:\n bb = [ [float(l2) for l2 in l.strip(\"\\n\").split(\" \")] for l in fid.readlines()]\n\n bb = np.array(bb)\n\n # Return labels and bbs separately\n return bb[:,0], bb[:,1:]\n\ndef _scale_bb(bbs, width, height):\n scale = np.array((width, height, width, height), dtype=float)\n bbs_scaled = []\n for bb in bbs:\n x_start = bb[0] - bb[2]/2\n y_start = bb[1] - bb[3]/2\n x_end = x_start + bb[2]\n y_end = y_start + bb[3]\n bbs_scaled.append(np.array((x_start, y_start, x_end, y_end) * scale, dtype=int))\n return bbs_scaled\n\n\ndef _plot_rects(im, bb1, bb2, ax=None):\n \"\"\"Plot rectangles of bounding boxes on image\"\"\"\n if ax is None:\n ax=plt.gca()\n ax.imshow(im)\n\n height, width = im.shape[0:2]\n for bb in _scale_bb(bb1, width, height):\n ax.plot([bb[0], bb[0], bb[2], bb[2], bb[0]],[bb[1],bb[3], bb[3], bb[1], bb[1]], 'r')\n for bb in _scale_bb(bb2, width, height):\n ax.plot([bb[0], bb[0], bb[2], bb[2], bb[0]],[bb[1],bb[3], bb[3], bb[1], bb[1]], 'b')\n return ax\n \n\ndef _compute_mean_average_precision(width, height, bb1, bb2):\n \"\"\"Compute the mean average precision given bounding box and im size\"\"\"\n\n # Inputs: path to image, path to ground truth bbs, path to predicted bbs\n # 
Assume yolo format for bbs:\n # label xc, yc, w, h\n # xc,yc are relative to top-left of image and normalised by image w/h\n # w,h are also normalised by image w/h\n\n \n im_gt = np.full((width, height), fill_value=False, dtype=bool)\n im_pred = np.full((width, height),fill_value=False, dtype=bool)\n\n # For each bounding box scale to dimensions of real image\n scale = np.array((width, height, width, height), dtype=float)\n for bb in bb1:\n x_start = bb[0] - bb[2]/2\n y_start = bb[1] - bb[3]/2\n x_end = x_start + bb[2]\n y_end = y_start + bb[3]\n x_start, y_start, x_end, y_end = np.array((x_start, y_start, x_end, y_end) * scale, dtype=int)\n\n im_gt[x_start:x_end, y_start:y_end] = True\n #print(f\"{x_start},{x_end},{y_start},{y_end}\")\n #im_gt[0:10, 20:30] = True\n\n for bb in bb2:\n x_start = bb[0] - bb[2]/2\n y_start = bb[1] - bb[3]/2\n x_end = x_start + bb[2]\n y_end = y_start + bb[3]\n x_start, y_start, x_end, y_end = np.array((x_start, y_start, x_end, y_end) * scale, dtype=int)\n\n im_pred[x_start:x_end, y_start:y_end] = True\n\n return len(np.where(im_gt & im_pred)[0]) / len(np.where(im_gt | im_pred)[0])\n \n\ndef main(): \n parser = argparse.ArgumentParser(\n description = \"Compute mean average precision\"\n )\n parser.add_argument(\"-i\", dest=\"input_path\", required=True,\n help=\"Path to file containing paths to images\"\n )\n parser.add_argument(\"-g\", dest=\"gt_path\", default=\"../labels\",\n help=\"Relative path to ground truth\"\n )\n parser.add_argument(\"-p\", dest=\"pred_path\", default=\"../predictions\",\n help=\"Relative path to ground truth\"\n )\n parser.add_argument(\"-o\", dest=\"output_path\", default=\"./mean_average_precision.csv\",\n help=\"Path to save output\"\n )\n parser.add_argument(\"-m\", \"--montage\", dest=\"montage\", \n default=False, action='store_true',\n help=\"Create montage of overlay of groundtruth and predictions\"\n )\n parser.add_argument(\"--ncols\", dest=\"ncols\", type=int, default=3,\n help=\"Number of columns for montage\"\n )\n parser.add_argument(\"--nrows\", dest=\"nrows\", type=int, default=\"6\",\n help=\"Maximum number of rows per page\"\n )\n parser.add_argument(\"--gt-colour\", dest=\"gt_color\", default=\"r\",\n help=\"Color for ground truth rectangles\"\n )\n parser.add_argument(\"--pred-colour\", dest=\"pred_color\", default=\"g\",\n help=\"Color for predicted rectangles\"\n )\n\n args = parser.parse_args()\n input_path = args.input_path\n gt_path = args.gt_path\n pred_path = args.pred_path\n \n # Get list of images\n with open(input_path, \"rt\") as fid:\n images_to_process = [i.strip() for i in fid.readlines()]\n \n n_images = len(images_to_process)\n print(f\"N images = {n_images}\")\n\n if args.montage:\n ncols = int(np.min((n_images, args.ncols)))\n #nrows = int(np.ceil(n_images/ncols))\n nrows = args.nrows\n fig, axs = plt.subplots(nrows, ncols, figsize=(7,9),\n subplot_kw={'xticks': [], 'yticks': []})\n n_per_page = ncols * nrows\n # Take away 1 because of zero offset\n\n # Get mAP for each image\n mean_average_precision = \"\"\n for i, image_path in enumerate(images_to_process, start=0):\n # Open image and get width/height\n im = plt.imread(image_path)\n height, width = im.shape[0:2]\n\n # Read ground truth bounding boxes\n labels_gt, bb_gt = _read_bounding_box(image_path, gt_path)\n labels_pred, bb_pred = _read_bounding_box(image_path, pred_path)\n m_ap = _compute_mean_average_precision(width, height, bb_gt, bb_pred)\n mean_average_precision += f\"{image_path},{m_ap}\\n\"\n\n # If necessary plot gt and 
prediction\n if args.montage:\n row = int(np.floor(i/ncols))\n col = i%ncols\n print(f\"row={row}, col={col}\")\n if nrows < 2:\n ax = axs[col]\n else:\n ax = axs[row][col]\n _plot_rects(im, bb_gt, bb_pred, ax)\n\n image_name = Path(image_path).name\n ax.set_title(f\"{image_name} - {m_ap:.2f}\")\n \n \n # Write out processed values\n output_path = args.output_path\n \n with open(output_path, \"wt\") as fid:\n fid.writelines(mean_average_precision)\n\n # Compute montage if required\n if args.montage:\n montage_path = os.path.splitext(output_path)[0] + \".png\"\n plt.savefig(montage_path)\n \n \nif __name__ == \"__main__\":\n main()\n \n","repo_name":"cvmloffice/counting-vertebrae","sub_path":"mean_average_precision/scripts/utils/compute_mean_average_precision.py","file_name":"compute_mean_average_precision.py","file_ext":"py","file_size_in_byte":6927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"1067320625","text":"import time\nimport h5py\nimport numpy as np\nimport pandas as pds\nimport astropy.io.fits as fits\nimport subprocess as subpro\n\nimport astropy.units as U\nimport astropy.constants as C\nimport astropy.wcs as awc\nimport astropy.io.ascii as asc\n\nfrom io import StringIO\nfrom astropy import cosmology as apcy\nfrom astropy.coordinates import SkyCoord\n\n#.\nfrom fig_out_module import star_pos_func, tractor_peak_pos\nfrom fig_out_module import zref_BCG_pos_func\nfrom img_pre_selection import WCS_to_pixel_func\n\nfrom img_mask_adjust import adjust_mask_func\nfrom img_resample import resamp_func\n\n#.\nfrom mpi4py import MPI\ncommd = MPI.COMM_WORLD\nrank = commd.Get_rank()\ncpus = commd.Get_size()\n\n###.cosmology\nrad2asec = U.rad.to(U.arcsec)\nTest_model = apcy.Planck15.clone(H0 = 67.74, Om0 = 0.311)\nH0 = Test_model.H0.value\nh = H0/100\nOmega_m = Test_model.Om0\nOmega_lambda = 1.-Omega_m\nOmega_k = 1.- (Omega_lambda + Omega_m)\n\npixel = 0.396\nz_ref = 0.25\nband = ['r', 'g', 'i']\n\n\n### === ### offset correction\nload = '/home/xkchen/fig_tmp/'\nhome = '/home/xkchen/data/SDSS/'\nout_path = '/home/xkchen/data/SDSS/photo_files/pos_offset_correct_imgs/'\n\ncat_lis = ['low_BCG_star-Mass', 'high_BCG_star-Mass']\n\n\"\"\"\nfor mm in range( 2 ):\n\n\tband_str = band[ rank ]\n\n\tdat = pds.read_csv( load + 'Extend_Mbcg_cat/%s_%s-band_photo-z-match_BCG_cat.csv' % (cat_lis[mm], band_str),)\n\tra, dec, z = np.array(dat.ra), np.array(dat.dec), np.array(dat.z)\n\n\tNs = len( z )\n\n\toff_pk_x = np.zeros( Ns )\n\toff_pk_y = np.zeros( Ns )\n\n\t### offset to peak position record\n\tfor jj in range( Ns ):\n\n\t\tra_g, dec_g, z_g = ra[jj], dec[jj], z[jj]\n\n\t\tDa_g = Test_model.angular_diameter_distance(z_g).value\n\t\tL_pix = Da_g * 10**3 * pixel / rad2asec\n\n\t\t##...SExTractor sources\n\t\tgalx_file = home + 'photo_files/detect_source_cat/photo-z_img_%s-band_mask_ra%.3f_dec%.3f_z%.3f.cat' % (band_str, ra_g, dec_g, z_g)\n\t\timg_file = home + 'photo_data/frame-%s-ra%.3f-dec%.3f-redshift%.3f.fits.bz2' % (band_str, ra_g, dec_g, z_g)\n\n\t\tcen_x, cen_y, peak_x, peak_y = tractor_peak_pos( img_file, galx_file )\n\n\t\timg_data = fits.open( img_file )\n\t\tHead_info = img_data[0].header\n\n\t\tbcg_x, bcg_y = WCS_to_pixel_func( ra_g, dec_g, Head_info)\n\n\t\t## star catalog\n\t\tstar_file = home + 'photo_files/star_cats/source_SQL_Z%.3f_ra%.3f_dec%.3f.csv' % (z_g, ra_g, dec_g)\n\t\tcm_x0, cm_y0, cm_A0, cm_B0, cm_chi0 = star_pos_func( star_file, Head_info, pixel )[:5]\n\n\t\tNs0 = len( cm_x0 )\n\n\t\ttarg_order = []\n\n\t\tfor ii 
in range( Ns0 ):\n\n\t\t\tds = np.sqrt( ( cen_x - cm_x0[ii] )**2 + ( cen_y - cm_y0[ii] )**2 )\n\t\t\tid_x = np.where( ds == np.nanmin( ds ) )[0][0]\n\t\t\ttarg_order.append( id_x )\n\n\t\ttarg_order = np.array( targ_order )\n\n\t\t## stars in frame region\n\t\tid_limx = ( cm_x0 >= 0 ) & ( cm_x0 <= 2048 )\n\t\tid_limy = ( cm_y0 >= 0 ) & ( cm_y0 <= 1489 )\n\t\tid_lim = id_limx & id_limy\n\n\t\tlim_s_x0, lim_s_y0 = cm_x0[ id_lim ], cm_y0[ id_lim ]\n\t\tlim_order = targ_order[ id_lim ]\n\n\t\t## offset\n\t\toff_cen = np.sqrt( ( cen_x[ lim_order ] - lim_s_x0 )**2 + ( cen_y[ lim_order ] - lim_s_y0 )**2 )\n\t\toff_peak = np.sqrt( ( peak_x[ lim_order ] - lim_s_x0 )**2 + ( peak_y[ lim_order ] - lim_s_y0 )**2 )\n\n\t\tdevi_cenx = cen_x[ lim_order ] - lim_s_x0\n\t\tdevi_ceny = cen_y[ lim_order ] - lim_s_y0\n\n\t\tdevi_pkx = peak_x[ lim_order ] - lim_s_x0\n\t\tdevi_pky = peak_y[ lim_order ] - lim_s_y0\n\n\t\t## corrected BCG position\n\t\tmedi_x2pk_off = np.median( devi_pkx )\n\t\tmedi_y2pk_off = np.median( devi_pky )\n\n\t\tp_pk_x, p_pk_y = bcg_x + medi_x2pk_off, bcg_y + medi_y2pk_off\n\n\t\toff_pk_x[jj] = p_pk_x\n\t\toff_pk_y[jj] = p_pk_y\n\n\t\t##.. save the off set array\n\t\tkeys = [ 'offset2cen', 'offset2peak', 'devi_cenx', 'devi_ceny', 'devi_pk_x', 'devi_pk_y' ]\n\t\tvalues = [ off_cen, off_peak, devi_cenx, devi_ceny, devi_pkx, devi_pky ]\n\t\tfill = dict( zip( keys, values ) )\n\t\tdata = pds.DataFrame( fill )\n\t\tdata.to_csv( out_path + 'offset/%s-band_ra%.3f_dec%.3f_z%.3f_star-pos-offset.csv' % (band_str, ra_g, dec_g, z_g),)\n\n\t### BCG position with offset adjust\n\t#. save the BCG position with offset correction\n\tkeys = [ 'ra', 'dec', 'z', 'bcg_x', 'bcg_y' ]\n\tvalues = [ ra, dec, z, off_pk_x, off_pk_y ]\n\tfill = dict( zip(keys, values) )\n\tdata = pds.DataFrame( fill )\n\tdata.to_csv( load + 'Extend_Mbcg_cat/%s_%s-band_photo-z-match_pk-offset_cat.csv' % (cat_lis[mm], band_str),)\n\n\t#. BCG position at z_ref\n\tcat_file = load + 'Extend_Mbcg_cat/%s_%s-band_photo-z-match_pk-offset_cat.csv' % (cat_lis[mm], band_str)\n\tout_file = load + 'Extend_Mbcg_cat/%s_%s-band_photo-z-match_pk-offset_cat_z-ref.csv' % (cat_lis[mm], band_str)\n\tzref_BCG_pos_func(cat_file, z_ref, out_file, pixel)\n\n\tprint( '%s band done !' % band_str )\n\n\"\"\"\n\n### === ### masking and resampling\nfor kk in range( 3 ):\n\n\tband_str = band[ kk ]\n\n\tfor mm in range( 2 ):\n\n\t\tdat = pds.read_csv( load + 'Extend_Mbcg_cat/%s_%s-band_pre-diffi_BCG_cat.csv' % (cat_lis[mm], band_str),)\n\t\tra, dec, z = np.array(dat.ra), np.array(dat.dec), np.array(dat.z)\n\n\t\tsub_coord = SkyCoord( ra * U.deg, dec * U.deg )\n\n\t\t#. match the position of BCGs\n\t\tref_dat = pds.read_csv( load + \n\t\t\t\t\t\t\t\t'Extend_Mbcg_cat/%s_%s-band_photo-z-match_pk-offset_cat.csv' % (cat_lis[mm], band_str),)\n\t\tref_ra, ref_dec = np.array( ref_dat['ra'] ), np.array( ref_dat['dec'] )\n\t\tref_bcgx, ref_bcgy = np.array( ref_dat['bcg_x'] ), np.array( ref_dat['bcg_y'] )\n\n\t\tref_coord = SkyCoord( ref_ra * U.deg, ref_dec * U.deg )\n\n\t\tidx, sep, d3d = sub_coord.match_to_catalog_sky( ref_coord )\n\t\tid_lim = sep.value < 2.7e-4\n\n\t\tclus_x, clus_y = ref_bcgx[ idx[ id_lim ] ], ref_bcgy[ idx[ id_lim ] ] ###. 
position have applied offset correction\n\n\t\tzN = len( ra )\n\t\tprint( zN )\n\t\tprint( clus_x.shape )\n\n\t\tm, n = divmod(zN, cpus)\n\t\tN_sub0, N_sub1 = m * rank, (rank + 1) * m\n\t\tif rank == cpus - 1:\n\t\t\tN_sub1 += n\n\n\t\tset_z, set_ra, set_dec = z[N_sub0 : N_sub1], ra[N_sub0 : N_sub1], dec[N_sub0 : N_sub1]\n\t\tset_imgx, set_imgy = clus_x[N_sub0 : N_sub1], clus_y[N_sub0 : N_sub1]\n\n\n\t\t##.. masking (exclude BCGs)\n\t\td_file = home + 'photo_data/frame-%s-ra%.3f-dec%.3f-redshift%.3f.fits.bz2'\n\t\tcat_file = home + 'photo_files/star_cats/source_SQL_Z%.3f_ra%.3f_dec%.3f.csv'\n\t\toffset_file = home + 'photo_files/pos_offset_correct_imgs/offset/%s-band_ra%.3f_dec%.3f_z%.3f_star-pos-offset.csv'\n\n\t\tgal_file = home + 'photo_files/detect_source_cat/photo-z_img_%s-band_mask_ra%.3f_dec%.3f_z%.3f.cat'\n\t\tbcg_photo_file = home + 'photo_files/BCG_photometry/BCG_photo_Z%.3f_ra%.3f_dec%.3f.txt'\n\n\t\tout_file = home + 'photo_files/pos_offset_correct_imgs/mask_img/photo-z_mask_%s_ra%.3f_dec%.3f_z%.3f.fits'\n\n\t\tbcg_mask = 0\n\n\t\tif band_str == 'r':\n\t\t\textra_cat = [ home + 'photo_files/detect_source_cat/photo-z_img_g-band_mask_ra%.3f_dec%.3f_z%.3f.cat', \n\t\t\t\t\t\t home + 'photo_files/detect_source_cat/photo-z_img_i-band_mask_ra%.3f_dec%.3f_z%.3f.cat']\n\n\t\t\textra_img = [ home + 'photo_data/frame-g-ra%.3f-dec%.3f-redshift%.3f.fits.bz2',\n\t\t\t\t\t\t home + 'photo_data/frame-i-ra%.3f-dec%.3f-redshift%.3f.fits.bz2']\n\n\t\tif band_str == 'g':\n\t\t\textra_cat = [ home + 'photo_files/detect_source_cat/photo-z_img_r-band_mask_ra%.3f_dec%.3f_z%.3f.cat', \n\t\t\t\t\t\t home + 'photo_files/detect_source_cat/photo-z_img_i-band_mask_ra%.3f_dec%.3f_z%.3f.cat']\n\n\t\t\textra_img = [ home + 'photo_data/frame-r-ra%.3f-dec%.3f-redshift%.3f.fits.bz2',\n\t\t\t\t\t\t home + 'photo_data/frame-i-ra%.3f-dec%.3f-redshift%.3f.fits.bz2']\n\n\t\tif band_str == 'i':\n\t\t\textra_cat = [ home + 'photo_files/detect_source_cat/photo-z_img_r-band_mask_ra%.3f_dec%.3f_z%.3f.cat',\n\t\t\t\t\t\t home + 'photo_files/detect_source_cat/photo-z_img_g-band_mask_ra%.3f_dec%.3f_z%.3f.cat']\n\n\t\t\textra_img = [ home + 'photo_data/frame-r-ra%.3f-dec%.3f-redshift%.3f.fits.bz2',\n\t\t\t\t\t\t home + 'photo_data/frame-g-ra%.3f-dec%.3f-redshift%.3f.fits.bz2']\n\n\t\tadjust_mask_func( d_file, cat_file, set_z, set_ra, set_dec, band_str, gal_file, out_file, bcg_mask,\n\t\t\toffset_file = offset_file, bcg_photo_file = bcg_photo_file, extra_cat = extra_cat, extra_img = extra_img,)\n\n\t\tprint( '%d, %s band, masking done !' % (mm, band_str),)\n\n\n\t\t##.. pixel resample\n\t\tmask_file = home + 'photo_files/pos_offset_correct_imgs/mask_img/photo-z_mask_%s_ra%.3f_dec%.3f_z%.3f.fits'\n\t\tresamp_file = home + 'photo_files/pos_offset_correct_imgs/resamp_img/photo-z_resamp_%s_ra%.3f_dec%.3f_z%.3f.fits'\n\n\t\tresamp_func( mask_file, set_z, set_ra, set_dec, set_imgx, set_imgy, band_str, resamp_file, z_ref,\n\t\t\tstack_info = None, pixel = 0.396, id_dimm = True,)\n\n\t\tprint( '%d, %s band, resample done !' 
% (mm, band_str),)\n\n\n","repo_name":"Kein-Cary/Intracluster-Light","sub_path":"txt_figs/rematch_catalog/test_rematch_BCGM_cat_process.py","file_name":"test_rematch_BCGM_cat_process.py","file_ext":"py","file_size_in_byte":8342,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"49"} +{"seq_id":"6328874458","text":"from threading import Thread\n\nfrom src.hw3.safe_dictionary import SafeDictionary\n\n\ndef test_safe_dictionary_multithread():\n safe_dictionary = SafeDictionary()\n\n with safe_dictionary.modify() as sd:\n sd[2] = 10\n\n def dictionary_modify(safe_dict, key: int, value: int):\n with safe_dict.modify() as sd:\n sd[key] = value\n\n threads = [Thread(target=dictionary_modify, args=(safe_dictionary, key, key**2)) for key in range(1, 4)]\n\n for thread in threads:\n thread.start()\n\n for thread in threads:\n thread.join()\n\n with safe_dictionary.modify() as sd:\n assert sd[1] == 1\n assert sd[2] == 4\n assert sd[3] == 9\n","repo_name":"samorojy/spbu_python_course","sub_path":"test/hw3/test_safe_dictionary.py","file_name":"test_safe_dictionary.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"73799665749","text":"import torch\nfrom torch import nn\nfrom torch import optim\nfrom enum import Enum\n\nfrom .modules import LoRAWLinear, LoRAWConv1d\nfrom .util import *\nfrom .attributes import *\n\n\nclass TargetableModules(Enum):\n Linear = LoRAWLinear\n Conv1d = LoRAWConv1d\n\n\ndef scan_model(model, target_blocks, whitelist=None, blacklist=None):\n # Find all targetable modules that are in targeted blocks\n target_blocks = set(target_blocks)\n # If a whitelist is specified, modules must have at least one whitelisted ancestor\n whitelist = set(whitelist) if whitelist is not None else None\n # If a blacklist is specified, modules must have no blacklisted ancestors\n blacklist = set(blacklist) if blacklist is not None else None\n module_map = {}\n for ancestor_name, ancestor_module in model.named_modules():\n ancestor_set = set(ancestor_name.split(\".\"))\n if (\n ancestor_module.__class__.__name__ in target_blocks\n and (whitelist is None or not ancestor_set.isdisjoint(whitelist))\n and (blacklist is None or ancestor_set.isdisjoint(blacklist))\n ):\n for decendant_name, decendant_module in ancestor_module.named_modules():\n if decendant_module.__class__.__name__ in TargetableModules.__members__:\n # Get parent if child is not a direct decendant\n for name in decendant_name.split(\".\")[:-1]:\n ancestor_module = ancestor_module._modules[name]\n # Since '.' 
is not allowed, replace with '/' (makes it look like a path)\n id = f\"{ancestor_name}.{decendant_name}\".replace(\".\", \"/\")\n module_map[id] = {\n \"module\": decendant_module,\n \"parent\": ancestor_module,\n }\n print(f\"Found {len(module_map)} candidates for LoRAW replacement\")\n return module_map\n\n\nclass LoRAWNetwork(nn.Module):\n def __init__(\n self,\n target_map,\n multiplier=1.0,\n lora_dim=16,\n alpha=1.0,\n dropout=None,\n module_dropout=None,\n ):\n super().__init__()\n self.active = False\n self.multiplier = multiplier\n self.lora_dim = lora_dim\n self.alpha = alpha\n self.dropout = dropout\n self.module_dropout = module_dropout\n self.lora_modules = nn.ModuleDict()\n # Scan model and create loras for respective modules\n for name, info in target_map.items():\n module = info[\"module\"]\n self.lora_modules[name] = TargetableModules[\n module.__class__.__name__\n ].value(\n name,\n module,\n multiplier=multiplier,\n lora_dim=lora_dim,\n alpha=alpha,\n dropout=dropout,\n module_dropout=module_dropout,\n )\n\n def activate(self, target_map):\n for name, module in self.lora_modules.items():\n module.inject(target_map[name][\"parent\"])\n self.active = True\n print(f\"Injected {len(self.lora_modules)} LoRAW modules into model\")\n\n def activate_forward(self):\n for _, module in self.lora_modules.items():\n module.inject_forward()\n self.active = True\n print(f\"Forwarded {len(self.lora_modules)} LoRAW modules into model\")\n\n def set_multiplier(self, multiplier):\n self.multiplier = multiplier\n for _, module in self.lora_modules.items():\n module.multiplier = self.multiplier\n\n\nclass LoRAWWrapper:\n def __init__(\n self,\n target_model,\n model_type=None,\n target_blocks=[\"Attention\"],\n component_whitelist=None,\n multiplier=1.0,\n lora_dim=16,\n alpha=1.0,\n dropout=None,\n module_dropout=None,\n ):\n self.target_model = target_model\n self.model_type = model_type\n self.target_blocks = target_blocks\n self.component_whitelist = component_whitelist\n\n self.is_active = False\n self.is_trainable = False\n\n # Gather candidates for replacement\n self.target_map = scan_model(\n target_model, target_blocks, whitelist=component_whitelist\n )\n\n # Construct LoRAW network\n self.net = LoRAWNetwork(\n self.target_map,\n multiplier=multiplier,\n lora_dim=lora_dim,\n alpha=alpha,\n dropout=dropout,\n module_dropout=module_dropout,\n )\n\n # Get a list of bottom-level lora modules, excluding the originals\n self.residual_modules = nn.ModuleDict()\n for name, module in self.net.lora_modules.items():\n self.residual_modules[f\"{name}/lora_down\"] = module.lora_down\n self.residual_modules[f\"{name}/lora_up\"] = module.lora_up\n\n def activate(self):\n assert not self.is_active, \"LoRAW is already active\"\n self.net.activate(self.target_map)\n self.is_active = True\n\n def configure_optimizers(self):\n return optim.Adam([*self.residual_modules.parameters()], lr=self.lr)\n\n def prepare_for_training(self, training_wrapper, lr=None):\n assert self.is_active, \"LoRAW must be activated before training preparation\"\n\n # Freeze target model\n for param in self.target_model.parameters():\n param.requires_grad = False\n\n # Unfreeze lora modules\n for param in self.residual_modules.parameters():\n param.requires_grad = True\n\n # Move lora to training device\n self.net.to(device=training_wrapper.device)\n\n # Replace optimizer to use lora parameters\n if lr is None:\n self.lr = training_wrapper.lr\n else:\n self.lr = lr\n training_wrapper.configure_optimizers = 
self.configure_optimizers\n\n # Trim ema model if present\n if self.model_type is not None and self.model_type in EMA_MODEL:\n trim_ema(getattr(training_wrapper, EMA_MODEL[self.model_type]))\n\n self.is_trainable = True\n\n def save_weights(self, path, dtype=torch.float16):\n torch.save(self.residual_modules.state_dict(), path)\n\n def load_weights(self, path):\n weights = torch.load(path, map_location=\"cpu\")\n info = self.residual_modules.load_state_dict(weights, False)\n return info\n\n def merge_weights(self, path, multiplier=1.0):\n weights = torch.load(path, map_location=\"cpu\")\n for name, weight in weights.items():\n param = self.residual_modules.state_dict()[name]\n param.copy_(param + weight * multiplier)\n\n def extract_diff(self, tuned_model):\n lora_weights = calculate_svds(\n self.net.lora_modules,\n tuned_model,\n self.net.lora_modules.keys(),\n rank=self.net.lora_dim,\n )\n for name, (down_weight, up_weight) in lora_weights.items():\n self.residual_modules[f\"{name}/lora_down\"].weight.copy_(down_weight)\n self.residual_modules[f\"{name}/lora_up\"].weight.copy_(up_weight)\n\n\ndef create_loraw_from_config(config, model):\n loraw_config = config[\"loraw\"]\n\n model_type = config[\"model_type\"]\n\n target_blocks = loraw_config.get(\"target_blocks\", None)\n assert target_blocks is not None, \"Must specify target blocks in config\"\n\n component_whitelist = loraw_config.get(\"component_whitelist\", None)\n assert component_whitelist is not None, \"Must specify component whitelist in config\"\n\n multiplier = loraw_config.get(\"multiplier\", None)\n assert multiplier is not None, \"Must specify multiplier in config\"\n\n rank = loraw_config.get(\"rank\", None)\n assert rank is not None, \"Must specify rank in config\"\n\n alpha = loraw_config.get(\"alpha\", None)\n assert alpha is not None, \"Must specify alpha in config\"\n\n dropout = loraw_config.get(\"dropout\", None)\n if dropout == 0: dropout = None\n\n module_dropout = loraw_config.get(\"module_dropout\", None)\n if module_dropout == 0: module_dropout = None\n\n loraw = LoRAWWrapper(\n model,\n model_type=model_type,\n target_blocks=target_blocks,\n component_whitelist=component_whitelist,\n multiplier=multiplier,\n lora_dim=rank,\n alpha=alpha,\n dropout=dropout,\n module_dropout=module_dropout,\n )\n\n return loraw\n","repo_name":"Bikecicle/LoRAW","sub_path":"loraw/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":8359,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"49"} +{"seq_id":"36959783490","text":"#!/usr/bin/env python\n\nimport math\nimport sys\nimport os\nimport re\nimport gzip\nfrom collections import defaultdict\nfrom array import array\nimport gc\nfrom collections import Counter\nimport heapq\n\n\nclass InvertedIndex:\n \"\"\"\n Class for representing an inverted index, containing methods\n for building the index and storing its files, as well as related\n functionality such as document lengths, computing scores (tiered and\n untiered\"\"\"\n\n def __init__(self, n=0, isTiered=False, nTiers=None):\n self.n = n # initial size (here static)\n self.isTiered = isTiered # is this a two-tiered index or not\n if isTiered:\n self.nTiers = nTiers\n self.indexes = []\n for i in range(nTiers):\n self.indexes.append(defaultdict(lambda: array('L')))\n assert (self.indexes[0] is not self.indexes[1])\n else:\n self.index = defaultdict(lambda: array('L')) # main index\n\n def stopwords(self):\n \"\"\"Read stopwords from the stopwords file\"\"\"\n par = 
os.path.dirname(os.getcwd())\n with open(par + '/search-engine/input/stopwords.txt', 'rt') as f:\n self.stopwords = {line.rstrip() for line in f}\n\n def parseWords(self, text, pp=True):\n \"\"\"Parse all terms in a text, and then preprocess if\n the pp parameter is passed as True; pp includes:\n lowercasing and stopword removal\"\"\"\n\n # Use regex to extract wordforms, or lemmas, etc.\n # Using lemma here depending on the preprocessing flag pp\n if pp:\n pattern = (r\"^[0-9]+\\s+\" # word number\n \"[a-zěščřžťďňńáéíýóůúA-ZĚŠČŘŽŤĎŇŃÁÉÍÝÓŮÚ]+[0-9]*\\s+\"\n \"([a-zěščřžťďňńáéíýóůúA-ZĚŠČŘŽŤĎŇŃÁÉÍÝÓŮÚ]+)[0-9]*[-_]?.*\\s+\" # use lemma\n \"[A-ZĚŠČŘŽŤĎŇŃÁÉÍÝÓŮÚ0-9-=]+\\s+\"\n \"[a-zěščřžťďňńáéíýóůúA-ZĚŠČŘŽŤĎŇŃÁÉÍÝÓŮÚ]+$\")\n\n else:\n pattern = (r\"^[0-9]+\\s+\" # word number\n \"([a-zěščřžťďňńáéíýóůúA-ZĚŠČŘŽŤĎŇŃÁÉÍÝÓŮÚ]+)[0-9]*\\s+\" # use form\n \"[a-zěščřžťďňńáéíýóůúA-ZĚŠČŘŽŤĎŇŃÁÉÍÝÓŮÚ]+[0-9]*[-_]?.*\\s+\"\n \"[A-ZĚŠČŘŽŤĎŇŃÁÉÍÝÓŮÚ0-9-=]+\\s+\"\n \"[a-zěščřžťďňńáéíýóůúA-ZĚŠČŘŽŤĎŇŃÁÉÍÝÓŮÚ]+$\")\n\n tokens = re.findall(pattern, text, re.MULTILINE)\n\n # Normalize: lowercase, filter stopwords\n if pp:\n # tokens = [token.lower() for token in tokens if token not in self.stopwords]\n tokens = [token.lower() for token in tokens]\n\n # print(tokens)\n return tokens\n\n def parseDoc(self, f, title=False):\n \"\"\"Parse the docid, title, heading and text of a document as\n necessary. I used title to compile the title index (tier 1) and\n text to compile the text index (tier 2)\"\"\"\n\n text = f.read()\n # (tag names below are assumed from the docstring; the original SGML markup was stripped from this copy)\n docidMatch = re.search('<DOCID>(.*)</DOCID>', text, re.DOTALL)\n docid = docidMatch.group(1)\n\n # Get title and return for tier 1 index\n if title:\n titleMatch = re.search('<TITLE>(.*)</TITLE>', text, re.DOTALL)\n if titleMatch is not None:\n text = titleMatch.group(1)\n else:\n text = None\n\n # Otherwise, parse and return <TEXT>\n else:\n textMatch = re.search('<TEXT>(.*)</TEXT>', text, re.DOTALL)\n text = textMatch.group(1)\n# if len(textMatch) > 1:\n# raise EnvironmentError()\n\n #if docid is None or text is None:\n # raise IOError(\"document file does not conform to format\")\n\n return docid, text # change for text\n\n def parseQuery(self, topic):\n \"\"\"Parse the title, description and narrative of the topic/query\"\"\"\n\n with open(topic, 'rt') as f:\n\n topic = f.read()\n\n # Perform a regex search on text\n # (tag names assumed: <num>, <title>, <desc>, <narr> as in TREC-style topic files)\n qid = re.search('<num>(.*)</num>', topic, re.DOTALL)\n title = re.search('<title>(.*)</title>', topic, re.DOTALL)\n desc = re.search('<desc>(.*)</desc>', topic, re.DOTALL)\n narr = re.search('<narr>(.*)</narr>', topic, re.DOTALL)\n\n if qid is None or (title is None and desc is None and narr is None):\n raise IOError(\"topic file does not conform to format\")\n\n # Capture and store groups in a dict\n parsedTopic = {}\n parsedTopic['qid'] = qid.group(1)\n parsedTopic['title'] = title.group(1)\n parsedTopic['desc'] = desc.group(1) if desc else None\n parsedTopic['narr'] = narr.group(1) if narr else None\n\n return parsedTopic\n\n def combineInts(self, x, y):\n \"\"\"Helper method to combine a docid and tf integers, so as\n to speed up indexing and memory usage.\"\"\"\n\n hi = x << 32\n lo = y & 0x0000FFFF\n return hi | lo\n\n def splitInts(self, z):\n \"\"\"Helper method to recover original docid and tf resulting\n from above CombineInts method.\"\"\"\n\n x = z >> 32\n y = z & 0x0000FFFF\n return x, y\n\n def truncateDocid(self, docid):\n \"\"\"Helper method to shorten redundancies in\n docids for this task, converting to an int, thereby saving memory\"\"\"\n\n if docid[0] == 'L':\n docid = '1' + docid[7:] # begins with LN\n else:\n docid = '2' + docid[7:] # begins with MF\n\n return
int(docid)\n\n def expandDocid(self, docid):\n \"\"\"Helper method to recover full docid in string form\"\"\"\n\n docid = str(docid)\n if docid[0] == '1':\n docid = 'LN-2002' + docid[1:] # begins with LN\n else:\n docid = 'MF-2002' + docid[1:] # begins with MF\n\n return docid\n\n def getPostings(self, offset, tier):\n \"\"\"Given a word offset from list, get its posting list\"\"\"\n\n with gzip.open(cwd + tier + '/index.gz', 'rt') as f:\n f.seek(offset)\n line = f.readline()\n return line\n\n def calculateWeightOfTerm(self, term, tf, df, scheme=None, doc=None, tier=0):\n \"\"\"Given a term, its tf, df, a scheme list (doc, query), a\n document (unless we're dealing with a query, and a tier index\n calculate its unnormalized weight (tf * df).\"\"\"\n\n if not scheme:\n return 1 # boolean method\n\n # Get term freq scheme\n if scheme[0] == 'n': # natural\n tf = float(tf)\n elif scheme[0] == 'l': # logarithmic\n tf = 1.0 + math.log10(tf)\n elif scheme[0] == 'a' and doc is not None: # augmented (scale by max tf)\n tf = .4 + .6 * tf / maxTF[tier][doc]\n elif scheme[0] == 'b': # boolean\n tf = 1.0 if tf > 0 else 0.0\n elif scheme[0] == 'L': # boolean\n tf = (1.0 + math.log10(tf) / 1.0 + math.log10(aveTF[tier][doc]))\n else:\n raise AttributeError(\"Illegal scheme for tf.\")\n\n # Get doc freq scheme\n if scheme[1] == 'n': # natural\n df = 1.0\n elif scheme[1] == 't': # idf\n df = math.log10(float(self.n)/df)\n elif scheme[1] == 'p': # prob idf\n df = max(0, math.log10((self.n - df)/float(df)))\n else:\n raise AttributeError(\"Illegal scheme for df.\")\n return tf * df\n\n def atEndOfLists(self, lists):\n \"\"\"Helper method for DAAT processing of indices\"\"\"\n for pl in lists:\n if pl.current() != -1:\n print(\"not at end of list\")\n return False\n return True\n\n def cosineScoreTiered(self, query, docScheme, queryScheme, k, pp=True):\n \"\"\"Method for computing the cosine score of a\n query using a tiered index. The algorithm works as follows:\n first level 1 scores are computed. 
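(For example, with k = 100: if the title tier alone yields fewer than 100 scored documents, the text tier is searched too,\n and its contributions accumulate into the same score table before the top k are re-selected with heapq.nlargest.)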
If all k docs are\n not found, the next level is searched and merged with\n previous level results.\"\"\"\n\n # Parse query from title\n query = query.rstrip()\n parsedQuery = index.parseQuery(query)\n qid = parsedQuery['qid'].strip() # number of topic doc\n title = parsedQuery['title'] # use for query terms\n\n # Get terms and normalize length if necessary\n terms = index.parseWords(title, pp) # lowercase text, filter stopwords\n terms = Counter(terms) # get counts and store in dict/hash\n\n # For each query term calculate weight\n scores = defaultdict(lambda: 0.0)\n numTiers = 2 # first (base/titles) and second tier (text)\n n = 0 # num elements in heap\n tier = 0 # start here at base tier\n while tier < numTiers and n <= k:\n print('Tier', tier)\n\n # First get all lists for query terms and query term weights for this tier\n for term, tf in terms.items():\n if term in offsets[tier]:\n offset = offsets[tier][term] # byte offsets\n indexes[tier].seek(offset) # get postings\n\n # Process postings\n # term 10 LN-20020114114,1 LN-20020121109,2 LN-20020123062,3\n line = indexes[tier].readline().rstrip().split()\n df = float(line[1])\n queryWeight = self.calculateWeightOfTerm(term, tf, df,\n queryScheme, doc=None, tier=tier) # weight of query\n postings = line[2:]\n postings = [posting.split(',') for posting in postings]\n postings = [(doc, int(tf)) for doc, tf in postings]\n\n # Get scores TAAT for docs in postings\n for doc, tf in postings:\n docWeight = self.calculateWeightOfTerm(term, tf, df,\n docScheme, doc, tier) # weight of doc\n score = queryWeight * docWeight\n scores[doc] += score\n\n else:\n print(\"Word {} not in index. Skipping...\".format(term))\n\n # Normalize query if specified (only cosine normalization here)\n lengthQuery = 0.0\n if queryScheme[-1] == 'c': # cosine norm\n for term, tf in terms.items():\n lengthQuery += tf * tf\n # print(token, tf, length)\n lengthQuery = math.sqrt(lengthQuery)\n else:\n lengthQuery = 1.0 # no norm\n\n # Normalize scores\n if docScheme[-1] == 'c':\n for doc, score in scores.items():\n lengthDoc = docLengths[tier][doc]\n scores[doc] /= lengthDoc * lengthQuery # normalize (cosine)\n elif docScheme[-1] == 'u':\n a = .65 # slope normally between .25-.4, but .65 seemed optimal\n pivot = 2630 # 2630 = ave bytes in disks 1-2 of TREC\n for doc, score in scores.items():\n pivotedLengthDoc = a * uniq[tier][doc] + (1 - a) * pivot\n scores[doc] /= pivotedLengthDoc * lengthQuery # normalize (cosine)\n\n # Get top k scores using a heap method\n topKScores = heapq.nlargest(k, scores.items(), key=lambda x: x[1])\n n = len(topKScores)\n\n # Have k scores, done, write to disk, return\n if n == k or tier == numTiers - 1:\n print(\"more than k scores. Or just done\")\n\n # Write to disk\n for i, (doc, score) in enumerate(topKScores):\n res = str(qid) + ' ' + '0 ' + doc + ' ' + str(i) + ' ' + str(score) + ' ' + runId + '\\n'\n results.write(res)\n return topKScores\n\n # Need more scores so go to next tier\n else:\n print(\"less than k scores moving to next tier\")\n tier += 1\n\n\n def cosineScoreDAAT(self, query, docScheme, queryScheme, k):\n \"\"\"\n Document-at-a-time method for computing cosine score.\n\n Unfinished, as I couldn't figure out how to get\n the iterator to work. Also need to pre-compute the\n upper/max threshold for documents. 
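(One standard way to finish this is WAND-style pruning: precompute an upper bound on each term's per-document\n contribution, and while stepping document-at-a-time skip any candidate whose summed upper bounds cannot beat\n the current k-th best score in the heap.)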
\"\"\"\n\n class PostingsList:\n\n def __init__(self, l):\n self.list = l\n self.n = len(l)\n self.ptr = 0\n self.peakPtr = 0\n\n def peak(self):\n if self.peakPtr < self.n:\n self.peakPtr += 1\n return self.list[self.peakPtr - 1][0] # return next doc\n else:\n return -1\n\n def next(self):\n if self.ptr < self.n:\n self.ptr += 1\n return self.list[self.ptr - 1][0] # return next doc\n else:\n return -1\n\n def resetPeakPtr(self):\n peakPtr = ptr\n\n def current(self):\n if self.ptr < self.n:\n return self.list[self.ptr][0] # return current doc\n return -1\n\n # Parse query from title\n query = query.rstrip()\n parsedQuery = index.parseQuery(query)\n qid = parsedQuery['qid'].strip() # number of topic doc\n title = parsedQuery['title'] # use for query terms\n\n # Get terms and normalize length if necessary\n terms = index.parseWords(title) # lowercase text, filter stopwords\n terms = Counter(terms) # get counts and store in dict/hash\n\n # For each query term calculate weight\n # scores = defaultdict(lambda: 0.0)\n scores = [] # heap\n numTiers = 2 # first (base/titles) and second tier (text)\n n = 0 # num elements in heap\n tier = 0 # start here at base tier\n while tier < numTiers and n <= k:\n # For each query term calculate weight\n scores = defaultdict(lambda: 0.0)\n for term, tf in terms.items():\n if term in offsets:\n offset = offsets[term]\n# print(\"getting {} at {}\".format(term, offset))\n indexFile.seek(offset)\n line = indexFile.readline().rstrip().split()\n df = float(line[1])\n wQuery = self.calculateWeightOfTerm(term, tf, df, queryScheme) # weight of query\n# print(\"df\", df)\n# print(\"w query=\", wQuery)\n postings = line[2:]\n postings = [posting.split(',') for posting in postings]\n postings = [(doc, int(tf)) for doc, tf in postings]\n# print(postings)\n for doc, tf in postings:\n# print(doc, tf)\n wDoc = self.calculateWeightOfTerm(term, tf, df, docScheme, doc) # weight of doc\n score = wQuery * wDoc\n# print('score', score)\n scores[doc] += wQuery * wDoc\n# score += float(tf)\n# print(doc, scores[doc])\n\n else:\n print(\"Word {} not in index. 
Skipping...\".format(term))\n\n # First get all lists for query terms and query term weights for this tier\n lists = [] # tier indices\n qWeights = [] # tier query w\n for i, (term, tf) in enumerate(terms.items()):\n if term in offsets[tier]:\n offset = offsets[tier][term]\n # print(\"getting {} at {}\".format(term, offset))\n indexes[tier].seek(offset)\n line = indexes[tier].readline().rstrip().split()\n df = float(line[1])\n qWeight = self.calculateWeightOfTerm(term, tf, df,\n queryScheme, tier) # weight of query\n # print(\"df\", df)\n # print(\"w query=\", wQuery)\n qWeights.append(qWeight)\n postings = line[2:]\n postings = [posting.split(',') for posting in postings]\n postings = [(doc, int(tf)) for doc, tf in postings]\n pl = PostingsList(postings) # instantiate posting class\n lists.append(pl)\n\n print('added posting for: ', term)\n print(postings)\n\n # for pl in lists:\n #print(pl.list)\n #print(pl.ptr)\n #print(pl.next())\n\n # Now calculate scores for tier, doc at a time:\n # if above threshold equal to current min, add to heap\n # print(postings)\n\n\n while not self.atEndOfLists(lists):\n print(\"getting scores for lists of len\", len(lists))\n for pl in lists:\n thisDoc = pl.current()\n print(\"this\", thisDoc)\n # Search for doc in other lists by advancing ptrs\n for otherPL in lists:\n if otherPL is not pl:\n\n otherDoc = otherPL.current()\n print(\"init other\", otherDoc)\n while otherDoc < thisDoc: # advance next ptr\n otherDoc = otherPL.peak()\n print(\"next other\", otherDoc)\n if otherDoc == -1: # reached end of list\n print(\"end of list\")\n break\n if otherDoc == thisDoc:\n print(\"same doc\")\n\n # Advance ptr for all lists\n for pl in lists:\n pl.next()\n break\n # wDoc = self.calculateWeightOfTerm(term, tf, df, docScheme, doc) # weight of doc\n # score = wQuery * wDoc\n # # print('score', score)\n # scores[doc] += wQuery * wDoc\n # # score += float(tf)\n # # print(doc, scores[doc])\n\n # else:\n # print(\"Word {} not in index. 
Skipping...\".format(term))\n\n # lengthQuery = 0.0\n # if queryScheme[-1] == 'c': # cosine norm\n # for term, tf in terms.items():\n # lengthQuery += tf * tf\n # # print(token, tf, length)\n # lengthQuery = math.sqrt(lengthQuery)\n # else:\n # lengthQuery = 1.0 # no norm\n\n # # print(\"len query=\", lengthQuery)\n\n # # Normalize scores\n # if docScheme[-1] == 'c':\n # for doc, score in scores.items():\n # # print(doc, score)\n # lengthDoc = docLengths[doc]\n # # print(\"len=\", lengthDoc, lengthQuery)\n # scores[doc] /= lengthDoc * lengthQuery # normalize (cosine)\n # # print(\"norm\", scores[doc])\n # elif docScheme[-1] == 'u':\n # a = .5 # slope normally between .25-.4\n # pivot = 2730 # ave bytes in disks 1-2 of TREC\n # for doc, score in scores.items():\n # # print(doc, score)\n # pivotedLengthDoc = a * uniq[doc] + (1 - a) * pivot\n # # print(\"len=\", lengthDoc, lengthQuery)\n # scores[doc] /= pivotedLengthDoc * lengthQuery # normalize (cosine)\n\n # # Get top k scores\n # topK = heapq.nlargest(k, scores.items(), key=lambda x: x[1])\n\n # # Write to disk\n # for i, (doc, score) in enumerate(topK):\n # res = str(qid) + ' ' + '0 ' + doc + ' ' + str(i) + ' ' + str(score) + ' ' + run + '\\n'\n # results.write(res)\n\n # return topK\n\n def cosineScoreTAAT(self, query, docScheme, queryScheme, k, pp=True):\n \"\"\"\n Original non-tiered method for computing the cosine\n score\"\"\"\n\n tier = 0 # no tier here, so only one tier in list of index files [indexFile]\n\n # Parse query from title\n query = query.rstrip()\n parsedQuery = index.parseQuery(query)\n qid = parsedQuery['qid'].strip() # number of topic doc\n title = parsedQuery['title'] # use for query terms\n\n # Get terms and normalize length if necessary\n terms = index.parseWords(title, pp) # lowercase text, filter stopwords\n terms = Counter(terms) # get counts and store in dict/hash\n\n # For each query term calculate weight\n scores = defaultdict(lambda: 0.0)\n for term, tf in terms.items():\n if term in offsets[tier]:\n offset = offsets[tier][term]\n# print(\"getting {} at {}\".format(term, offset))\n indexes[tier].seek(offset)\n line = indexes[tier].readline().rstrip().split()\n df = float(line[1])\n wQuery = self.calculateWeightOfTerm(term, tf, df, queryScheme, doc=None) # weight of query\n# print(\"df\", df)\n# print(\"w query=\", wQuery)\n postings = line[2:]\n postings = [posting.split(',') for posting in postings]\n postings = [(doc, int(tf)) for doc, tf in postings]\n# print(postings)\n for doc, tf in postings:\n# print(doc, tf)\n wDoc = self.calculateWeightOfTerm(term, tf, df, docScheme, doc) # weight of doc\n score = wQuery * wDoc\n# print('score', score)\n scores[doc] += wQuery * wDoc\n# score += float(tf)\n# print(doc, scores[doc])\n\n else:\n print(\"Word {} not in index. 
Skipping...\".format(term))\n\n lengthQuery = 0.0\n if queryScheme[-1] == 'c': # cosine norm\n for term, tf in terms.items():\n lengthQuery += tf * tf\n # print(token, tf, length)\n lengthQuery = math.sqrt(lengthQuery)\n else:\n lengthQuery = 1.0 # no norm\n\n# print(\"len query=\", lengthQuery)\n\n # Normalize scores\n if docScheme[-1] == 'c':\n for doc, score in scores.items():\n# print(doc, score)\n lengthDoc = docLengths[tier][doc]\n# print(\"len=\", lengthDoc, lengthQuery)\n scores[doc] /= lengthDoc * lengthQuery # normalize (cosine)\n# print(\"norm\", scores[doc])\n elif docScheme[-1] == 'u':\n a = .65 # slope normally between .25-.4\n pivot = 2630 # ave bytes in disks 1-2 of TREC\n for doc, score in scores.items():\n# print(doc, score)\n pivotedLengthDoc = a * uniq[tier][doc] + (1 - a) * pivot\n# print(\"len=\", lengthDoc, lengthQuery)\n scores[doc] /= pivotedLengthDoc * lengthQuery # normalize (cosine)\n#\n # Get top k scores\n topKScores = heapq.nlargest(k, scores.items(), key=lambda x: x[1])\n\n # Write to disk\n for i, (doc, score) in enumerate(topKScores):\n res = str(qid) + ' ' + '0 ' + doc + ' ' + str(i) + ' ' + str(score) + ' ' + runId + '\\n'\n results.write(res)\n\n return topKScores\n\n def buildIndex(self, tier=None, tierName=None, pp=True):\n \"\"\"Build index (indices if tiered)\"\"\"\n\n gc.enable()\n if self.isTiered:\n curIndex = self.indexes[tier]\n else:\n curIndex = self.index\n\n lengths = {} # for calculating and storing document (cosine) lengths\n uniq = {}\n aveTF = {}\n maxTF = {}\n\n # Parse all documents\n for doc in open('documents.list', 'rt'):\n\n # First parse and get token counts for each doc\n docid, counts = self.getTokenCounts(doc, tier, pp)\n if docid is None or counts is None:\n continue\n # Add terms to index\n self.addToIndex(docid, counts, tier)\n\n # Calculate normalized doc length\n lengths[docid] = self.calculateDocLen(counts)\n\n # Calculate unique terms in doc\n uniq[docid] = self.calculateNumberUniqTerms(counts)\n\n # Calculate ave tf in doc\n aveTF[docid] = self.calculateAveTermFreq(counts)\n\n # Calculate max tf in doc\n maxTF[docid] = self.calculateMaxTermFreq(counts)\n\n\n # Write data to disk\n self.writeIndex(tier, tierName) # write index, creating offsets\n self.writeOffsets(tier, tierName) # write offsets\n self.write(lengths, 'lengths', tierName)\n del lengths\n self.write(uniq, 'uniq', tierName)\n del uniq\n self.write(aveTF, 'ave-tf', tierName)\n del aveTF\n self.write(maxTF, 'max-tf', tierName)\n del maxTF\n\n # Get rid of garbage\n gc.collect()\n\n def getTokenCounts(self, doc, tier=None, pp=True):\n \"\"\"Helper method for getting the tokens and their counts\n for a document.\n Returns (docid, counts), where docid is compressed/abbreviated\n form. 
\"\"\"\n\n # Get token counts\n fname = doc.rstrip() # documents/LN-20020102023.vert\n# path = cwd + fname\n f = gzip.open(fname + '.gz', 'rt')\n\n # Only parse terms in of doc if tier 0, else parse all of <TEXT>\n if tier == 0:\n docid, text = self.parseDoc(f, title=True)\n else:\n docid, text = self.parseDoc(f)\n\n # Truncate prefix of doc id in order to save space: will expand later\n docid = self.truncateDocid(docid)\n\n # Most likely no title in text\n if text is None:\n # print('no text')\n return docid, None\n\n # Parse words in doc and preprocess text\n tokens = self.parseWords(text, pp)\n if not tokens:\n return docid, None\n\n return docid, Counter(tokens)\n\n def addToIndex(self, docid, counts, tier=None):\n \"\"\"Add docid and tfs of a doc in compressed form to index.\n If tiered, add to current tier index.\"\"\"\n\n for token, tf in counts.items():\n\n # Combine 32 bit docid and tf into a 64-bit long to save space (recover later)\n idPlusTf = self.combineInts(docid, tf)\n if self.isTiered:\n self.indexes[tier][token].append(idPlusTf)\n assert self.indexes[0] is not self.indexes[1]\n else:\n self.index[token].append(idPlusTf) # append a new entry and postings list\n\n def calculateDocLen(self, counts):\n \"\"\"Computer and write doc length for scoring\"\"\"\n\n # Calculate length of doc\n length = 0\n for token, cnt in counts.items():\n length += cnt * cnt\n length = math.sqrt(length)\n\n return length\n\n def calculateNumberUniqTerms(self, counts):\n \"\"\"Compute number of unique terms in a doc, used in cosine score\n calculation.\"\"\"\n\n return len(counts)\n\n def calculateMaxTermFreq(self, counts):\n \"\"\"Compute and write max term frequency for scoring.\"\"\"\n\n return counts.most_common(1)[0][1] # tf of most common element\n\n def calculateAveTermFreq(self, counts):\n \"\"\"Compute average term frequency in doc, used for scoring.\"\"\"\n\n s = sum([c for t, c in counts.items()]) # sum tfs\n return s / float(len(counts))\n\n def writeIndex(self, tier=None, tierName=None):\n \"\"\"Helper method to write index to disk. 
\n\n gc.enable()\n\n if self.isTiered:\n f = gzip.open(cwd + run + '/' + tierName + '/index.gz', 'wt')\n curIndex = self.indexes[tier] # get ref to cur tier index\n else:\n f = gzip.open(cwd + run + '/index.gz', 'wt')\n curIndex = self.index # no tier, just index\n\n print(\"writing index\")\n for token, postings in sorted(curIndex.items()):\n df = len(postings)\n postings = [self.splitInts(post) for post in postings]\n postings = [str(self.expandDocid(x)) + ',' + str(y) for x, y in postings]\n offset = f.tell()\n f.write(token + '\\t' + str(df) + '\\t' + ' '.join(postings) + '\\n')\n curIndex[token] = offset # replace posting with offset\n\n # Clean up\n f.close()\n del curIndex\n del postings\n gc.collect()\n\n def writeOffsets(self, tier=None, tierName=None):\n \"\"\"Helper method to write byte offsets of term to disk\"\"\"\n\n gc.enable()\n\n if self.isTiered:\n f = gzip.open(cwd + run + '/' + tierName + '/offsets.gz', 'wt')\n curIndex = self.indexes[tier] # get ref to cur tier index\n else:\n f = gzip.open(cwd + run + '/offsets.gz', 'wt')\n curIndex = self.index # no tier, just index\n\n print(\"writing offsets\")\n for token, offset in sorted(curIndex.items()):\n f.write(token + '\\t' + str(offset) + '\\n')\n\n # Clean up\n f.close()\n del curIndex\n\n def write(self, data, file, tierName=None):\n \"\"\"Helper method to write data in the form of a dict\n with docid keys to disk\"\"\"\n\n if self.isTiered:\n f = gzip.open(cwd + run + '/' + tierName + '/' + file + '.gz', 'wt')\n else:\n f = gzip.open(cwd + run + '/' + file + '.gz', 'wt')\n\n print(\"writing \", file)\n for docid, val in data.items():\n f.write(self.expandDocid(docid) + '\\t' + str(val) + '\\n')\n\n # Close the file so the gzip stream is flushed to disk\n f.close()\n\nif __name__ == \"__main__\":\n\n os.chdir('..') # should be in search-engine/\n cwd = os.getcwd() + '/output/' # working dir for output\n os.chdir('../A1') # data in here\n\n # Get sys args\n if len(sys.argv) < 2:\n raise ValueError('Must provide run type')\n\n runId = sys.argv[1] # test-run-0 baseline, etc.\n run = runId[-5:] # trim to run-0\n\n if not os.path.exists(cwd + run):\n os.makedirs(cwd + run) # .../output/run-0/...\n\n # Instantiate index class\n if 't' in sys.argv[2]:\n isTiered = True # this is a two-tiered index\n nTiers = 2\n else:\n isTiered = False\n nTiers = None\n index = InvertedIndex(81735, isTiered, nTiers) # instantiate index with size n, nTiers\n index.stopwords() # read stop word list\n\n # Train/Test query/topics\n if 'q' in sys.argv[2]:\n\n print(\"query/topics test/train\")\n if len(sys.argv) >= 9:\n docScheme = sys.argv[3] # ddd triplet\n queryScheme = sys.argv[4] # qqq triplet\n k = int(sys.argv[5])\n topicsList = sys.argv[6] # 'test-topics.list'\n docsList = sys.argv[7] # 'documents.list'\n out = cwd + sys.argv[8] # .../output/.dat file\n if len(sys.argv) == 10 and 'pp' in sys.argv[9]: # pre-process (pp)\n pp = True\n else:\n pp = False\n\n # Load data - tier one and tier two\n offsets = []\n if isTiered:\n dirs = ['/tier0/offsets.gz', '/tier1/offsets.gz']\n else:\n dirs = ['/offsets.gz']\n for d in dirs:\n f = gzip.open(cwd + run + d, 'rt')\n off = {}\n for line in f:\n word, offset = line.rstrip().split()\n off[word] = int(offset)\n offsets.append(off)\n\n docLengths = []\n if isTiered:\n dirs = ['/tier0/lengths.gz', '/tier1/lengths.gz']\n else:\n dirs = ['/lengths.gz']\n for d in dirs:\n f = gzip.open(cwd + run + d, 'rt')\n dl = {}\n for line in f:\n doc, length = line.rstrip().split()\n dl[doc] = float(length)\n docLengths.append(dl)
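\n# A small aside on the on-disk format (illustrative only; the docids and tfs below are hypothetical): each line of index.gz written by writeIndex() has the shape token<TAB>df<TAB>docid,tf docid,tf ..., and cosineScoreTAAT() parses one such line like this:\n#\n# line = \"apple\\t2\\tLN-001,3 LN-002,1\".rstrip().split()\n# df = float(line[1]) # document frequency -> 2.0\n# postings = [p.split(',') for p in line[2:]]\n# postings = [(doc, int(tf)) for doc, tf in postings]\n# # -> [('LN-001', 3), ('LN-002', 1)]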
\n\n maxTF = []\n if isTiered:\n dirs = ['/tier0/max-tf.gz', '/tier1/max-tf.gz']\n else:\n dirs = ['/max-tf.gz']\n for d in dirs:\n f = gzip.open(cwd + run + d, 'rt')\n mtf = {} # reset per tier so tiers do not share one dict\n for line in f:\n doc, tf = line.rstrip().split()\n mtf[doc] = int(tf)\n maxTF.append(mtf)\n\n aveTF = []\n if isTiered:\n dirs = ['/tier0/ave-tf.gz', '/tier1/ave-tf.gz']\n else:\n dirs = ['/ave-tf.gz']\n for d in dirs:\n f = gzip.open(cwd + run + d, 'rt')\n atf = {}\n for line in f:\n doc, tf = line.rstrip().split()\n atf[doc] = float(tf)\n aveTF.append(atf)\n\n uniq = []\n if isTiered:\n dirs = ['/tier0/uniq.gz', '/tier1/uniq.gz']\n else:\n dirs = ['/uniq.gz']\n for d in dirs:\n f = gzip.open(cwd + run + d, 'rt')\n un = {}\n for line in f:\n doc, u = line.rstrip().split()\n un[doc] = int(u)\n uniq.append(un)\n\n # Compute scores for all documents in list\n indexes = []\n with open(topicsList, 'rt') as topicsListFile:\n if isTiered:\n dirs = ['/tier0/index.gz', '/tier1/index.gz']\n else:\n dirs = ['/index.gz']\n for d in dirs:\n f = gzip.open(cwd + run + d, 'rt')\n indexes.append(f)\n with open(out, 'wt') as results:\n\n # Get query terms for `title` field in topics list\n if isTiered:\n for query in topicsListFile:\n # Score for the tiered set up\n score = index.cosineScoreTiered(query, docScheme, queryScheme, k, pp)\n else:\n for query in topicsListFile:\n # Score for the non-tiered (TAAT) set up\n score = index.cosineScoreTAAT(query, docScheme, queryScheme, k, pp)\n\n # Close files\n for f in indexes:\n f.close()\n\n # Build index\n # USAGE: python3 invertedIndex.py {-b, -bt} [-pp]\n if 'b' in sys.argv[2]:\n if len(sys.argv) == 4 and 'pp' in sys.argv[3]: # pre-process text\n pp = True\n else:\n pp = False\n\n if isTiered: # tiered index: [-bt]\n if not os.path.exists(cwd + run + '/tier0'):\n os.makedirs(cwd + run + '/tier0') # .../output/run0/tier0/...\n if not os.path.exists(cwd + run + '/tier1'):\n os.makedirs(cwd + run + '/tier1') # .../output/run0/tier1/...\n index.buildIndex(0, 'tier0', pp) # build tier 0 (titles)\n index.buildIndex(1, 'tier1', pp) # build tier 1 (text)\n\n else:\n index.buildIndex(pp=pp)\n","repo_name":"ericlief/search-engine","sub_path":"src/invertedIndex.py","file_name":"invertedIndex.py","file_ext":"py","file_size_in_byte":35329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"}
{"seq_id":"1552206490","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n@class : FeatureExtraction \r\n\r\n@method : find_feature\r\n\r\n@parameter \r\n document: string\r\n\r\n@author: Niraj Gautam\r\n\"\"\"\r\n\r\nfrom nltk.tokenize import word_tokenize\r\nfrom helper import constant\r\nimport pickle\r\nclass Featurextraction:\r\n \r\n filePath= constant.constant()\r\n word_feature_file=\"word_features.txt\"\r\n \r\n def __init__(self):\r\n self.word_features = open (self.filePath.path() + self.word_feature_file, \"rb\")\r\n self.word_features = pickle.load(self.word_features)\r\n \r\n def find_feature(self,document):\r\n \"\"\"\r\n @parameter\r\n document: string \r\n @return \r\n dict mapping each known feature word to its presence in the tokenized document\r\n \"\"\"\r\n words = word_tokenize(document)\r\n features = {}\r\n for w in self.word_features:\r\n features[w] = (w in words)\r\n return features\r\n 
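\r\n# Minimal usage sketch (assumes the word_features.txt pickle exists at the configured path; the quote text is a made-up sample):\r\n#\r\n# fe = Featurextraction()\r\n# feats = fe.find_feature(\"To be or not to be\")\r\n# # feats maps every known feature word to True/False for this document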
\r\n","repo_name":"14987/Quote-Classification-NLP","sub_path":"helper/feature_extraction.py","file_name":"feature_extraction.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"49"} +{"seq_id":"35972171364","text":"import warnings\n\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\n\nfrom paddleseg.cvlibs import manager\nfrom paddleseg.models import layers\nfrom paddleseg.utils import utils\nfrom paddleseg.models.backbones.strideformer import ConvBNAct\n\n\n@manager.MODELS.add_component\nclass PPMobileSeg(nn.Layer):\n \"\"\"\n The PP_MobileSeg implementation based on PaddlePaddle.\n\n The original article refers to \"Shiyu Tang, Ting Sun, Juncai Peng, Guowei Chen, Yuying Hao, \n Manhui Lin, Zhihong Xiao, Jiangbin You, Yi Liu. PP-MobileSeg: Explore the Fast and Accurate \n Semantic Segmentation Model on Mobile Devices. https://arxiv.org/abs/2304.05152\"\n\n\n Args:\n num_classes(int): The unique number of target classes.\n backbone(nn.Layer): Backbone network.\n head_use_dw (bool, optional): Whether the head use depthwise convolutions. Default: True.\n align_corners (bool, optional): Set the align_corners in resizing. Default: False.\n pretrained (str, optional): The path or url of pretrained model. Default: None.\n upsample (str, optional): The type of upsample module, valid for VIM is recommend to be used during inference. Default: intepolate.\n \"\"\"\n\n def __init__(self,\n num_classes,\n backbone,\n head_use_dw=True,\n align_corners=False,\n pretrained=None,\n upsample='intepolate'):\n super().__init__()\n self.backbone = backbone\n self.upsample = upsample\n self.num_classes = num_classes\n\n self.decode_head = PPMobileSegHead(\n num_classes=num_classes,\n in_channels=backbone.feat_channels[0],\n use_dw=head_use_dw,\n align_corners=align_corners)\n\n self.align_corners = align_corners\n self.pretrained = pretrained\n self.init_weight()\n\n def init_weight(self):\n if self.pretrained is not None:\n utils.load_entire_model(self, self.pretrained)\n\n def forward(self, x):\n x_hw = x.shape[2:]\n x = self.backbone(x)\n x = self.decode_head(x)\n if self.upsample == 'intepolate' or self.training or self.num_classes < 30:\n x = F.interpolate(\n x, x_hw, mode='bilinear', align_corners=self.align_corners)\n elif self.upsample == 'vim':\n labelset = paddle.unique(paddle.argmax(x, 1))\n x = paddle.gather(x, labelset, axis=1)\n x = F.interpolate(\n x, x_hw, mode='bilinear', align_corners=self.align_corners)\n\n pred = paddle.argmax(x, 1)\n pred_retrieve = paddle.zeros(pred.shape, dtype='int32')\n for i, val in enumerate(labelset):\n pred_retrieve[pred == i] = labelset[i].cast('int32')\n\n x = pred_retrieve\n else:\n raise NotImplementedError(self.upsample, \" is not implemented\")\n\n return [x]\n\n\nclass PPMobileSegHead(nn.Layer):\n def __init__(self,\n num_classes,\n in_channels,\n use_dw=False,\n dropout_ratio=0.1,\n align_corners=False):\n super().__init__()\n self.align_corners = align_corners\n self.last_channels = in_channels\n\n self.linear_fuse = ConvBNAct(\n in_channels=self.last_channels,\n out_channels=self.last_channels,\n kernel_size=1,\n stride=1,\n groups=self.last_channels if use_dw else 1,\n act=nn.ReLU)\n self.dropout = nn.Dropout2D(dropout_ratio)\n self.conv_seg = nn.Conv2D(\n self.last_channels, num_classes, kernel_size=1)\n\n def forward(self, x):\n x = self.linear_fuse(x)\n x = self.dropout(x)\n x = self.conv_seg(x)\n return 
x\n","repo_name":"PaddlePaddle/PaddleSeg","sub_path":"paddleseg/models/pp_mobileseg.py","file_name":"pp_mobileseg.py","file_ext":"py","file_size_in_byte":3809,"program_lang":"python","lang":"en","doc_type":"code","stars":7775,"dataset":"github-code","pt":"49"} +{"seq_id":"31795271181","text":"from django.urls import path\nfrom . import views\n\napp_name = 'memo'\nurlpatterns = [\n path('', views.index, name='index'),\n path('project/<str:pk>/', views.project, name='project'),\n path('update-memo/<str:pk>/', views.update_memo, name='update-memo'),\n path('delete-memo/<str:pk>/', views.delete_memo, name='delete-memo'),\n]","repo_name":"KoyoMiyazaki/Portfolio","sub_path":"portfolio_app/memo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"4109331331","text":"#insert from end[rear] delete from start[front]\n\n#rear removing O(n) and adding O(1)\n#front both O(1)\n#so insert from end[rear] delete from start[front]\n\n#both withh be O(1)\n\n\n# class Node:\n# def __init__(self, value):\n# self.value = value\n# self.next = None\n#\n#\n# class Queue:\n# def __init__(self, value):\n# new_node = Node(value)\n# self.first = new_node\n# self.last = new_node\n# self.length = 1\n#\n# def print_queue(self):\n# temp = self.first\n# while temp is not None:\n# print(temp.value)\n# temp = temp.next\n#\n# def enqueue(self, value):#insert\n# new_node = Node(value)\n# if self.first is None:\n# self.first = new_node\n# self.last = new_node\n# else:\n# self.last.next = new_node\n# self.last = new_node\n# self.length += 1\n# return True\n#\n# def dequeue(self):#remove\n# if self.length == 0:\n# return None\n# temp = self.first\n# if self.length == 1:\n# self.first = None\n# self.last = None\n# else:\n# self.first = self.first.next\n# temp.next = None\n# self.length -= 1\n# return temp\n#\n#\n# my_queue = Queue(1)\n# my_queue.enqueue(2)\n#\n# # (2) Items - Returns 2 Node\n# print(my_queue.dequeue())\n# # (1) Item - Returns 1 Node\n# print(my_queue.dequeue())\n# # (0) Items - Returns None\n# print(my_queue.dequeue())\n\n\n\n\n\n\n# class Queue:\n# def __init__(self):\n# self.q = list()\n# def push(self,item):\n# self.q.append(item)\n# def display(self):\n# if len(self.q) < 1:\n# return False\n# else:\n# return self.q\n# def topper(self):\n# if len(self.q) < 1:\n# return False\n# else:\n# return self.q[len(self.q)-1]\n# def pop(self):\n# if len(self.q) < 1:\n# return False\n# else:\n# return self.q.pop(0)\n#\n# q = Queue()\n# q.push(11)\n# q.push(1)\n# q.push(2)\n# q.push(3)\n# q.push(4)\n# print(q.display())\n# q.pop()\n# print(q.display())\n\n\n\n\n\nclass Node:\n def __init__(self,value):\n self.value = value\n self.next = None\n\nclass Queue:\n def __init__(self):\n self.f = None\n self.r = None\n self.h =0\n\n def push(self,value):\n node = Node(value)\n if self.r is None:\n self.f = node\n self.r = node\n else:\n self.r.next = node\n self.r = node\n self.h +=1\n\n def display(self):\n t = self.f\n while t is not None:\n print(t.value)\n t = t.next\n\n def pop(self):\n if self.f is None:\n return False\n else:\n temp = self.f\n self.f = self.f.next\n temp.next = None\n\n def top(self):\n if self.f is None:\n return False\n else:\n return self.r.value\n\nq = 
\nq = Queue()\nq.push(7)\nq.push(8)\nq.push(9)\nq.pop()\nq.display()\nq.push(9)\nq.display()\nprint(q.top())\n","repo_name":"Prabhatrajput7/cadeBASE","sub_path":"Pycharm/pythonProject/queue.py","file_name":"queue.py","file_ext":"py","file_size_in_byte":3107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"}
{"seq_id":"9201511945","text":"__author__ = 'papamac'\n__version__ = '1.0.8'\n__date__ = 'May 22, 2020'\n\nfrom argparse import ArgumentParser\nimport logging\nfrom logging import addLevelName, Formatter, StreamHandler\nfrom logging.handlers import TimedRotatingFileHandler\nfrom pathlib import Path\n\nfrom . import colortext\nfrom .colortext import DATA, THREADDEBUG\n\nLOG = colortext.getLogger('Plugin')\n\n\nclass AL:\n \"\"\"\n **************************** needs work ***********************************\n \"\"\"\n\n _log = logging.getLogger('Plugin')\n parser = ArgumentParser()\n name = parser.prog.replace('.py', '')\n args = None\n\n @classmethod\n def start(cls, version=''):\n \"\"\"\n Parse command line arguments, initialize printing/logging and log main\n program starting message.\n \"\"\"\n cls.parser.add_argument('-p', '--print', choices=['THREADDEBUG',\n 'DEBUG', 'DATA', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],\n help='optional printing to sys.stdout and printing level')\n cls.parser.add_argument('-l', '--log', choices=['THREADDEBUG',\n 'DEBUG', 'DATA', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],\n help='optional file logging and logging level')\n cls.parser.add_argument('-L', '--log_directory',\n default='/var/local/log',\n help='top-level log directory (full pathname or relative)')\n cls.args = cls.parser.parse_args()\n\n addLevelName(THREADDEBUG, 'THREADDEBUG')\n addLevelName(DATA, 'DATA')\n cls._log.setLevel(THREADDEBUG)\n\n if cls.args.print:\n print_handler = StreamHandler()\n print_handler.setLevel(cls.args.print)\n print_formatter = Formatter('%(message)s')\n print_handler.setFormatter(print_formatter)\n cls._log.addHandler(print_handler)\n\n LOG.threaddebug('AL.start called')\n if cls.args.log:\n dir_path = Path(cls.args.log_directory)\n log_path = dir_path / Path(cls.name.lower() + '.log')\n try:\n dir_path.mkdir(parents=True, exist_ok=True)\n log_handler = TimedRotatingFileHandler(log_path,\n when='midnight')\n except OSError as err:\n warning = ('open error %s \"%s\" %s; log option ignored'\n % (err.errno, log_path, err.strerror))\n LOG.warning(warning)\n cls.args.log = None\n else:\n log_handler.setLevel(cls.args.log)\n log_formatter = Formatter(\n '%(asctime)s %(levelname)s %(message)s')\n log_handler.setFormatter(log_formatter)\n cls._log.addHandler(log_handler)\n\n if version:\n version = ' v' + version\n LOG.blue('starting %s%s with the following arguments/defaults:',\n cls.name, version)\n LOG.blue('%s', str(cls.args).split('(')[1][:-1])\n\n @classmethod\n def stop(cls):\n LOG.threaddebug('AL.stop called')\n\n # Log main program stopping message.\n\n LOG.blue('stopping %s', cls.name)\n","repo_name":"papamac/papamaclib","sub_path":"argsandlogs.py","file_name":"argsandlogs.py","file_ext":"py","file_size_in_byte":3181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"}
{"seq_id":"30450559153","text":"current_year = 2021\n\nclass Person:\n '''Human class'''\n __total_persons = 0\n\n\n def __init__(self, birth_year, name, **kwargs): # constructor\n if birth_year >= current_year:\n raise Exception(\"Birth year must be earlier than the current year!\")\n # raise TypeError(\"Birth year must be earlier than the current year!\")
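\n # Note: the double-underscore attributes below are name-mangled by Python (__birth_year is stored as _Person__birth_year), which is what makes them effectively private to this class.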
\n self.__birth_year = birth_year\n self.__name = name\n self.__language = kwargs.get(\"language\")\n self.increase_total_persons() # count this new person exactly once\n\n\n @classmethod\n def get_total_persons(cls): # total number of Person instances created\n return cls.__total_persons\n\n @classmethod\n def increase_total_persons(cls):\n cls.__total_persons += 1\n\n def is_adult(self):\n if current_year - self.__birth_year >= 18:\n return True\n else:\n return False\n\n def get_age(self):\n print(current_year - self.__birth_year)\n\n def talk(self):\n print(\"Hello World\")\nclass Teacher(Person):\n def talk(self):\n print(\"Greetings, I'm your teacher\")\n\np1 = Person(1995,\"Jack\", language = 'Italian')\np2 = Person(2002, \"Dilya\")\nprint(Person.get_total_persons())\nprint(p1.is_adult())\nprint(p1.get_age())\nt1 = Teacher(2010,\"Ivan\")\nt1.talk()\nprint(Person.get_total_persons())\n\n#\n# #self.__total_persons += 1\n# if self.birth_year >= self.__current_year:\n# raise TypeError(\"Bigger than current year!\")\n#\n# def is_adult(self):\n# if self.__current_year - self.birth_year >= 18:\n# print(True)\n# else:\n# print(False)\n#\n# def get_age(self):\n# print(self.__current_year - self.birth_year) # Person's age\n#\n# def talk(self):\n# print(\"Hello World\")\n#\n#\n# class Teacher(Person):\n#\n# def new_talk(self, ntalk): # Overriding the talk method\n# self.talk = ntalk\n# print(\"Greetings, I'm your teacher\")\n#\n# def tech(self):\n# print(\"Lesson started by Teacher\")\n#\n#\n# person1 = Person('Myname', 2004)\n# person2 = Person('Aiym', 1999)\n# person3 = Person('Alice', 1990)\n#\n# print(person2.name)\n# print(person2.is_adult())\n# print(person2.get_age())\n#\n#\n#\n# teacher1 = Teacher('Airas', 2001)\n# teacher2 = Teacher('Daniiar', 2000)\n# teacher3 = Teacher('Abai', 1998)\n#\n# print(teacher1.name)\n# print(teacher1.is_adult())\n# print(teacher1.get_age())\n# Standup\n# What I did:\n# Homework #2\n# Problems:\n# had issues with get_total_persons","repo_name":"DilbaraAsanalieva/GeekTech_month2","sub_path":"lesson2/lesson2_hw.py","file_name":"lesson2_hw.py","file_ext":"py","file_size_in_byte":2514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"}
{"seq_id":"1103514569","text":"#!/usr/bin/env python3\n\n# source: https://www.kaggle.com/suicaokhoailang/generating-whale-bounding-boxes\n\nimport os\nimport PIL\nfrom PIL import Image\nfrom PIL.ImageDraw import Draw\nfrom PIL import Image as pil_image\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom keras.models import load_model\nfrom keras.preprocessing import image\nfrom keras.preprocessing.image import img_to_array\nfrom scipy.ndimage import affine_transform\nfrom keras import backend as K\nimport pandas as pd\nfrom tqdm import tqdm\n\nMODEL_BASE = './'\nDATA = '../data/'\nTRAIN = os.path.join(DATA, 'train')\nTEST = os.path.join(DATA, 'test')\n\nmodel = load_model(os.path.join(MODEL_BASE, 'cropping.model'))\n\ntrain_paths = [img for img in os.listdir(TRAIN)]\ntest_paths = [img for img in os.listdir(TEST)]\n\n# Define useful constants\nimg_shape = (128,128,1)\nanisotropy = 2.15\n\ndef center_transform(affine, input_shape):\n hi, wi = float(input_shape[0]), float(input_shape[1])\n ho, wo = float(img_shape[0]), float(img_shape[1])\n top, left, bottom, right = 0, 0, hi, wi\n if wi/hi/anisotropy < wo/ho: # input image too narrow, extend width\n w = hi*wo/ho*anisotropy\n left = (wi-w)/2\n right = left + w\n else: # input image too wide, extend height
\n h = wi*ho/wo/anisotropy\n top = (hi-h)/2\n bottom = top + h\n center_matrix = np.array([[1, 0, -ho/2], [0, 1, -wo/2], [0, 0, 1]])\n scale_matrix = np.array([[(bottom - top)/ho, 0, 0], [0, (right - left)/wo, 0], [0, 0, 1]])\n decenter_matrix = np.array([[1, 0, hi/2], [0, 1, wi/2], [0, 0, 1]])\n return np.dot(np.dot(decenter_matrix, scale_matrix), np.dot(affine, center_matrix))\n\n# Apply an affine transformation to an image represented as a numpy array.\ndef transform_img(x, affine):\n matrix = affine[:2,:2]\n offset = affine[:2,2]\n x = np.moveaxis(x, -1, 0)\n channels = [affine_transform(channel, matrix, offset, output_shape=img_shape[:-1], order=1,\n mode='constant', cval=np.average(channel)) for channel in x]\n return np.moveaxis(np.stack(channels, axis=0), 0, -1)\n\ndef read_raw_image(p):\n return pil_image.open(p)\n\ndef read_for_validation(x):\n t = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n t = center_transform(t, x.shape)\n x = transform_img(x, t)\n x -= np.mean(x, keepdims=True)\n x /= np.std(x, keepdims=True) + K.epsilon()\n return x, t\n\ndef coord_transform(list, trans):\n result = []\n for x,y in list:\n y,x,_ = trans.dot([y,x,1]).astype(np.int)\n result.append((x,y))\n return result\n\ndef read_array(p):\n img = read_raw_image(p).convert('L')\n return img_to_array(img)\n\ndef make_bbox(p):\n raw = read_array(p)\n width, height = raw.shape[1], raw.shape[0]\n img,trans = read_for_validation(raw)\n a = np.expand_dims(img, axis=0)\n x0, y0, x1, y1 = model.predict(a).squeeze()\n (u0, v0),(u1, v1) = coord_transform([(x0,y0),(x1,y1)], trans)\n bbox = [max(u0,0), max(v0,0), min(u1,width), min(v1,height)]\n if bbox[0] >= bbox[2] or bbox[1] >= bbox[3]:\n bbox = [0,0,width,height]\n return bbox\n\nbbox_df = pd.DataFrame(columns=['Image','x0','y0','x1','y1']).set_index('Image')\n\nfor img in tqdm(train_paths):\n bbox_df.loc[img] = make_bbox(os.path.join(TRAIN,img))\n \nfor img in tqdm(test_paths):\n bbox_df.loc[img] = make_bbox(os.path.join(TEST,img))\n\nbbox_df.to_csv(\"bounding_boxes.csv\")\n\n","repo_name":"laijasonk/kaggle","sub_path":"humpback_whale_identification/resources/suicaokhoailang_boundingboxes.py","file_name":"suicaokhoailang_boundingboxes.py","file_ext":"py","file_size_in_byte":3452,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"}
{"seq_id":"34976543084","text":"import sys\ninput = sys.stdin.readline\n\ndef BOJ2460() :\n l = []\n total = 0\n for _ in range(10) :\n a, b = map(int, input().split())\n total = total - a + b\n l.append(total)\n print(max(l))\n \nBOJ2460()\n","repo_name":"woobottle/TIL","sub_path":"Algorithm/Baekjoon/python/2460.py","file_name":"2460.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
{"seq_id":"70990419957","text":"from random import randint, uniform\n\n# Read parameters\nn = int(input('Number of products: '))\nutilidad = []\ndisponibilidad = []\n\n# Random generation of parameters (profit and availability)\nw = randint(30,50)\n\nprint ('\\nAvailability')\nfor i in range(n):\n value = randint(5,15)\n disponibilidad.append(value)\n print (\"d_\" + str(i+1), \"=\" , value)\n\nprint ('\\nProfit')\nfor i in range(n):\n value = randint(4, 10)\n utilidad.append(value)\n print (\"u_\" + str(i+1), \"=\" , value)\n\n\n# Build the model\nprint ('\\n-LINDO-Model')\n\n## Objective function\nobjective_function = \"max \"\nfor i in range(n):\n objective_function += str(utilidad[i]) + \"x\" + str(i+1)\n if i<n-1:\n objective_function+=\" + \"\n else:\n objective_function += \"\\n\"\nprint (objective_function)
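\n# For n = 2 the generated model text looks like the following (the coefficients are random, so these numbers are only an example):\n#\n# max 7x1 + 5x2\n# st\n# x1 + x2 <= 42\n# x1 <= 12\n# x2 <= 9\n# x1 >= 0\n# x2 >= 0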
\n\n# Constraints (st: subject to)\nconstraints = \"st\\n\"\nfor i in range(n):\n constraints += \"x\" + str(i+1)\n if i<n-1:\n constraints+=\" + \"\nconstraints += \" <= \" + str(w) + \"\\n\"\n\nfor i in range(n):\n constraints += \"x\" + str(i+1) + \" <= \" + str(disponibilidad[i]) + \"\\n\"\n\nconstraints += \"\\n\"\n\nfor i in range(n):\n constraints += \"x\" + str(i+1) + \" >= 0\\n\"\n\nprint (constraints)\n\n# Write the model file (.ltx, for LINDO)\nfin = open(\"modelo_LINDO.ltx\",\"w\")\n\nfin.write(objective_function)\nfin.write(\"\\n\")\nfin.write(constraints)\n\nfin.close()","repo_name":"C0t300/Projecto-1-Opti","sub_path":"lindomochila.py","file_name":"lindomochila.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
{"seq_id":"40669626798","text":"import itertools\nimport unittest\n\nfrom timor.Module import ModulesDB\nfrom timor.configuration_search.LiuIterator import Science2019\nfrom timor.utilities.dtypes import randomly\n\n\nclass AssemblyIteratorTest(unittest.TestCase):\n \"\"\"Test assembly iterators.\"\"\"\n def setUp(self) -> None:\n self.modrob_db = ModulesDB.from_name('PROMODULAR')\n\n def test_liu_iterator(self):\n modrob_iterator = Science2019(self.modrob_db, min_dof=1, max_dof=3, max_links_between_joints=1)\n num_assemblies_tested = 1000 # reduce the runtime of this test by limiting the number of assemblies generated\n lots_of_assemblies = []\n for assembly in itertools.islice(modrob_iterator, num_assemblies_tested):\n lots_of_assemblies.append(assembly)\n assembly_modules = tuple(assembly.internal_module_ids for assembly in lots_of_assemblies)\n self.assertEqual(len(modrob_iterator), 100548)\n self.assertEqual(len(lots_of_assemblies), len(set(assembly_modules)))\n\n # Check validity by converting 5 assemblies to a robot\n for assembly in itertools.islice(randomly(lots_of_assemblies), 5):\n robot = assembly.to_pin_robot()\n\n j = 0\n stop_at = 20\n modrob_iterator.reset()\n # Another way to check there are valid robots\n for j, assembly in enumerate(modrob_iterator):\n robot = assembly.to_pin_robot()\n if j >= stop_at:\n break\n self.assertEqual(j, stop_at) # Make sure we actually tested 20 samples\n\n with self.assertRaises(ValueError):\n # Make sure to prevent instantiating iterators where the preparations alone already take too long\n iterator = Science2019(self.modrob_db, max_dof=10, max_links_between_joints=5)\n\n smaller_iterator = Science2019(self.modrob_db, min_dof=1, max_dof=2, max_links_between_joints=1)\n self.assertEqual(len(smaller_iterator), len(tuple(smaller_iterator)))\n for i, (a1, a2) in enumerate(zip(tuple(smaller_iterator), smaller_iterator)):\n if i >= 10:\n break\n self.assertEqual(a1, a2)\n\n def test_science_paper_liu(self):\n \"\"\"\n Tests that this iterator really yields the same combinations as the ones in the science paper.\n Applies some fixes not really belonging to this iterator, but necessary to arrive at the same combinations.\n This is mostly due to the domain knowledge applied by Liu et al., 
which is specific to the schunk robot...\n \"\"\"\n schunk_db = ModulesDB.from_name('IMPROV')\n not_in_paper = ('L7', 'L10', 'PB22', 'PB24')\n for module_name in not_in_paper:\n schunk_db.remove(schunk_db.by_name[module_name])\n iterator = Science2019(schunk_db, max_dof=6, max_links_after_last_joint=1)\n\n # The possible combination of powerballs is actually hard coded in the paper\n iterator.joint_combinations = (('21', '21', '23'), ('21', '21', '21'))\n real_links = {'4', '5', '14', '15'} # No extensions\n\n def paper_compliant_combo(links: tuple[str]) -> bool:\n is_link = tuple(link for link in links if link in real_links)\n if len(is_link) != 1:\n return False\n if len(links) == 3:\n if links[1] not in real_links:\n return False\n return True\n\n link_combinations_in_paper = tuple(link_combination for link_combination in iterator.link_combinations\n if paper_compliant_combo(link_combination))\n iterator.link_combinations = link_combinations_in_paper\n\n # Make the fixes from above work\n iterator.links_between_joints = iterator._identify_links_between_joints()\n iterator.augmented_end_effectors = tuple(aeef for aeef in iterator.augmented_end_effectors if not any(\n real_link in aeef for real_link in real_links)) # Only extensions are allowed before the end effector\n iterator.joint_combinations_for_base_eef = iterator._identify_base_chain_eef()\n iterator._current_joint_combination = iterator.joint_combinations[iterator.state.joints]\n iterator._current_num_joint_combinations = len(iterator.valid_joint_combinations)\n iterator._current_num_joint_modules = len(iterator.joint_combinations[iterator.state.joints])\n\n expected_num_assemblies = 32768 # Number taken from published MATLAB implementation\n self.assertEqual(len(iterator), expected_num_assemblies)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"JonathanKuelz/timor-python","sub_path":"tests/test_assembly_iterator.py","file_name":"test_assembly_iterator.py","file_ext":"py","file_size_in_byte":4580,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"24391352730","text":"ghenv.Component.Name = \"DF Louver Parameters\"\nghenv.Component.NickName = 'LouverPar'\nghenv.Component.Message = '1.7.0'\nghenv.Component.Category = \"Dragonfly\"\nghenv.Component.SubCategory = '0 :: Create'\nghenv.Component.AdditionalHelpFromDocStrings = \"6\"\n\ntry:\n from ladybug_geometry.geometry2d.pointvector import Vector2D\nexcept ImportError as e:\n raise ImportError('\\nFailed to import ladybug_geometry:\\n\\t{}'.format(e))\n\ntry: # import the core dragonfly dependencies\n from dragonfly.shadingparameter import LouversByDistance, LouversByCount\nexcept ImportError as e:\n raise ImportError('\\nFailed to import dragonfly:\\n\\t{}'.format(e))\n\ntry:\n from ladybug_rhino.grasshopper import all_required_inputs\nexcept ImportError as e:\n raise ImportError('\\nFailed to import ladybug_rhino:\\n\\t{}'.format(e))\n\n\nif all_required_inputs(ghenv.Component):\n # set defaults for any blank inputs\n _facade_offset_ = _facade_offset_ if _facade_offset_ is not None else 0.0\n _angle_ = _angle_ if _angle_ is not None else 0.0\n flip_start_ = flip_start_ if flip_start_ is not None else False\n \n # process the defaults for _shade_count_ vs _dist_between\n if _shade_count_ is not None and _dist_between_ is not None:\n raise ValueError('Inputs for _shade_count_ and _dist_between_ are both set.'\n '\\nThis component accepts either method but not both.')\n elif _shade_count_ is None and 
_dist_between_ is None:\n _shade_count_ = 1\n \n # process the vertical_ input into a direction vector\n vertical_ = Vector2D(1, 0) if vertical_ else Vector2D(0, 1)\n \n if _shade_count_ is not None:\n shd_par = LouversByCount(_shade_count_, _depth, _facade_offset_,\n _angle_, vertical_, flip_start_)\n else:\n shd_par = LouversByDistance(_dist_between_, _depth, _facade_offset_,\n _angle_, vertical_, flip_start_)","repo_name":"ladybug-tools/dragonfly-grasshopper","sub_path":"dragonfly_grasshopper/src/DF Louver Parameters.py","file_name":"DF Louver Parameters.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"4"} +{"seq_id":"8242378741","text":"\"\"\" Test the Protein class that loads using model_builder\"\"\"\nimport pytest\nimport pyODEM\nimport os\nimport numpy as np\n\n@pytest.fixture\ndef make_lmodel_objects():\n return _make_lmodel_objects()\n\ndef _make_lmodel_objects():\n \"\"\" Make Langevin 1-D object\n\n Two wells with sigma 0.1 are placed at r0=0.5 and r0=2.5\n \"\"\"\n cwd = os.getcwd()\n\n lmodel = pyODEM.model_loaders.LangevinCustom()\n\n lmodel.set_beta(1.0)\n\n parameters = {\"epsilons\":1.0, \"r0\": 0.5, \"sigma\":0.1}\n lmodel.add_gaussian(parameters)\n parameters = {\"epsilons\":1.0, \"r0\": 2.5, \"sigma\":0.1}\n lmodel.add_gaussian(parameters)\n\n return lmodel\n\nclass TestLangevin(object):\n def test_import_langevin(self, make_lmodel_objects):\n \"\"\" Confirm LangevinCustom class loaded values correctly \"\"\"\n # test that the various values are correctly loaded\n lmodel = make_lmodel_objects\n assert lmodel.parameters[0][0] == 0.5\n assert lmodel.parameters[1][0] == 2.5\n assert lmodel.parameters[0][1] == 0.1\n assert lmodel.parameters[1][1] == 0.1\n assert lmodel.beta == 1.0\n\n\n def test_langevin_energies(self, make_lmodel_objects):\n \"\"\" Confirm langevin energy computation is correct\n\n There are two gaussian wells placed at 0.5 and 2.5. 
Given the width of\n each well and floating-point addition, the total energy (scaled by\n -lmodel.beta) is 1 for each data point centered in a well: the distant\n well contributes exp(-(2.0)**2 / (2 * 0.1**2)) = exp(-200), on the\n order of 10**-87, which vanishes when added to 1 in double precision.\n\n \"\"\"\n\n lmodel = make_lmodel_objects\n\n data = np.array([0.5, 2.5])\n\n heps, deps = lmodel.get_potentials_epsilon(data)\n\n # note, values are not truly 1 or 0: 1 + 1.3*10**-87 == 1 due to floating-point arithmetic\n assert heps(lmodel.epsilons)[0] == 1\n assert heps(lmodel.epsilons)[1] == 1\n assert heps(lmodel.epsilons - 0.5)[0] == 0.5\n assert heps(lmodel.epsilons - 0.5)[1] == 0.5\n assert heps(np.array([0,1]))[0] < 10**-7\n assert heps(np.array([0,1]))[1] == 1\n\n def test_lmodel_derivatives(self, make_lmodel_objects):\n \"\"\" Confirm calculation of derivative is correct\n\n This test will confirm if deps correctly determines the derivative by\n comparing with the numeric derivative computed from heps\n\n \"\"\"\n\n lmodel = make_lmodel_objects\n data = np.array([0.5, 2.5])\n heps, deps = lmodel.get_potentials_epsilon(data)\n derivatives = np.array(deps(lmodel.epsilons))\n\n for frame in range(np.shape(derivatives)[1]):\n deriv = derivatives[:, frame]\n magnitude = np.sqrt(np.sum(deriv ** 2))\n direction = deriv / magnitude\n step_size = 0.01\n diff = heps(lmodel.epsilons + (direction*step_size*0.5)) - heps(lmodel.epsilons - (direction*step_size*0.5))\n numeric_magnitude = diff[frame] / step_size\n\n assert np.abs(magnitude - numeric_magnitude) < 0.000001\n","repo_name":"ClementiGroup/pyODEM","sub_path":"test/model_loaders_LangevinCustom_test.py","file_name":"model_loaders_LangevinCustom_test.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"4"}
{"seq_id":"37269418231","text":"import random\nimport Levenshtein\n\nfrom enum import Enum\n\nfrom scipy.stats import bernoulli\n\n\nfrom .constants import (\n REDIS_ZORBS,\n REDIS_WORLD,\n DAY_REDUCE_ENERGY,\n DAY_REDUCE_LIFENESS,\n ENERGY_QUANTUM\n)\n\nfrom .zorb import Zorb, get_zorbs, get_random_zorb\nfrom .inout import get_db\nfrom .event import EventType, Event\n\n\nclass World(object):\n \"\"\"Shared instance value\n \"\"\"\n __instance = None\n genuine_properties = (\"size\", \"energy\", \"reproducetibility\", \"day\")\n\n def __new__(cls, **kwargs):\n if World.__instance is None:\n World.__instance = object.__new__(cls)\n return World.__instance\n\n def __init__(self, **kwargs):\n if kwargs:\n properties = (\n (k, v)\n for k, v in kwargs.items()\n if k in self.genuine_properties\n )\n for attribute, value in properties:\n setattr(self, attribute, value)\n else:\n db = get_db()\n attrs = db.hgetall(REDIS_WORLD)\n if not attrs:\n raise ValueError()\n for attr, value in attrs.items():\n attr, value = attr.decode(\"utf-8\"), value.decode(\"utf-8\")\n setattr(self, attr, float(value))\n\n def __del__(self):\n # Update in Redis\n print(self.__dict__)\n if self.__dict__:\n db = get_db()\n db.hmset(REDIS_WORLD, self.__dict__)\n\n @property\n def zorbs_no(self):\n db = get_db()\n return db.scard(REDIS_ZORBS)\n\n def next_day(self):\n self.day += 1\n\n\nclass Actions(Enum):\n \"\"\"Actions\n \"\"\"\n energy = 1\n zorb = 2\n none = 3\n attack = 4\n reproduce = 5\n run = 6\n\n\ndef zorb_day(world, zorb):\n\n def energy_zorb_or_none(world):\n \"\"\"\n p_find_energy = max_energy_day / world_size\n p_find_zorb = no_zorbs / world_size\n \"\"\"
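\n # Worked example with illustrative numbers: if world.energy == 50 and world.size == 1000, a zorb finds energy with probability 50/1000 = 0.05, so bernoulli.rvs(0.05, size=1) returns [1] about once every 20 days on average.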
\n energy_found = bernoulli.rvs(\n (world.energy / world.size),\n size=1\n )\n if energy_found[0] == 1:\n return Actions.energy\n\n zorb_found = bernoulli.rvs(\n (world.zorbs_no / world.size),\n size=1\n )\n if zorb_found[0] == 1:\n return Actions.zorb\n\n return Actions.none\n\n def attack_reproduce_or_run(world, zorb_a, zorb_b):\n def can_attack(zorb_a, zorb_b):\n attack_defense_ratio = zorb_b.defense / zorb_a.attack\n speed_ratio = zorb_b.speed / zorb_a.speed\n\n return (attack_defense_ratio - random.expovariate(lambd=speed_ratio)) > 1\n\n def can_reproduce(world, zorb_a, zorb_b):\n \"\"\"Two given zorbs can reproduce if their DNA differs by less than a given threshold\n \"\"\"\n return Levenshtein.distance(zorb_a.dna, zorb_b.dna) < world.reproducetibility\n\n if can_reproduce(world, zorb_a, zorb_b):\n return Actions.reproduce\n elif can_attack(zorb_a, zorb_b):\n return Actions.attack\n else:\n return Actions.run\n\n action = energy_zorb_or_none(world)\n\n if action == Actions.energy:\n print(\"Energy found zid:`{}` total_energy :`{}`\".format(zorb.zid, zorb.energy))\n\n # Create event\n event_data = zorb.__dict__\n event_data[\"feed\"] = ENERGY_QUANTUM\n Event(event_type=EventType.Feed, data=event_data)\n\n zorb.energy += ENERGY_QUANTUM\n\n elif action == Actions.zorb:\n # Meet a zorb\n found_a_zorb = get_random_zorb()\n\n action = attack_reproduce_or_run(world, zorb, found_a_zorb)\n\n if action == Actions.attack:\n attack(world, zorb, found_a_zorb)\n\n elif action == Actions.reproduce:\n reproduce(world, zorb, found_a_zorb)\n\n # Finalize day\n zorb.lifeness -= DAY_REDUCE_LIFENESS\n zorb.energy -= zorb.consume / DAY_REDUCE_ENERGY\n if zorb.lifeness < 0 or zorb.energy < 0:\n zorb.alive = False\n\n\ndef attack(world, zorb_a, zorb_b):\n if round(zorb_a.attack, 2) == round(zorb_b.attack, 2):\n # Even attack (fight)\n event_data = zorb_a.__dict__\n event_data = zorb_b.__dict__\n Event(event_type=EventType.Fight, data=event_data)\n\n # Both stay alive and both lose energy, but zorb_a loses 60% extra\n zorb_a.energy -= (zorb_a.consume * 1.6) / DAY_REDUCE_ENERGY\n zorb_b.energy -= zorb_b.consume / DAY_REDUCE_ENERGY\n\n # Both lose lifeness\n zorb_a.lifeness -= DAY_REDUCE_LIFENESS\n zorb_b.lifeness -= DAY_REDUCE_LIFENESS\n elif zorb_a.attack > zorb_b.attack:\n # Stronger attack (hunt)\n event_data = zorb_a.__dict__\n event_data = zorb_b.__dict__\n Event(event_type=EventType.Hunt, data=event_data)\n\n print(\"Zorb `{}` hunted zid:`{}` \".format(zorb_a.zid, zorb_b.zid))\n\n # only zorb_a stays alive; zorb_a recovers 60% of zorb_b's energy\n zorb_a.energy += zorb_b.energy * 0.6\n\n # zorb_b dead\n zorb_b.alive = False\n pass\n else:\n # Weaker attack\n\n # zorb_a survives depending on (zorb_b.attack / zorb_a.defense)\n if (zorb_b.attack / zorb_a.defense) >= 1:\n zorb_a.alive = True\n # zorb_a, if alive, loses energy and a sensible amount of lifeness\n zorb_a.energy -= (zorb_a.consume * 1.8) / DAY_REDUCE_ENERGY\n zorb_a.lifeness -= DAY_REDUCE_LIFENESS * 2\n\n event_data = zorb_a.__dict__\n event_data = zorb_b.__dict__\n Event(event_type=EventType.Fight, data=event_data)\n else:\n zorb_a.alive = False\n event_data = zorb_a.__dict__\n event_data = zorb_b.__dict__\n Event(event_type=EventType.Kill, data=event_data)\n\n # zorb_b stays alive, losing a little energy and lifeness\n zorb_b.lifeness -= DAY_REDUCE_LIFENESS\n zorb_b.energy -= (zorb_b.consume * 1.2) / DAY_REDUCE_ENERGY\n\n\ndef reproduce(world, zorb_a, zorb_b):\n \"\"\"Each parent gives up 50% of its energy.\n Creates 3 offspring with the avg of all attributes\n \"\"\"\n\n event_data = zorb_a.__dict__\n event_data = zorb_b.__dict__\n Event(event_type=EventType.Reproduced, data=event_data)\n\n avgs = {\n k: (getattr(zorb_a, k) + getattr(zorb_b, 
k))/2\n for k in Zorb.INMUTABLE_ATTRIBUTES\n }\n zorb_a.energy, zorb_b.energy = zorb_a.energy * .5, zorb_b.energy * .5\n avgs[\"energy\"] = (zorb_a.energy + zorb_b.energy) / 3\n [Zorb.birth(world, avgs) for _ in range(3)]\n\n\ndef play(world):\n \"\"\"Play 1 day of this world\n \"\"\"\n for zorb in get_zorbs():\n zorb_day(world, zorb)\n","repo_name":"toloco/a_life_game","sub_path":"sandbox/world.py","file_name":"world.py","file_ext":"py","file_size_in_byte":6545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"27205843088","text":"from algorithms.BurrowsWheelerTransform import BurrowsWheelerTransform\nfrom algorithms.MoveToFront import MoveToFront\nfrom algorithms.Utils import Utils\n\nburrowsWheelerTransform = BurrowsWheelerTransform()\nmoveToFront = MoveToFront()\nutils = Utils()\n\narithmetic_encoded_file_name = \"output/arithmetic_encoded.txt\"\nbwt_decoded_file_name = \"output/bwt_decoded.txt\"\nmtf_decoded_file_name = \"output/mtf_decoded.txt\"\narithmetic_decoded_file_name = \"output/arithmetic_decoded.txt\"\n\nutils.arithmetic_decode(arithmetic_encoded_file_name, arithmetic_decoded_file_name)\n\nutils.replace_file_endings(arithmetic_decoded_file_name)\n\nmtf_encoded_file = open(arithmetic_decoded_file_name, \"r\")\ncode_list = [int(i) for i in mtf_encoded_file.read().split(\" \")]\nmtf_decoded_file = open(mtf_decoded_file_name, \"w+\")\ndecode = moveToFront.decode(code_list)\nmtf_decoded_file.write(decode)\nmtf_decoded_file.close()\n\n# decode\nbwt_encoded_file = open(mtf_decoded_file_name, \"r\")\nbwt_decoded_file = open(bwt_decoded_file_name, \"w+\")\nfor byte_string in bwt_encoded_file.read().split(\"]\"):\n text = byte_string.split('[')\n if len(text) > 1:\n bwt_decoded_file.write(burrowsWheelerTransform.decode(int(text[0]), text[1]))\n\nbwt_encoded_file.close()\nbwt_decoded_file.close()\n\n","repo_name":"duzenz/data-compression-project","sub_path":"FullChainDecompress.py","file_name":"FullChainDecompress.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"39117698555","text":"# encoding: utf8\nimport os\nimport json\nimport operator\nimport subprocess\n\nfrom contextlib import contextmanager\nfrom functools import reduce\nfrom pathlib import Path\nfrom subprocess import check_call, check_output\nfrom tempfile import TemporaryDirectory\nfrom typing import NewType, Dict, Any, List, Iterator\n\nfrom halo import Halo\n\nfrom experiments.system import System\n\n\nRegion = NewType(\"Region\", str)\nSHA = NewType(\"SHA\", str)\nAMI = NewType(\"AMI\", str)\nInstanceType = NewType(\"InstanceType\", str)\n\nAWS_REGION = Region(\"us-east-2\")\nDEFAULT_INSTANCE_TYPE = InstanceType(\"c5.4xlarge\")\n\n\nclass NoImageError(Exception):\n pass\n\n\ndef format_args(var_dict: Dict[str, Any]) -> List:\n return reduce(operator.add, [[\"-var\", f\"{k}={v}\"] for k, v in var_dict.items()])\n\n\n@contextmanager\ndef terraform(tf_vars: Dict[str, Any], tf_dir: Path) -> Iterator[Dict[Any, Any]]:\n if \"AWS_ACCESS_KEY_ID\" not in os.environ:\n raise RuntimeError(\"Missing AWS creds\")\n with TemporaryDirectory() as tmpdir:\n with Halo(\"[infrastructure] checking current state\") as spinner:\n plan = Path(tmpdir) / \"tfplan\"\n tf_args = format_args(tf_vars)\n cmd = [\"terraform\", \"plan\", f\"-out={plan}\", \"-no-color\"] + tf_args\n try:\n plan_output = check_output(cmd, stderr=subprocess.STDOUT, cwd=tf_dir)\n except 
subprocess.CalledProcessError as err:\n if \"terraform init\" in err.output.decode(\"utf8\"):\n # we know what to do here\n spinner.text = \"[infrastructure] initializing plugins\"\n check_output([\"terraform\", \"init\"], cwd=tf_dir)\n spinner.text = \"[infrastructure] checking current state\"\n plan_output = check_output(cmd, cwd=tf_dir)\n elif \"Your query returned no results\" in err.output.decode(\"utf8\"):\n raise NoImageError() from err\n else:\n with open(\"terraform.log\", \"w\") as log_file:\n log_file.write(err.output.decode(\"utf8\"))\n raise\n changes = [\n l\n for l in plan_output.decode(\"utf8\").split(\"\\n\")\n if l.lstrip().startswith(\"#\")\n ]\n\n if changes:\n spinner.succeed(\"[infrastructure] found changes to apply:\")\n for change in changes:\n if (\n \"unchanged attributes hidden\" in change\n or \"unchanged element hidden\" in change\n ):\n continue\n change = change.lstrip(\" #\")\n print(f\" • {change}\")\n else:\n spinner.info(\"[infrastructure] no changes to apply\")\n\n if changes:\n with Halo(\n \"[infrastructure] applying changes (output in [terraform.log])\"\n ) as spinner:\n with open(\"terraform.log\", \"w\") as log_file:\n cmd = [\n \"terraform\",\n \"apply\",\n \"-refresh=false\",\n \"-auto-approve\",\n str(plan),\n ]\n check_call(cmd, stdout=log_file, cwd=tf_dir)\n spinner.succeed(\"[infrastructure] created\")\n\n data = json.loads(check_output([\"terraform\", \"output\", \"-json\"], cwd=tf_dir))\n yield {k: v[\"value\"] for k, v in data.items()}\n\n\n@contextmanager\ndef cleanup(system: System):\n try:\n yield\n finally:\n tf_vars = system.environment.make_tf_cleanup_vars()\n tf_args = format_args(tf_vars)\n with Halo(\"[infrastructure] tearing down all resources\") as spinner:\n check_call(\n [\"terraform\", \"destroy\", \"-auto-approve\"] + tf_args,\n stdout=subprocess.DEVNULL,\n cwd=system.root_dir,\n )\n spinner.succeed()\n","repo_name":"znewman01/spectrum-impl","sub_path":"experiments/cloud.py","file_name":"cloud.py","file_ext":"py","file_size_in_byte":3991,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"4"}
{"seq_id":"43540410944","text":"import sys\nsys.stdin = open(\"input.txt\", \"r\")\n\n'''\nGiven 26, what happens? I'll split the digits and work with them.\n\n# receive it as a string\ntemp = 26\n\n# these are strings\nfir = temp[0]\nsec = temp[1]\n\n# convert back to int when adding\nnew_temp = int(fir) + int(sec)\n\n# build the new number\n# new = str(sec) + str(new_temp)\n\n# I'll loop with while:\n# compute new, then count += 1,\n# and when it equals the original temp again, print(cnt) and break\n'''\n\n# origin=input()\n# made=''\n# made=origin\n# cnt=0\n# new=''\n# while True:\n# if new==origin:\n# print(cnt)\n# break\n# fir,sec=made[0],made[1]\n# temp=int(fir)+int(sec)\n# new=str(sec)+str(str(temp)[1])\n# made=new\n# cnt+=1\n\n\n'I want to handle everything inside the while loop -- how should I do that?'\n\norigin=int(input())\ntemp=origin\ncycle=0\n\nwhile True:\n new=(temp//10)+(temp%10)\n new_num=(temp%10)*10+(new%10) # for 26 this gives 68\n cycle+=1\n temp=new_num\n if origin == new_num:\n print(cycle)\n break\n\n\n\n\n\n","repo_name":"kimchaelin13/Algorithm","sub_path":"boj/BOJ_더하기 사이클.py","file_name":"BOJ_더하기 사이클.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"}
{"seq_id":"14503523210","text":"\n## Process overview ##\n#1. Decode the news article (base64 files)\n#2. Summarize the document / extract keywords\n#3. Build the summary report\n#4. Export as an HTML file
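\n# Quick base64 refresher (standalone toy example, unrelated to the input files):\n#\n# import base64\n# base64.decodebytes(b\"aGVsbG8=\") # -> b\"hello\"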
\n#1. Decode the news article\n#1-1. Read the base64 file\nimport base64\n\nf = open(\"image\", 'rb') # rb (read bytes)\nimage = f.readlines() # read the whole file into a list, one line per element\nf.close()\n\nprint(image[0])\n\nf = open(\"article\", 'rb')\narticle = f.readlines()\nf.close()\n\nprint(article)\n\n#1-2. Decode the article image\nfile_base64 = image[0]\n\npath = \"image.jpg\"\nwith open(path, 'wb') as f: # wb (write bytes)\n decoded_data = base64.decodebytes(file_base64)\n f.write(decoded_data)\n\nfrom PIL import Image\nimg = Image.open(path)\nimg\n\n#1-3. Decode the article text\nfile_base64 = article[0]\ndecoded_data = base64.decodebytes(file_base64)\ndecoded_data\n\narticle = decoded_data.decode('utf-8')\nprint(article)\n\n#2. Summarize the document / extract keywords\nfrom gensim.summarization.summarizer import summarize\nfrom gensim.summarization.textcleaner import split_sentences\n\n#2-1. Summary based on word count (word_count)\nprint(summarize(article, word_count=50))\n\n#2-2. Summary based on ratio (ratio)\nprint(summarize(article, ratio=0.1))\n\n#2-3. Store the summarized text\narticle_summarize = summarize(article, ratio=0.1)\n\n#2-4. Keyword extraction\nimport collections\nimport textwrap\nimport re\n\n#2-5. Wrap lines\narticle_align = textwrap.fill(article, width=50)\nprint(article_align)\n\n#2-6. Extract words\nwords = re.findall(r'\\w+', article_align)\n# findall: returns a list of all substrings matching the regex;\n# \\w+ matches runs of letters/digits, effectively splitting out the words\n\nprint(words)\n\n#2-7. Count frequencies\ncounter = collections.Counter(words)\nprint(counter)\n\n#2-8. Extract keywords\nprint(counter.most_common(5))\nkeywords = counter.most_common(5)[1:]\nprint(keywords)\n\n#3. Build the summary report\nfrom IPython.display import Image\nImage(filename=path, width=300)\n\nprint(article_summarize)\n\nkeys = ['#' + i[0] for i in keywords]\nkeys = ' '.join(keys) # join the list items into a single string\nprint(keys)\n\n#3-1. Save as an HTML file\nhtmlfile = open(\"summary.html\", \"w\")\nhtmlfile.write(\"<html>\\n\")\nhtmlfile.write(\"<h1>\" + 'Artemis program begins its countdown -- will the \"goddess of the Moon\" smile?' + \"</h1>\\n\")\nhtmlfile.write(\"<img src='image.jpg' />\\n\")\nhtmlfile.write(\"<h2>\" + article_summarize + \"</h2>\\n\")\nhtmlfile.write(\"<h2 style='background-color:powderblue;'>\"+ keys + \"</h2>\\n\")\nhtmlfile.write(\"</html>\\n\")\nhtmlfile.close()\n\n","repo_name":"Jeonghy0517/News-Summary","sub_path":"news_summary.py","file_name":"news_summary.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
{"seq_id":"8807771070","text":"import os\nimport sys\nimport unittest\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nfrom shiboken_paths import init_paths\ninit_paths()\n\nfrom sample import ListUser, Point, PointF\n\nclass ExtendedListUser(ListUser):\n def __init__(self):\n ListUser.__init__(self)\n self.create_list_called = False\n\n def createList(self):\n self.create_list_called = True\n return [2, 3, 5, 7, 13]\n\nclass ListConversionTest(unittest.TestCase):\n '''Test case for std::list container conversions'''\n\n def testReimplementedVirtualMethodCall(self):\n '''Test if a Python override of a virtual method is correctly called from C++.'''\n lu = ExtendedListUser()\n lst = lu.callCreateList()\n self.assertTrue(lu.create_list_called)\n self.assertEqual(type(lst), list)\n for item in lst:\n self.assertEqual(type(item), int)\n\n def testPrimitiveConversionInsideContainer(self):\n '''Test primitive type conversion inside conversible std::list container.'''\n cpx0 = complex(1.2, 3.4)\n cpx1 = complex(5.6, 7.8)\n lst = ListUser.createComplexList(cpx0, cpx1)\n self.assertEqual(type(lst), list)\n for item in lst:\n self.assertEqual(type(item), 
complex)\n self.assertEqual(lst, [cpx0, cpx1])\n\n def testSumListIntegers(self):\n '''Test method that sums a list of integer values.'''\n lu = ListUser()\n lst = [3, 5, 7]\n result = lu.sumList(lst)\n self.assertEqual(result, sum(lst))\n\n def testSumListFloats(self):\n '''Test method that sums a list of float values.'''\n lu = ListUser()\n lst = [3.3, 4.4, 5.5]\n result = lu.sumList(lst)\n self.assertEqual(result, sum(lst))\n\n def testConversionInBothDirections(self):\n '''Test converting a list from Python to C++ and back again.'''\n lu = ListUser()\n lst = [3, 5, 7]\n lu.setList(lst)\n result = lu.getList()\n self.assertEqual(result, lst)\n\n def testConversionInBothDirectionsWithSimilarContainer(self):\n '''Test converting a tuple, instead of the expected list, from Python to C++ and back again.'''\n lu = ListUser()\n lst = (3, 5, 7)\n lu.setList(lst)\n result = lu.getList()\n self.assertNotEqual(result, lst)\n self.assertEqual(result, list(lst))\n\n def testConversionOfListOfObjectsPassedAsArgument(self):\n '''Calls method with a Python list of wrapped objects to be converted to a C++ list.'''\n mult = 3\n pts0 = (Point(1.0, 2.0), Point(3.3, 4.4), Point(5, 6))\n pts1 = (Point(1.0, 2.0), Point(3.3, 4.4), Point(5, 6))\n ListUser.multiplyPointList(pts1, mult)\n for pt0, pt1 in zip(pts0, pts1):\n self.assertEqual(pt0.x() * mult, pt1.x())\n self.assertEqual(pt0.y() * mult, pt1.y())\n\n def testConversionOfInvalidLists(self):\n mult = 3\n pts = (Point(1.0, 2.0), 3, Point(5, 6))\n self.assertRaises(TypeError, ListUser.multiplyPointList, pts, mult)\n\n def testOverloadMethodReceivingRelatedContainerTypes(self):\n self.assertEqual(ListUser.ListOfPointF, ListUser.listOfPoints([PointF()]))\n self.assertEqual(ListUser.ListOfPoint, ListUser.listOfPoints([Point()]))\n\nif __name__ == '__main__':\n unittest.main()\n\n","repo_name":"pyside/pyside2-setup","sub_path":"sources/shiboken2/tests/samplebinding/list_test.py","file_name":"list_test.py","file_ext":"py","file_size_in_byte":3352,"program_lang":"python","lang":"en","doc_type":"code","stars":206,"dataset":"github-code","pt":"4"} +{"seq_id":"19949334209","text":"import tkinter as tk\r\nimport random\r\n\r\nname = \"Stock name\"\r\nprice_bought, number = 0, \"Number of stocks bought\"\r\nstock = [[\"Apple Inc.\", 499.3, 60], [\"TATA Consumer Products Ltd.\", 550.45, 8],\r\n [\"Microsoft\", 216.47, 12], [\"Reliance Industries Ltd.\", 208.77, 31.2],\r\n [\"Jindal Steel & Power Ltd.\", 199, 5], [\"Airtel Ltd.\", 514.65, 55],\r\n [\"Cipla Ltd.\", 728.6, 9], [\"Bajaj Finance Ltd.\", 670, 97],\r\n [\"Intel Corporation\", 277.25, 7],\r\n [\"Apollo Hospitals Enterprises Ltd.\", 103.9, 19.5],\r\n [\"Amazon.com Inc.\", 191.30, 6.5],\r\n [\"Alphabet Inc. 
(Google)\", 177.7, 3.5]]\r\ntransactions=[] #[(day_num, buy/sell, name, price_bought, number)]\r\nstock_bought = {}\r\nmoney = 5000\r\ndaynum = 1\r\nflag1, flag2, flag3 = 0, 0, 0\r\n\r\n# Stock Price Variability Function\r\ndef rnge(start, stop):\r\n ''''\r\n Function to vary the price of a stock along with decimal variation\r\n Parameters:\r\n start- lower bound for increase/decrease in price\r\n stop- upper bound for increase/decrease in price\r\n Returns a value with variation in initial price\r\n '''\r\n dec_part1 = start % 1\r\n dec_part2 = stop % 1\r\n int_part1 = int(start - dec_part1)\r\n int_part2 = int(stop - dec_part2)\r\n dec_part1 = round(dec_part1 * 10)\r\n dec_part2 = round(dec_part2 * 10)\r\n int_random = random.randrange(int_part1, int_part2 + 1)\r\n dec_random = random.randrange(dec_part1, dec_part2 + 1)\r\n s = str(int_random) + \".\" + str(dec_random)\r\n return eval(s)\r\n\r\n\r\n# Function to increase or decrease stock prices\r\ndef inc_dec():\r\n for i in range(len(stock)):\r\n if random.randrange(1, 3) > 1:\r\n # increase in price\r\n inc(stock, i)\r\n else:\r\n # decrease in price\r\n dec(stock, i)\r\n\r\n\r\n# increase in price\r\ndef inc(a, i):\r\n a[i][1] = a[i][1] + rnge(0, a[i][2])\r\n\r\n\r\n# decrease in price\r\ndef dec(b, i):\r\n b[i][1] = b[i][1] - rnge(0, b[i][2])\r\n\r\n\r\n# Function to display stocks in a defined format.\r\ndef goodprinting(stock):\r\n stock_avail = \"\"\r\n for i in range(len(stock)):\r\n stock_avail = stock_avail + str(i + 1) + \": \" + str(\r\n stock[i][0]) + ' : ' + str(stock[i][1]) + \"\\n\"\r\n return stock_avail\r\n\r\n\r\n#Function to create buttons of all stocks' names.\r\ndef buyWindow1():\r\n buy_root = tk.Tk()\r\n buy_root.configure(bg=\"#000000\")\r\n\r\n def Button_Create(name, price):\r\n Button_new = tk.Button(\r\n buy_root,\r\n bg=\"#FFFFFF\",\r\n fg=\"#000000\",\r\n text=name + \": \" + str(price),\r\n command=lambda: buyWindow2(name, price, 'Enter number of stocks to be bought:') or buy_root.destroy())\r\n Button_new.pack()\r\n\r\n for i in stock:\r\n Button_Create(i[0], i[1])\r\n buy_root.mainloop()\r\n\r\n\r\n#Function to take input about which stock is to be bought.\r\ndef buyWindow2(name, price, Text):\r\n Window = tk.Tk()\r\n label = tk.Label(master=Window, text=Text)\r\n label.grid(row=0, column=0)\r\n\r\n entry = tk.Entry(master=Window, bg=\"#FFFFFF\")\r\n entry.grid(row=0, column=1)\r\n\r\n button = tk.Button(\r\n master=Window,\r\n text=\"Submit\",\r\n command=lambda: buy(name, entry.get(), price) or Window.destroy())\r\n button.grid(row=1, column=1)\r\n\r\n\r\n#Function to buy the stock and add it to the gamer's portfolio.\r\ndef buy(name, numberofstocksbought, price):\r\n global money\r\n if int(numberofstocksbought) <= 0 or not (numberofstocksbought.isdigit(\r\n )) or money < int(numberofstocksbought) * price:\r\n buyWindow2(name, price, \"Enter valid number of stocks to be bought\")\r\n elif money > int(numberofstocksbought) * price:\r\n if len(stock_bought) > 0 and (name in stock_bought.keys()):\r\n stock_bought[name] += int(numberofstocksbought)\r\n money -= int(numberofstocksbought) * price\r\n else:\r\n stock_bought[name] = int(numberofstocksbought)\r\n money -= int(numberofstocksbought) * price\r\n events.insert(\r\n tk.END, \"You have \" + str(money) + \" units of money.\" + \"\\n\\n\")\r\n events.see(tk.END)\r\n transactions.append((str(daynum),\"BOUGHT\",name,str(price),str(numberofstocksbought)))\r\n\r\n\r\ndef sellWindow1():\r\n sell_root = tk.Tk()\r\n 
sell_root.configure(bg=\"#000000\")\r\n\r\n def Button_Create(name, price, number):\r\n Button_new = tk.Button(\r\n sell_root,\r\n bg=\"#FFFFFF\",\r\n fg=\"#000000\",\r\n text=name + \": \" + str(price) + \" No.: \" + str(number),\r\n command=lambda: sellWindow2(name, price, 'Enter number of stocks to be sold:') or sell_root.destroy())\r\n Button_new.pack()\r\n\r\n p = 0\r\n for i in stock_bought:\r\n for j in stock:\r\n if i in j:\r\n p = j[1]\r\n if len(stock_bought) > 0:\r\n Button_Create(i, p, stock_bought[i])\r\n\r\n\r\ndef sellWindow2(name, price, Text):\r\n Window = tk.Tk()\r\n label = tk.Label(master=Window, text=Text)\r\n label.grid(row=0, column=0)\r\n\r\n entry = tk.Entry(master=Window, bg=\"#FFFFFF\")\r\n entry.grid(row=0, column=1)\r\n\r\n button = tk.Button(\r\n master=Window,\r\n text=\"Submit\",\r\n command=lambda: sell(name, entry.get(), price) or Window.destroy())\r\n button.grid(row=1, column=1)\r\n\r\n\r\ndef sell(name, number, price):\r\n global money\r\n if int(number) <= 0 or not (number.isdigit()) or int(number) > stock_bought[name]:\r\n sellWindow2(name, price, \"Enter a valid number of stocks to be sold.\")\r\n elif stock_bought[name] >= int(number):\r\n stock_bought[name] -= int(number)\r\n money += int(number) * price\r\n events.insert(tk.END, \"You have \" + str(money) + \" units of money.\" + \"\\n\\n\")\r\n events.see(tk.END)\r\n transactions.append((str(daynum),\"SOLD\",name,str(price),str(number)))\r\n\r\n\r\ndef portfolio():\r\n \r\n global daynum\r\n global transactions\r\n displaystock = \"\"\r\n \r\n portfolio_root = tk.Tk()\r\n portfolio_root.geometry(\"600x600\")\r\n\r\n labelHead = tk.Label(portfolio_root,\r\n bg=\"#000000\",\r\n fg=\"#EAECEE\",\r\n text='Daily Portfolio',\r\n font=\"Helvetica 13 bold\",\r\n pady=5)\r\n labelHead.place(relwidth=1)\r\n\r\n for i in stock_bought:\r\n if len(stock_bought) > 0:\r\n displaystock = displaystock + i + \": \" + str(\r\n stock_bought[i]) + \"\\n\"\r\n else:\r\n displaystock = \"None\"\r\n\r\n port_text = tk.Text(portfolio_root,\r\n height=2,\r\n bg=\"#000000\",\r\n fg=\"#EAECEE\",\r\n font=\"Helvetica 14\",\r\n padx=5,\r\n pady=5)\r\n port_text.place(relheight=0.8, relwidth=1, rely=0.045)\r\n port_text.insert(tk.END, 'Your stocks: \\n')\r\n port_text.insert(tk.END, displaystock + '\\n\\n')\r\n\r\n port_text.insert(tk.END, 'Your transaction history: \\n')\r\n if bool(transactions):\r\n for i in transactions:\r\n port_text.insert(tk.END, (\"Day Number: \" + i[0] + \": \" + i[1] + \" \" + i[4] + \" stocks of \" + i[2] + \" for \" + i[3] + \" units \\n\"))\r\n port_text.insert(tk.END, '\\n')\r\n port_text.see(tk.END)\r\n \r\n labelBottom = tk.Label(portfolio_root, bg=\"#000000\", height=80)\r\n labelBottom.place(relwidth=1, rely=0.825)\r\n\r\n\r\ndef nextday():\r\n global daynum\r\n global money\r\n daynum += 1\r\n inc_dec()\r\n rand_events()\r\n events.insert(tk.END, \"Day number \" + str(daynum) + \"\\n\\n\")\r\n button_buy['state'] = tk.NORMAL\r\n \r\n if money < 1000 and money > 0:\r\n events.insert(tk.END, \"Warning! You have only \" + str(money) + \" units left.\" + \"\\n\\n\")\r\n elif money <= 0:\r\n events.insert(tk.END, \"You have spent all your units of money. You will be unable to buy anymore stocks\\\r\n till you sell some stocks or get a bonus. \\n\\n\")\r\n money = 0\r\n button_buy['state'] = tk.DISABLED\r\n\r\n if daynum == 20:\r\n events.insert(tk.END, \"Your uncle is almost back! Maximise profits. 
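Note: both `buy()` and `sell()` evaluate `int(entry_text)` in the same condition as the `isdigit()` guard, and `int()` runs first, so non-numeric input raises `ValueError` before the re-prompt branch is reached. A hedged sketch of a validator that checks before converting (`parse_quantity` is a hypothetical helper, not part of the game):

```python
def parse_quantity(text):
    """Return the entered quantity as a positive int, or None if invalid.

    isdigit() runs *before* int(), so non-numeric entry text falls through
    to the re-prompt path instead of raising ValueError as buy()/sell() can.
    """
    if not text.isdigit():          # rejects '', '-3', '1.5', 'abc'
        return None
    n = int(text)
    return n if n > 0 else None

print(parse_quantity("12"), parse_quantity("abc"))  # 12 None
```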
\\n\\n\")\r\n elif daynum == 28:\r\n events.insert(\r\n tk.END,\r\n \"Your uncle is almost there gamer! You've done well. Keep up the hardwork, just a couple more days.\\\r\n When your uncle comes back, he shall evaluate you based on your profit earned and the amount you currently have invested.\"\r\n + \"\\n\\n\")\r\n elif daynum == 29:\r\n events.insert(tk.END, \"Hello Gamer. Your uncle will review your file in a while now. Make your final transactions\\\r\n NOW! \\n\\n\")\r\n elif daynum >= 30:\r\n events.insert(tk.END, \"Your uncle has returned! Here is your game summary. \\n\\n\")\r\n summary()\r\n button_buy['state'] = tk.DISABLED\r\n button_sell['state'] = tk.DISABLED\r\n button_portfolio['state'] = tk.DISABLED\r\n button_next['state'] = tk.DISABLED\r\n events.see(tk.END)\r\n\r\n\r\ndef summary():\r\n global money\r\n total = 0\r\n events.insert(tk.END, \"Here is your remaining balance: \" + str(money) + \"\\n\")\r\n events.see(tk.END)\r\n cost = 1\r\n for i in stock_bought:\r\n for j in stock:\r\n if i in j:\r\n cost = j[1]\r\n total += cost * stock_bought[i]\r\n events.insert(tk.END, \"The total amount of money (after selling your stocks) you have is \" +\r\n str(total + money) + \"\\n\")\r\n events.see(tk.END)\r\n if (total + money) > 5000:\r\n events.insert(tk.END, \"Congratulations! You have earned \" + str((total+money) - 5000) +\r\n \" units of money. You are a natural stockbroker.\")\r\n elif (total + money) <= 5000:\r\n events.insert(tk.END, \"You did not make any profit. Better luck next time!\")\r\n events.see(tk.END)\r\n\r\n\r\ndef endgame():\r\n end_window = tk.Tk()\r\n label = tk.Label(\r\n master=end_window,\r\n text='Are you sure you want to leave. Your progress will not be saved.'\r\n )\r\n label.grid(row=0, column=1)\r\n\r\n Confirm_button = tk.Button(\r\n master=end_window,\r\n text=\"YES\",\r\n fg='#006400',\r\n command=lambda: end_window.destroy() or root.destroy())\r\n Confirm_button.grid(row=1, column=0)\r\n\r\n Return_button = tk.Button(master=end_window,\r\n text=\"NO\",\r\n fg='#FF0000',\r\n command=end_window.destroy)\r\n Return_button.grid(row=1, column=2)\r\n\r\n\r\n# Function to create random events.\r\ndef rand_events():\r\n global money\r\n global stock_bought\r\n global stock\r\n global flag1 \r\n global flag2\r\n global flag3\r\n number = random.randint(1, 50)\r\n \r\n if number == 51:\r\n events.insert(tk.END,\r\n \"Scandal! The stock market has plummeted due to the revelation of a huge Ponzi scheme. You have lost\\\r\n a lot of your money and all your stocks. You have to start again from scratch. \\n\\n\")\r\n if money >= 1000:\r\n money = 1000\r\n stock_bought = {}\r\n events.insert(tk.END, \"You have \" + str(money) + \" units of money. \\n\\n\")\r\n events.see(tk.END)\r\n elif (number == 10 or number == 20 or number == 30) and (flag1 < 4):\r\n events.insert(tk.END, \"Good Fortune! Increased employment and investment has awarded you with 1000 units of money! \\n\\n\")\r\n money += 1000\r\n flag1 += 1\r\n events.insert(tk.END, \"You have \" + str(money) + \" units of money. \\n\\n\")\r\n events.see(tk.END)\r\n elif number == 15:\r\n if flag2 == 1:\r\n pass\r\n else:\r\n flag2 += 1\r\n events.insert(tk.END, \"A new and upcoming company has reached new heights! It is now available to trade.\\\r\n The company is called: Tesla and the stock price is 50.00 \\n\\n\")\r\n stock.append(['Tesla', 50.0, 2.5])\r\n events.see(tk.END)\r\n elif number == 40:\r\n if flag3 == 0:\r\n events.insert(tk.END, \"Cipla Ltd. has gone bankrupt! 
It has been removed from the stock market and all\\\r\n stocks owned by you for it have been removed. \\n\\n\")\r\n for i in stock:\r\n if 'Cipla Ltd.' == i[0]:\r\n stock.remove(i)\r\n if 'Cipla Ltd.' in stock_bought.keys():\r\n stock_bought.pop('Cipla Ltd.')\r\n flag3 += 1\r\n events.see(tk.END)\r\n else:\r\n pass\r\n elif number==39:\r\n events.insert(tk.END, \"Congratulations! Apple Inc. has gifted you a stock. Check your portfolio to find the stock. \\n\\n\")\r\n if \"Apple Inc.\" in stock_bought.keys():\r\n stock_bought[\"Apple Inc.\"] += 1\r\n else:\r\n stock_bought[\"Apple Inc.\"] = 1\r\n events.see(tk.END)\r\n elif number == 3:\r\n if \"Reliance Industries Ltd.\" in stock_bought:\r\n events.insert(tk.END,\r\n \"You have been caught insider trading for the Reliance Industries Ltd. As a result, you have been ordered to\\\r\n pay a criminal fine of 1000 units \\n\\n\")\r\n if money >= 1000:\r\n money -= 1000\r\n elif money < 1000 and money!=0:\r\n money = 0\r\n events.insert(tk.END, \"You have \" + str(money) + \" units of money. \\n\\n\")\r\n\r\n\r\n# Main Window GUI\r\nroot = tk.Tk()\r\nroot.title(\"Stock Market Game\")\r\nroot.geometry(\"1080x720\")\r\nHeader = tk.Label(root,\r\n bg=\"#000000\",\r\n fg=\"#EAECEE\",\r\n text='Events',\r\n font=\"Helvetica 13 bold\",\r\n pady=5)\r\nHeader.place(relwidth=1)\r\n\r\nevents = tk.Text(root,\r\n height=2,\r\n bg=\"#000000\",\r\n fg=\"#EAECEE\",\r\n font=\"Helvetica 14\",\r\n padx=5,\r\n pady=5)\r\nevents.place(relheight=0.8, relwidth=1, rely=0.045)\r\n\r\nlabelBottom = tk.Label(root, bg=\"#ABB2B9\", height=80)\r\nlabelBottom.place(relwidth=1, rely=0.85)\r\n\r\nscrollbar = tk.Scrollbar(root, command=events.yview, width=8)\r\nscrollbar.pack(side=tk.RIGHT, fill='y')\r\n\r\n#In-game scenario.\r\nevents.insert(\r\n tk.END,\r\n \"Hello Gamer! You have been chosen by your stock broker uncle to continue his business\\\r\n while he goes on a much deserved holiday. He has appointed you as his chief stock broker for 30 days and you\\\r\n are determined to prove to him that you are a worthy successor. Try to make as much money as possible in these\\\r\n 30 days and impress your uncle. Good Luck and I hope you enjoy the game! \\n\\n\"\r\n)\r\n\r\n#Instructions for the gamer to understand how to play.\r\nevents.insert(\r\n tk.END, '''Here are the instructions on how to play:\r\n1. Click on the buy button to buy stocks. It will display the stock name as ABC: 123 where ABC is the name of the stock and 123 is it's current price.\r\n2. Click on the sell button to sell stocks you already own. The stocks will be displayed as ABC: 123 No.: X, where ABC is name of the stock, 123 is the current price of the stock and X shows how many stocks you own.\r\n3. Click on the Portfolio button to see the stocks you own and how many are owned. It also shows the current day in the game.\r\n4. The next day button will take you to the next day. Click it once you have finished buying or selling all the stocks you want at some steady price. By clicking next day, the current price of the stocks will either increase or decrease and you can make a profit by selling those stocks you bought at a lower price in the previous days and selling them at higher prices caused due to the fluctuation in prices.\r\n5. The close button will allow you to quit the game before it ends.\r\n6. The main window labelled Events (where you are reading this) is where the amount of money you currently have will be displayed. 
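Note: `rand_events()` rolls `random.randint(1, 50)` and then tests `number == 51`, a value `randint(1, 50)` can never return, so the market-crash event is unreachable as written. A sketch of a weighted event roll where every branch is actually reachable (event names and weights below are illustrative, not taken from the game):

```python
import random

EVENTS = {"market_crash": 1, "cash_bonus": 3, "new_listing": 1, "nothing": 45}

def roll_event():
    # random.choices draws one key with probability weight/sum(weights),
    # so every listed event can fire - unlike the `== 51` branch after
    # randint(1, 50) in rand_events().
    return random.choices(list(EVENTS), weights=list(EVENTS.values()), k=1)[0]

print(roll_event())
```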
There will also be in game announcements on that window that can give you more money or take some away.\r\nBest of luck gamer! We hope you enjoy.\r\n\r\n''')\r\n\r\nevents.insert(tk.END, \"You have \" + str(money) + \" units of money.\" + \"\\n\\n\")\r\n\r\n#Buttons for each function: buying, selling and checking the portfolio\r\n#To create button for buying stocks.\r\nbutton_buy = tk.Button(labelBottom,\r\n text=\"BUY\",\r\n font=\"Helvetica 10 bold\",\r\n width=20,\r\n bg=\"#ABB2B9\",\r\n fg=\"#008000\",\r\n command=buyWindow1)\r\nbutton_buy.place(relx=0, rely=0.008, relheight=0.06, relwidth=0.22)\r\n\r\n#To create button for selling stocks.\r\nbutton_sell = tk.Button(labelBottom,\r\n text=\"SELL\",\r\n font=\"Helvetica 10 bold\",\r\n width=20,\r\n bg=\"#ABB2B9\",\r\n fg=\"#FF0000\",\r\n command=sellWindow1)\r\nbutton_sell.place(relx=0.25, rely=0.008, relheight=0.06, relwidth=0.22)\r\n\r\n#To create button for showing gamer's portfolio.\r\nbutton_portfolio = tk.Button(labelBottom,\r\n text=\"PORTFOLIO\",\r\n font=\"Helvetica 10 bold\",\r\n width=20,\r\n bg=\"#ABB2B9\",\r\n command=portfolio)\r\nbutton_portfolio.place(relx=0.50, rely=0.008, relheight=0.06, relwidth=0.22)\r\n\r\n#To create button for progressing the game to the next day.\r\nbutton_next = tk.Button(labelBottom,\r\n text=\"NEXT DAY\",\r\n font=\"Helvetica 10 bold\",\r\n width=20,\r\n bg=\"#ABB2B9\",\r\n fg=\"#008000\",\r\n command=nextday)\r\nbutton_next.place(relx=0.75, rely=0.008, relheight=0.06, relwidth=0.22)\r\n\r\n#To create a button through which the game can be ended early.\r\nbutton_exit = tk.Button(Header,\r\n text=\"CLOSE X\",\r\n bg=\"#FF0000\",\r\n command=lambda: endgame())\r\nbutton_exit.place(relx=0.94)\r\nevents.config(cursor=\"arrow\")\r\n","repo_name":"VyoJ/StockMarketGame","sub_path":"StockMarketGame.py","file_name":"StockMarketGame.py","file_ext":"py","file_size_in_byte":17848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"1058628981","text":"import meep as mp\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys, getopt,os\nfrom datetime import date\nimport pickle,json, os\nimport time\n\n\n\nclass Model:\n\n\tdef __init__(self):\n\n\t\t#init constants\n\t\tself.TicToc = self.TicTocGenerator() # create an instance of the TicTocGen generator\n\n\t\t##Material N\n\t\tself.nCoating = 1.41\n\t\tself.capN = 1.440\n\t\tself.fillN = 2.50\n\n\t\t##Capillary Dimentions\n\t\tself.OD = 62\n\t\tself.WallThick = 8.2\n\n\t\t##Resonator Dimention\n\n\t\t##Src properties\n\t\tself.fcen = 1/1.55\n\t\tself.df = 0.1e-2 #0.8e-2\n\t\tself.nfreq = 1000\n\n\t\t##MEEP properties\n\t\tself.dpml = 5\n\t\tself.res = 10/1.55\n\t\tself.DecayF = 1e-2\n\t\tself.WallT = 0\n\t\tself.SimT = 1e6\n\t\tself.today = str(date.today())\n\t\tself.workingDir= ''\n\t\tself.filename = 'test'\n\t\tself.Datafile = 'test'\n\t\tself.sim = None\n\t\tself.Objlist = []\n\t\tself.Notes = ''\n\t\tself.NormComplete = False\n\t\tself.SingleNorm = False\n\t\tself.Courant = 1/np.sqrt(2)\n\n\n\t\t#init Data arrays\n\t\tself.srcE = np.array([])\n\t\tself.tranE = np.array([])\n\n\t\tself.PDMSindex()\n\t\tself.Silicaindex()\n\n\n\tdef TestSpectrum(self):\t\n\n\t\tself.Objlist = []\t\n\t\tself.buildPolished() \t\t\t\t\t\t#builds base polished fibre structure list\n\t\tself.ADDsqrBubbles() \t\t\t\t\t#add sqr bubbles to the structure list\n\t\tself.ADDsqrEmptyBubbles()\n\t\tself.BuildModel(NormRun=False,Plot=True) \n\n\t\t#load data from the normal 
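Note: the main window creates `tk.Scrollbar(root, command=events.yview)` but never sets the reverse link on the `Text` widget, so the bar does not track the text as it scrolls. A minimal two-way wiring sketch:

```python
import tkinter as tk

root = tk.Tk()
events = tk.Text(root)
scrollbar = tk.Scrollbar(root, command=events.yview, width=8)
# The game sets only command=events.yview; without the reverse link below
# the scrollbar never moves as the Text widget scrolls.
events.config(yscrollcommand=scrollbar.set)
scrollbar.pack(side=tk.RIGHT, fill="y")
events.pack(side=tk.LEFT, fill="both", expand=True)
root.mainloop()
```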
run\n\t\t\n\t\tself.QuickRun()\n\n\t\t\n\n\tdef mkALLDIRS(self):\n\t\t\n\t\tself.workingDir = 'data/'+self.today+'/'+self.filename+'/'\n\n\t\tprint('WD:',self.workingDir)\n\n\t\ttry:\n\t\t\tos.makedirs(self.workingDir)\n\t\texcept:\n\t\t\tprint('AlreadyDir')\n\n\t\tself.sim.use_output_directory(self.workingDir)\n\n\n\n\tdef buildFilledCapillary(self):\n\n\t\tself.sx = self.OD + 10 + 2*self.dpml\n\t\tself.sy = self.OD + 10 + 2*self.dpml\n\n\t\t\n\t\tself.cell_size = mp.Vector3(self.sx,self.sy,0)\n\n\t\tself.pml_layers = [mp.PML(thickness=self.dpml)]\n\n\n\t\tOD = mp.Cylinder(\n\t\t\tradius=self.OD/2,\n\t\t\theight=mp.inf,\n\t\t\taxis=mp.Vector3(0,0,1),\n\t\t\tmaterial=mp.Medium(index=self.capN)\n\t\t\t)\n\n\t\tID = mp.Cylinder(\n\t\t\tradius=(self.OD - (self.WallThick*2))/2,\n\t\t\theight=mp.inf,\n\t\t\taxis=mp.Vector3(0,0,1),\n\t\t\tmaterial=mp.Medium(index=self.fillN)\n\t\t\t)\n\n\t\tself.Objlist.extend([OD,ID])\n\n\n\tdef BuildModel(self,Plot=False,NormRun=False): # builds sim and plots structure to file \n\t\t\n\t\tkx = 0.4\n\t\tkpoint = mp.Vector3(kx)\n\n\t\tself.src = [\n\t\t\t\tmp.EigenModeSource(src=mp.GaussianSource(self.fcen,fwidth=self.df),\n\t\t\t\tcenter=mp.Vector3(x=0,y=-self.OD/2),\n\t\t\t\tsize=mp.Vector3(y=4),\n\t\t\t\tdirection=mp.X,\n\t\t\t\teig_kpoint=kpoint,\n\t\t\t\teig_band=1,\n\t\t\t\teig_parity=mp.EVEN_Y,\n\t\t\t\teig_match_freq=True\n\t\t\t\t)\n\t\t\t]\n\n\t\t\n\t\tself.sim = mp.Simulation(\n\t\t\tcell_size=self.cell_size,\n\t\t\tgeometry=self.Objlist,\n\t\t\tsources=self.src,\n\t\t\tresolution=self.res,\n\t\t\tforce_complex_fields=False,\n\t\t\teps_averaging=True,\n\t\t\tboundary_layers=self.pml_layers,\n\t\t\tprogress_interval=30,\n\t\t\tCourant=self.Courant\n\t\t\t#k_point=mp.Vector3(mp.X)\n\t\t\t)\n\n\n\t\tself.mkALLDIRS()\n\n\t\t#Stored_flux = mp.FluxRegion(center=mp.Vector3(0,self.OD/2 - 5,0), size=mp.Vector3(0,12,0))\n\t\t#self.Stored = self.sim.add_flux(self.fcen, self.df, self.nfreq, Stored_flux)\n\n\t\tstoredFlux = mp.FluxRegion(center=mp.Vector3(0,self.OD/2 - 4,0), size=mp.Vector3(0,12,0))\n\t\tself.storedFlux = self.sim.add_flux(self.fcen, self.df, self.nfreq, storedFlux)\n\n\t\tfig,ax = plt.subplots(dpi=150)\n\t\tif NormRun:\n\t\t\tself.sim.plot2D(ax=ax,eps_parameters={'alpha':0.8, 'interpolation':'none'},frequency=0)\n\t\t\tplt.savefig(self.workingDir+\"NormModel_\" + str(self.Datafile) +\".pdf\")\n\t\telse:\n\t\t\tself.sim.plot2D(ax=ax,eps_parameters={'alpha':0.8, 'interpolation':'none'},frequency=0)\n\t\t\tplt.savefig(self.workingDir+\"Model_\" + str(self.Datafile) +\".pdf\")\n\n\t\t\n\n\tdef NormRun(self):\n\n\t\tprint(\"\")\n\t\tprint(\"\")\n\t\tprint(\"Normalisation Run\")\n\t\tprint(\"\")\n\t\tprint(\"\")\n\n\t\t#while sum(mp.get_fluxes(self.tranE)) == 0.0:\n\t\t#\tprint(sum(mp.get_fluxes(self.tranE)))\n\t\t#\tprint(\"looped\")\n\t\t\n\t\tself.sim.run(\n\t\t#\t#mp.at_beginning(mp.output_epsilon),\n\t\t\t#mp.at_every(100,mp.output_efield_z),\n\t\t\tuntil=2*self.sx*self.coreN\n\t\t\t\n\t\t\t)\n\t\t#\n\n\t\tself.sim.run(\n\t\t#\t#mp.at_beginning(mp.output_epsilon),\n\t\t#\t#mp.at_every(250,mp.output_efield_z),\n\t\t\tuntil=mp.stop_when_fields_decayed(\n\t\t\t\t500,\n\t\t\t\tmp.Ez,mp.Vector3(0.5*self.sx - 0.5*self.dpml,0),self.DecayF\n\t\t\t\t)\n\t\t)\n\n\t\t\n\n\t\t\n\t\t# for normalization run, save flux fields data for reflection plane\n\t\tself.norm_refl = self.sim.get_flux_data(self.refl)\n\t\t# save incident power for transmission plane\n\t\tself.norm_tran = mp.get_fluxes(self.tranE)\n\n\n\n\tdef 
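Note: `NormRun()` (and `QuickRun()` further down) read `self.refl` and `self.tranE`, but `BuildModel()` only ever creates `self.storedFlux`, so those attribute accesses raise `AttributeError` as written. A hedged sketch of monitor lines that could be added at the end of `BuildModel()`; the positions and sizes below are assumptions, not taken from the original geometry:

```python
import meep as mp

def add_refl_tran_monitors(model):
    """Create the flux monitors NormRun()/QuickRun() expect to read back."""
    # Reflection monitor a quarter-cell left of center (assumed placement).
    refl_fr = mp.FluxRegion(center=mp.Vector3(-0.25 * model.sx), size=mp.Vector3(y=4))
    model.refl = model.sim.add_flux(model.fcen, model.df, model.nfreq, refl_fr)
    # Transmission monitor a quarter-cell right of center (assumed placement).
    tran_fr = mp.FluxRegion(center=mp.Vector3(0.25 * model.sx), size=mp.Vector3(y=4))
    model.tranE = model.sim.add_flux(model.fcen, model.df, model.nfreq, tran_fr)
```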
AutoRun(self):\n\t\t\n\t\tprint(\"\")\n\t\tprint(\"\")\n\t\tprint(\"Actual Run\")\n\t\tprint(\"\")\n\t\tprint(\"\")\n\n\t\t#self.myRunFunction(self.monitorPts)\n\t\t\n\n\t\tself.sim.run(\n\t\t\tmp.at_beginning(mp.output_epsilon),\n\t\t\tuntil_after_sources=100\n\t\t)\n\n\t\tself.sim.run(\n\t\t\t#mp.at_every(100, mp.output_efield_z), \n\t\t\tuntil=self.SimT\n\t\t)\n\n\n\n\t\t# initialize wl vs y-pos matrix.\n\t\tmatrix = np.zeros([len(self.sim.get_dft_array(self.storedFlux,mp.Ez,0)),self.nfreq],dtype=np.complex128)\n\t\t\n\t\t# fill matrix\n\t\tfor i in range(0,self.nfreq):\n\t\t\tmatrix[:,i] = self.sim.get_dft_array(self.storedFlux,mp.Ez,i)\n\n\n\t\twith open(self.workingDir + self.Datafile + \".pkl\", 'wb') as file:\n\t\t\tpickle.dump(matrix,file)\n\n\n\t\t\"\"\"\n\t\twl = []\n\t\tRs = []\n\t\tTs = []\n\t\tfor i in range(self.nfreq):\n\t\t\twl = np.append(wl, 1/flux_freqs[i])\n\t\t\tRs = np.append(Rs,-refl_flux[i]/self.norm_tran[i])\n\t\t\tTs = np.append(Ts,tran_flux[i]/self.norm_tran[i])\n\n\t\tplt.figure()\n\t\tplt.plot(wl,Rs,'--',label='reflectance')\n\t\tplt.plot(wl,Ts,label='transmittance')\n\t\tplt.plot(wl,1-Rs-Ts,label='loss')\n\t\t#plt.axis([5.0, 10.0, 0, 1])\n\t\tplt.xlabel(\"wavelength (μm)\")\n\t\tplt.legend(loc=\"upper right\")\n\t\tplt.savefig(self.workingDir+\"TransRef_\" + str(self.Datafile) +\".pdf\")\n\t\t#plt.show()\n\t\t\"\"\"\n\n\n\tdef QuickRun(self):\n\t\t\n\t\tprint(\"\")\n\t\tprint(\"\")\n\t\tprint(\"Quick Run\")\n\t\tprint(\"\")\n\t\tprint(\"\")\n\n\t\t#self.myRunFunction(self.monitorPts)\n\n\t\tself.sim.run(\n\t\t#\t#mp.at_beginning(mp.output_epsilon),\n\t\t\t#mp.at_every(100,mp.output_efield_z),\n\t\t\tuntil=2*self.sx*self.coreN\n\t\t\t\n\t\t\t)\n\n\t\tflux_freqs = mp.get_flux_freqs(self.refl)\n\t\trefl_flux = mp.get_fluxes(self.refl)\n\t\ttran_flux = mp.get_fluxes(self.tranE)\n\t\t\n\t\twl = []\n\t\tRs = []\n\t\tTs = []\n\t\tfor i in range(self.nfreq):\n\t\t\twl = np.append(wl, 1/flux_freqs[i])\n\t\t\tRs = np.append(Rs,-refl_flux[i])\n\t\t\tTs = np.append(Ts,tran_flux[i])\n\n\t\tplt.figure()\n\t\tplt.plot(wl,Rs,'--',label='reflectance')\n\t\tplt.plot(wl,Ts,label='transmittance')\n\t\tplt.plot(wl,1-Rs-Ts,label='loss')\n\t\t#plt.axis([5.0, 10.0, 0, 1])\n\t\tplt.xlabel(\"wavelength (μm)\")\n\t\tplt.legend(loc=\"upper right\")\n\t\tplt.savefig(self.workingDir+\"TransRef_\" + str(self.Datafile) +\".pdf\")\n\t\t#plt.show()\n\n\n\tdef TimestepFields(self):\n\t\t\n\t\tfig,axes = plt.subplots(1, 1,dpi=200)\n\n\t\tself.sim.run(\n\t\t\tuntil=self.SimT\n\t\t\t)\n\t\t\n\t\t#self.sim.plot2D(fields=mp.Ez,plot_sources_flag=True,plot_monitors_flag=True)\n\t\tself.sim.plot2D(\n\t\t\tax = axes,\n\t\t\t#output_plane=mp.Volume(center=mp.Vector3(),size=mp.Vector3(self.SimSize,self.SimSize)),\n\t\t\tfields=mp.Ez,\n\t\t\tplot_sources_flag=True,\n\t\t\tplot_monitors_flag=True,\n\t\t\tplot_eps_flag=True,\n\t\t\teps_parameters={'alpha':0.8, 'interpolation':'none','cmap':'binary','contour':True}\n\t\t\t)\n\t\tplt.show()\n\n\n\tdef SaveMeta(self):\n\t\t\n\t\tmetadata = {\n\t\t\"Runtime\":self.Runtime,\n\t\t\"Chunks\":self.sim.num_chunks,\n\t\t##Material N\n\t\t\"nCoating\": self.nCoating,\n\t\t\"CoreN\":self.coreN,\n\t\t\"CladN\":self.cladN,\n\t\t##Fibre Dimentions\n\t\t\"R1\":self.R1,\n\t\t\"R2\":self.R2,\n\t\t\"CladLeft\":self.CladLeft,\n\t\t##Resonator Dimentions\n\t\t\"Depth\":self.Depth,\n\t\t\"Width\":self.Width,\n\t\t\"GAP\":self.GAP,\n\t\t\"Rw\":self.Rw,\n\t\t##Src properties\n\t\t\"fcen\":self.fcen,\n\t\t\"df\":self.df, \n\t\t\"nfreq\":self.nfreq,\n\n\t\t##MEEP 
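Note: `AutoRun()` fills a complex (y-position, frequency) matrix from the stored-flux DFT and pickles it to `workingDir + Datafile + ".pkl"`. A small sketch of reading it back for analysis; the path below is hypothetical:

```python
import pickle
import numpy as np

# The file name follows workingDir + Datafile + ".pkl"; this one is made up.
with open("data/2023-01-01/test/test.pkl", "rb") as f:
    matrix = pickle.load(f)

# Rows are y-positions along the stored-flux line, columns the nfreq bins.
intensity = np.abs(matrix) ** 2
print(matrix.shape, intensity.max())
```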
properties\n\t\t\"dpml\":self.dpml,\n\t\t\"resolution\":self.res,\n\t\t\"DecayF\":self.DecayF,\n\t\t\"WallT\":self.WallT,\n\t\t\"SimT\":self.SimT,\n\t\t\"today\":self.today,\n\t\t\"WorkingDir\":self.workingDir,\n\t\t\"filename\":self.filename,\n\t\t\"notes\":self.Notes,\n\n\t\t\"sx\":self.sx,\n\t\t\"sy\":self.sy\n }\n\n\t\t\n\t\twith open(self.workingDir + str(self.sim.num_chunks) + '_metadata.json', 'w') as file:\n\t\t\tjson.dump(metadata, file)\n\n\n\n\n\tdef dumpData2File(self):\n \n\t\t# initialise main data dictionary\n\t\tData = {}\n\t\t\n\t\t\n\t\tData['Src']['lambda'] = 1/np.array(mp.get_flux_freqs(self.srcE))\n\t\tData['Src']['flux'] = np.array(mp.get_fluxes(self.srcE))\n\t\t\n\t\tData['Out']['lambda'] = 1/np.array(mp.get_flux_freqs(self.tranE))\n\t\tData['Out']['flux'] = np.array(mp.get_fluxes(self.tranE))\n\t\t\n\n\t\tmetadata = {\n\t\t\t\"date\": str(self.today),\n\t\t\t\"Data\": self.workingDir+\"Data.pk1\"\n\t\t}\n\n\t\tmetadata = {**metadata,**self.meta}\n\t\t\n\t\tData = {}\n\t\tData['Src'] = {} # sensor just after source.\n\t\tData['Out'] = {} # sensor at the end of the WG (for transmission)\n\n\t\twith open(metadata['Data'], 'wb') as file:\n\t\t\tpickle.dump(Data,file)\n\n\n\t\twith open(self.workingDir + 'metadata.json', 'w') as file:\n\t\t\tjson.dump(metadata, file)\n\n\n\tdef pltModel(self,Plt):\n\t\tplt.figure(dpi=200)\n\t\tself.sim.plot2D(eps_parameters={'alpha':0.8, 'interpolation':'none'})\n\t\tif Plt:\n\t\t\tplt.show()\n\t\tplt.savefig(self.workingDir+\"Model.pdf\")\n\n\n\tdef TicTocGenerator(self):\n\t\t# Generator that returns time differences\n\t\tti = 0 # initial time\n\t\ttf = time.time() # final time\n\t\twhile True:\n\t\t\tti = tf\n\t\t\ttf = time.time()\n\t\t\tyield tf-ti # returns the time difference\n\n\n\t# This will be the main function through which we define both tic() and toc()\n\tdef toc(self,tempBool=True):\n\t\t# Prints the time difference yielded by generator instance TicToc\n\t\tself.Runtime = next(self.TicToc)\n\t\t \n\n\tdef tic(self):\n\t\tself.Runtime = 0\n\t\t# Records a time in TicToc, marks the beginning of a time interval\n\t\tself.toc(False)\n\n\tdef PDMSindex(self):\n\n\t\tself.PDMStemp = np.array([27.04200613, 30.04708872, 40.09978324, 50.0485836, 60.10202556, 70.05194708, 80.00074744])\n\t\tself.nPDMS = np.array([1.410413147,1.409271947,1.405629718,1.4019877,1.398453453,1.394973372,1.391331424])\n\t\tself.PDMSfit = np.polyfit(self.PDMStemp,self.nPDMS,deg=1)\n\n\n\n\n\tdef Silicaindex(self):\n\t\t\n\t\tself.Silicatemp = np.array([22.83686643,40.36719542,70.32692845,103.3346833])\n\t\tself.nSilica = np.array([1.445300107,1.44555516,1.445847903,1.445958546])\n\t\tself.SilicaFIT = np.polyfit(self.Silicatemp,self.nSilica,deg=1)\n\n\n\n\tdef addtriBubbles(self):\n\n\t\tRW = self.Rw\n\t\tTL = self.Width\n\n\t\tverts = [\n\t \n\t mp.Vector3(x=-(RW/2+TL/2) ,y=self.R2 ,z=0) ,\n\t mp.Vector3(x=(RW/2+TL/2) ,y=self.R2 ,z=0) ,\n\t mp.Vector3(x=(RW/2) ,y=self.R2-self.Depth ,z=0) ,\n\t mp.Vector3(x=-(RW/2) ,y=self.R2-self.Depth ,z=0)\n\t \n\t ]\n\n\n\t\tself.LH = mp.Prism(center=mp.Vector3(x=-self.GAP/2,y=0,z=0),\n\t vertices = verts,\n\t material=mp.Medium(index=self.PDMSn),\n\t height=1\n\t )\n\n\t\tself.RH = mp.Prism(center=mp.Vector3(x=self.GAP/2,y=0,z=0),\n\t vertices = verts,\n\t material=mp.Medium(index=self.PDMSn),\n\t height=1\n\t 
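Note: `dumpData2File()` assigns `Data['Src']['lambda']` while `Data` is still an empty dict (a `KeyError`), and later resets `Data = {}` after filling it, so the pickle it writes holds only empty sub-dicts. A sketch with the dict built in a working order; the argument names are assumptions:

```python
import json
import pickle
import numpy as np

def dump_data(workingdir, src_freqs, src_flux, out_freqs, out_flux, meta):
    """Sketch of dumpData2File() with the nested dict built before use.

    The original writes into Data['Src'] before that key exists and then
    re-initializes Data to {} just before pickling; building everything
    up front avoids both problems.
    """
    data = {
        "Src": {"lambda": 1 / np.asarray(src_freqs), "flux": np.asarray(src_flux)},
        "Out": {"lambda": 1 / np.asarray(out_freqs), "flux": np.asarray(out_flux)},
    }
    with open(workingdir + "Data.pk1", "wb") as f:
        pickle.dump(data, f)
    with open(workingdir + "metadata.json", "w") as f:
        json.dump({**meta, "Data": workingdir + "Data.pk1"}, f)
```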
)\n\n\t\tself.Objlist.extend([self.LH,self.RH])\n\t\t","repo_name":"deyh2020/MEEP_Models","sub_path":"WGMs/LoadedCapillary/Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":10864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"25234778023","text":"from __future__ import print_function\nimport pickle\nfrom game import LineGame\nfrom learning.game_tree import FullGameTree\n\ndef game_tree_3x3():\n game = LineGame(3,3)\n start_state = game.get_state_hash()\n tree = FullGameTree()\n tree.dfs(game)\n pickle.dump(tree.state_val, open('data/state_val_3x3.pkl', 'wb'))\n print('\\n', len(tree.state_val))\n print(start_state, tree.state_val[start_state])\n for move in game.get_valid_moves():\n game.check_move(move)\n print(game.game_state, tree.state_val[game.get_state_hash()])\n game.delete_move()\n\nif __name__ == '__main__':\n game_tree_3x3()","repo_name":"seal256/gomoku","sub_path":"learn.py","file_name":"learn.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"10738639561","text":"import argparse\nimport requests\nimport sys\nimport re\nimport pandas as pd\nimport itertools\n\nfrom sqlalchemy import and_\n\nsys.path.insert(0, '../')\nfrom db import db # nopep8\nfrom app import app # nopep8\n\nfrom models.element import ElementModel, ElementPriceModel # nopep8\nfrom models.part import PartModel # nopep8\n\n\ndef toDesignId(nbr):\n if nbr.find('-1') > -1:\n return nbr\n else:\n p = re.search(r'^\\d{4,5}', nbr)\n if p is not None:\n return p.group()\n else:\n return None\n\n\ndef insertPrice(session, id, provider_id, price):\n print(\n 'Insert element %s' % id\n )\n session.add(ElementPriceModel(\n element_id=id,\n provider_id=provider_id,\n price=price\n ))\n\n\nparser = argparse.ArgumentParser(\n description='Get part prices.'\n)\nparser.add_argument('--max_items', dest='max_items', type=int, default=10,\n help='max sets to process')\n\nmax_items = parser.parse_args().max_items\n\nurl = 'https://bricksandpieces.services.lego.com/api/v1/bricks/%s/%s'\n\nparams = dict()\nparams['country'] = 'CH'\nparams['orderType'] = 'buy'\n\nheaders = dict()\nheaders['x-api-key'] = 'saVSCq0hpuxYV48mrXMGfdKnMY1oUs3s'\n\n\ndb.init_app(app)\nwith app.app_context():\n element_prices = db.session.query(ElementPriceModel.element_id)\n\n element_list = db.session.query(\n PartModel.part_num,\n ElementModel.element_id\n ).join(\n ElementModel,\n PartModel.id == ElementModel.part_id,\n isouter=True\n ).filter(and_(\n PartModel.part_num.notlike('0%'),\n PartModel.part_cat_id.notin_([58]),\n ElementModel.element_id != None, # nopep8\n ElementModel.element_id.notin_(element_prices)\n )).distinct().limit(max_items).all()\n\n element_list = sorted(element_list, key=lambda x: x[0])\n element_dict = {\n toDesignId(k): [p1[1] for p1 in p]\n for k, p in itertools.groupby(element_list, lambda x: x[0])\n }\n i = 0\n for ele in element_dict.keys():\n if i < max_items and ele is not None:\n resp = requests.get(\n url % ('items', ele),\n headers=headers,\n params=params\n )\n\n if resp.status_code == 200 and 'bricks' in resp.json():\n df = pd.DataFrame(resp.json()['bricks'])\n db_element_ids = element_dict[ele]\n for index, row in df.iterrows():\n if row['itemNumber'] in db_element_ids:\n db_element_ids.remove(row['itemNumber'])\n price_id = db.session.query(ElementPriceModel.id).filter(\n ElementPriceModel.element_id == row['itemNumber']\n ).first()\n\n 
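Note: `game_tree_3x3()` above pickles the full 3x3 state-value table after the depth-first search. A sketch of loading it back for inspection, using the path as written in learn.py:

```python
import pickle

# Reload the state -> value table written by game_tree_3x3().
with open("data/state_val_3x3.pkl", "rb") as f:
    state_val = pickle.load(f)

print(len(state_val), "states evaluated")
```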
if price_id is None:\n i += 1\n insertPrice(\n db.session,\n row['itemNumber'],\n 1,\n int(float(row['price']['amount']) * 100)\n )\n\n for db_id in db_element_ids:\n price_id = db.session.query(ElementPriceModel.id).filter(\n ElementPriceModel.element_id == db_id\n ).first()\n\n if price_id is None:\n insertPrice(\n db.session,\n db_id,\n 1,\n -1\n )\n\n elif resp.status_code == 204:\n print('Element %s not found' % ele)\n for ele2 in element_dict[ele]:\n insertPrice(\n db.session,\n ele2,\n 1,\n -1\n )\n else:\n print('Another problem: %d' % resp.status_code)\n\n if None in element_dict.keys():\n for ele2 in element_dict[None]:\n insertPrice(\n db.session,\n ele2,\n 1,\n -1\n )\n\n db.session.commit()\n db.session.close()\n","repo_name":"steve84/brick_eval","sub_path":"api/scripts/get_part_prices.py","file_name":"get_part_prices.py","file_ext":"py","file_size_in_byte":4173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"7997473067","text":"from time import sleep\nimport numpy as np\nimport gym\n\nfrom policies.ppo_with_body_info import PPO_with_body_info\nfrom _train_utils.load_dataset import load_dataset\nfrom _train_utils.eval_environment import get_saved_hyperparams, create_test_env\nfrom utils import output, read_yaml\nimport arguments\nargs = arguments.get_args_eval()\n\ndef eval_model(seed=0):\n conf = read_yaml(args.conf_yml)\n test_bodies = conf[\"test_bodies\"]\n output(f\"test_bodies: {test_bodies}\", 1)\n\n env_id, files, params, body_ids = load_dataset(args.dataset)\n\n hyperparams, stats_path = get_saved_hyperparams(args.stats_path, norm_reward=False, test_mode=True)\n # HACK\n hyperparams[\"normalize\"] = True\n\n env_kwargs = {\n \"xml\": files[args.body_id],\n \"param\": params[args.body_id],\n \"max_episode_steps\": args.n_timesteps+1,\n \"render\": args.render,\n }\n env = create_test_env(\n env_id,\n n_envs=1,\n stats_path=stats_path,\n seed=seed,\n log_dir=\"tmp/\",\n should_render=False,\n hyperparams=hyperparams,\n env_kwargs=env_kwargs,\n )\n kwargs = dict(seed=seed)\n model = PPO_with_body_info.load(args.model_zip, env=env, **kwargs)\n obs = env.reset()\n state = None\n episode_reward = 0\n ep_len = 0\n body_x_record = []\n for _step in range(args.n_timesteps):\n action, state = model.predict(obs, state=state, deterministic=True)\n if isinstance(env.action_space, gym.spaces.Box):\n action = np.clip(action, env.action_space.low, env.action_space.high)\n body_x = env.envs[0].robot.body_xyz[0]\n obs, reward, done, infos = env.step(action)\n episode_reward += reward[0]\n ep_len += 1\n if args.render:\n sleep(0.01)\n if done:\n break\n body_x_record.append(body_x)\n obs = env.close()\n\nif __name__ == \"__main__\":\n args.dataset = \"dataset/walker2d_20_10-v0\"\n args.conf_yml = \"../results/exp_multi_0_bodies.yml\"\n args.stats_path = \"../results/logs/multi_body/i0_s2100000/walker2d_20_10-v0_1/walker2d_20_10-v0\"\n args.model_zip = \"../results/logs/multi_body/i0_s2100000/walker2d_20_10-v0_1/best_model.zip\"\n args.n_timesteps = 300\n args.render = True\n\n for i in range(10):\n args.body_id = i\n eval_model()","repo_name":"liusida/thesis-bodies","sub_path":"a_policy_with_my_face/src/_eval.py","file_name":"_eval.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"27379240552","text":"import json\n\nimport requests\n\nfrom src.config import config\nfrom src.forwarder.basic import BasicForwarder\nfrom 
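Note: `toDesignId()` in get_part_prices.py keeps "-1"-suffixed part numbers verbatim and otherwise extracts a leading 4-5 digit design number. A standalone spot-check of that behavior; `re.match` is used here since the original pattern is already `^`-anchored:

```python
import re

def to_design_id(nbr):
    # Mirrors toDesignId(): "-1"-suffixed part numbers pass through
    # unchanged; otherwise keep the leading 4-5 digit design number.
    if "-1" in nbr:
        return nbr
    m = re.match(r"\d{4,5}", nbr)   # equivalent to the anchored re.search
    return m.group() if m else None

assert to_design_id("3001") == "3001"
assert to_design_id("3001-1") == "3001-1"
assert to_design_id("abc") is None
```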
src.forwarder.payload import Data, DataRow\n\ncfg = config.Config()\nforwarder = BasicForwarder(cfg.forwarder)\n\n\nclass ConfigureError(BaseException):\n pass\n\n\nclass Experiment:\n def __init__(self, uuid, csv):\n self.uuid = uuid\n self.csv = csv\n\n\nclass SystemManager:\n FP_EXCITE = {\n \"395/30\": \"LEDA\",\n \"457/35\": \"LEDB\",\n \"500/55\": \"LEDC\",\n \"523/70\": \"LEDD\",\n \"595/25\": \"LEDE\",\n \"623/30\": \"LEDF\",\n \"6500K\": \"LEDG\",\n \"Laser\": \"LASER650\",\n }\n\n FP_GAIN = {\n \"0.5x\": \"x0\",\n \"1x\": \"x1\",\n \"2x\": \"x2\",\n \"4x\": \"x3\",\n \"8x\": \"x4\",\n \"16x\": \"x5\",\n \"32x\": \"x6\",\n \"64x\": \"x7\",\n \"128x\": \"x8\",\n \"256x\": \"x9\",\n \"512x\": \"x10\",\n }\n\n def __init__(self, logger, cache, cfg):\n self.logger = logger\n self.cache = cache\n self.cache_key = cfg.CACHE_KEY\n self.chibio_server_url = cfg.CHIBIO_SERVER_URL\n\n def handle_trigger(self, trigger):\n try:\n # Configure an experiment on the Chi.Bio server\n device = self.__configure_experiment(trigger)\n # Create the experiment on the Chi.Bio server\n experiment_id = self.__create_experiment(device)\n csv = f\"{experiment_id}_data.csv\"\n\n experiment = Experiment(trigger[\"uuid\"], csv)\n # Store the experiment to be managed in the cache for tracking\n self.create(experiment)\n self.logger.info(\"experiment.created\", uuid=experiment.uuid)\n\n except ConfigureError as err:\n self.logger.error(\n \"experiment.create.failed\", uuid=trigger[\"uuid\"], error=err\n )\n\n try:\n # Forward any errors upstream\n data = Data()\n data.error = str(err)\n forwarder.forward(trigger[\"uuid\"], DataRow(data))\n self.logger.info(\"configure.error.forwarded\", uuid=trigger[\"uuid\"])\n\n except Exception as err:\n self.logger.error(\n \"configure.error.forward.failed\", uuid=trigger[\"uuid\"], error=err\n )\n\n except Exception as err:\n self.logger.error(\n \"experiment.create.failed\", uuid=trigger[\"uuid\"], error=err\n )\n\n def create(self, experiment):\n self.cache.hset(self.cache_key, experiment.csv, experiment.uuid)\n\n def get(self, csv):\n uuid = self.cache.hget(self.cache_key, csv)\n if uuid is None:\n raise ValueError(f\"could not find managed CSV file: {csv}\")\n\n return uuid\n\n def __configure_experiment(self, trigger):\n # Extract the source from the trigger metadata\n if \"metadata\" in trigger:\n source = trigger[\"metadata\"][\"source\"]\n else:\n source = trigger[\"uuid\"]\n\n spec = trigger[\"spec\"]\n data = {}\n\n # Use the trigger spec to build the experiment configuration data\n # TODO: Handle more cases\n if \"od\" in spec:\n data[\"OD\"] = {\"target\": spec[\"od\"]}\n\n if \"volume\" in spec:\n data[\"Volume\"] = {\"target\": spec[\"volume\"]}\n\n if \"thermostat\" in spec:\n data[\"Thermostat\"] = {\"target\": spec[\"thermostat\"]}\n\n if \"fp1Excite\" in spec:\n data[\"FP1\"] = {\"ON\": 1}\n\n led = SystemManager.FP_EXCITE.get(spec[\"fp1Excite\"])\n if led is None:\n raise ConfigureError(\n f\"invalid value for fp1Excite: {spec['fp1Excite']}\"\n )\n\n data[\"FP1\"].update({\"LED\": led})\n\n if \"fp1Gain\" in spec:\n gain = SystemManager.FP_GAIN.get(spec[\"fp1Gain\"])\n if gain is None:\n raise ConfigureError(\n f\"invalid value for fp1gain: {spec['fp1Gain']}\"\n )\n else:\n raise ConfigureError(\"missing value for fp1Gain\")\n\n data[\"FP1\"].update({\"Gain\": gain})\n\n # Send the configuration data to the /sysData endpoint on the Chi.Bio server\n resp = requests.post(\n f\"{self.chibio_server_url}/sysData\",\n json={\n \"source\": source,\n \"device\": 
{\"M\": spec[\"devicePosition\"], \"name\": spec[\"deviceName\"]},\n \"sysData\": data,\n },\n )\n\n if resp.status_code == 422:\n body = resp.json()\n raise ConfigureError(body[\"error\"])\n else:\n resp.raise_for_status()\n\n body = resp.json()\n return body[\"device\"][\"M\"]\n\n def __create_experiment(self, device):\n # Create an experiment for the device on the Chi.Bio server\n resp = requests.post(f\"{self.chibio_server_url}/Experiment/{device}\")\n resp.raise_for_status()\n\n body = resp.json()\n return body[\"experimentID\"]\n","repo_name":"jace-ys/lab-automation","sub_path":"services/chibio-relay/src/system/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":5004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"9555601834","text":"'''\nTODO:\n Improvement:\n It probably makes more sense to create a dictionary of x,y pairs and have it read from those, instead of hard\n coding in 'time'\n Use ctx and dcc.Store to prevent updates when changing date\n Color:\n Probably should update_layout wiht color instead of px.scatter\n Data selection:\n Break up Prawler from set\n Profile plotting, see Scott's email\n Improve style sheet\n Generalize plotting, so I don't need separate functions to do it.\n Date errors:\n M200 science has a bunch of dates from the 70s and 80s\n How do we deal with this\n Just drop?\n Linearly interpolate?\n\nData for archetyping:\n\n\n'''\n\nimport dash\nfrom dash import html as dhtml\nfrom dash import dcc, dash_table\nfrom dash.dependencies import Input, Output\nimport plotly.express as px\n# import plotly.graph_objects as go\nimport dash_bootstrap_components as dbc\n\n#non-plotly imports\n# from lxml import html\nimport datetime\n# from datetime import date\n# import io\n# import urllib\n\n# my imports\nimport erddap_reader as erdr\n\nprawlers = [#{'label': 'M200 Eng', 'value': 'M200Eng'},\n #{'label': 'M200 Sci', 'value': 'M200Sci'},\n {'label': 'M200 Wind', 'value': 'M200Wind'},\n {'label': 'M200 Temp/Humid', 'value': 'M200ATRH'},\n {'label': 'M200 Baro', 'value': 'M200Baro'}\n ]\n\n\ndataset_dict = {\n #'M200Eng': 'https://data.pmel.noaa.gov/engineering/erddap/tabledap/TELOM200_PRAWE_M200.csv',\n #'M200Sci': 'https://data.pmel.noaa.gov/engineering/erddap/tabledap/TELOM200_PRAWC_M200.csv',\n 'M200Wind': 'https://data.pmel.noaa.gov/engineering/erddap/tabledap/TELOM200_WIND.csv',\n 'M200Baro': 'https://data.pmel.noaa.gov/engineering/erddap/tabledap/TELOM200_BARO.csv',\n 'M200ATRH': 'https://data.pmel.noaa.gov/engineering/erddap/tabledap/TELOM200_ATRH.csv'\n }\n\n\nskipvars = ['time', 'Time', 'TIME', 'latitude', 'longitude', 'timeseries_id', 'profile_id', 'Epoch_Time', 'NC_GLOBAL']\n\n\n'''\n========================================================================================================================\nStart Dashboard\n'''\n\n#set_obj = Dataset(set_meta['Eng']['url'])\nstarting_set = 'M200Wind'\n\ngraph_config = {'modeBarButtonsToRemove' : ['hoverCompareCartesian','select2d', 'lasso2d'],\n 'doubleClick': 'reset+autosize', 'toImageButtonOptions': { 'height': None, 'width': None, },\n 'displaylogo': False}\n\ncolors = {'background': '#111111', 'text': '#7FDBFF'}\n\nexternal_stylesheets = ['https://codepen.io./chriddyp/pen/bWLwgP.css']\n\nvariables_card = dbc.Card(\n [#dbc.CardHeader(\"Tools\"),\n dbc.CardBody(\n dcc.Dropdown(\n id=\"select_var\",\n # style={'backgroundColor': colors['background']},\n # 'textColor': colors['text']},\n # 
options=dataset_dict[starting_set].ret_vars(),\n # value=dataset_dict[starting_set].ret_vars()[0]['value'],\n clearable=False\n ),\n )],\n color='dark'\n)\n\nset_card = dbc.Card([\n dbc.CardBody(\n dcc.Dropdown(\n id=\"select_eng\",\n # style={'backgroundColor': colors['background']},\n # style={'backgroundColor': colors['background']},\n options=prawlers,\n value=prawlers[0]['value'],\n clearable=False\n )\n )\n])\n\ndate_card = dbc.Card([\n dbc.CardBody(\n dcc.DatePickerRange(\n id='date-picker',\n style={'backgroundColor': colors['background']},\n # min_date_allowed=dataset_dict[starting_set].t_start.date(),\n # max_date_allowed=dataset_dict[starting_set].t_end.date(),\n # start_date=(dataset_dict[starting_set].t_end - datetime.timedelta(days=14)).date(),\n # end_date=dataset_dict[starting_set].t_end.date(),\n # display_format='MMMM Y, DD',\n # start_date_placeholder_text='MMMM Y, DD'\n ),\n )\n])\n\ntable_card = dbc.Card([\n dbc.CardBody(\n children=[\n dcc.Loading(children=[\n dcc.Textarea(id='t_mean',\n value='',\n readOnly=True,\n style={'width': '100%', 'height': 40,\n 'backgroundColor': colors['background'],\n 'textColor': colors['text']},\n ),\n dash_table.DataTable(id='table',\n style_table={'backgroundColor': colors['background'],\n 'height' :'300px',\n 'overflowY' :'auto'},\n #'overflow' : 'scroll'},\n style_cell={'backgroundColor': colors['background'],\n 'textColor': colors['text']}\n )\n ])\n ])\n])\n\ngraph_card = dbc.Card(\n [\n dcc.Loading(\n dbc.CardBody([dcc.Graph(id='graph')\n ])\n )\n ]\n)\n\n\napp = dash.Dash(__name__,\n meta_tags=[{\"name\": \"viewport\", \"content\": \"width=device-width, initial-scale=1\"}],\n #requests_pathname_prefix='/prawler/m200/',\n external_stylesheets=[dbc.themes.SLATE])\n#server = app.server\n\napp.layout = dhtml.Div([\n #dbc.Container([\n dbc.Row([dhtml.H1('Prawler M200')]),\n dbc.Row([\n dbc.Col(graph_card, width=9),\n dbc.Col(children=[date_card,\n set_card,\n variables_card,\n table_card],\n width=3)\n ])\n # ])\n])\n\n\n'''\n========================================================================================================================\nCallbacks\n'''\n\n#engineering data selection\n@app.callback(\n [Output('select_var', 'options'),\n Output('date-picker', 'min_date_allowed'),\n Output('date-picker', 'max_date_allowed'),\n Output('date-picker', 'start_date'),\n Output('date-picker', 'end_date'),\n Output('select_var', 'value')],\n Input('select_eng', 'value'))\n\ndef change_prawler(dataset):\n \n set_obj = erdr.Dataset(dataset_dict[dataset])\n\n min_date_allowed = set_obj.t_start.date(),\n max_date_allowed = set_obj.t_end.date(),\n start_date = (set_obj.t_end - datetime.timedelta(days=14)).date(),\n end_date = set_obj.t_end.date()\n\n vars = set_obj.gen_drop_vars(skips=['time', 'latitude', 'longitude', 'timeseries_id', 'NC_GLOBAL'])\n first_var = vars[0]['value']\n\n\n return vars, str(min_date_allowed[0]), str(max_date_allowed[0]), str(start_date[0]), str(end_date), first_var\n\n#engineering data selection\n@app.callback(\n [Output('graph', 'figure'),\n Output('table', 'data'),\n Output('table', 'columns'),\n Output('t_mean', 'value')],\n [Input('select_eng', 'value'),\n Input('select_var', 'value'),\n Input('date-picker', 'start_date'),\n Input('date-picker', 'end_date')\n ])\n\ndef plot_evar(dataset, select_var, start_date, end_date):\n '''\n :param dataset:\n :param select_var:\n :return:\n\n '''\n\n #ctx = dash.callback_context\n\n # if len(ctx.triggered) == 1:\n #\n # if 'date-picker' in 
ctx.triggered[0]['prop_id']:\n\n set_obj = erdr.Dataset(dataset_dict[dataset])\n data = set_obj.get_data(True, [select_var])\n new_data = set_obj.ret_windowed_data(t_start=start_date, t_end=end_date)\n # new_data = set_obj.ret_data()\n\n t_mean = ''\n\n #new_data = set_obj.ret_data(start_date, end_date)\n\n # colorscale = 'Blues'\n #\n # print(dataset)\n #\n # if dataset in ['TELONAS2Gen', 'M200Sci']:\n colorscale = px.colors.sequential.Viridis\n\n if select_var == 'trips_per_day':\n\n trip_set = set_obj.trips_per_day(start_date, end_date)\n efig = px.scatter(trip_set, y='ntrips', x='days')#, color=\"sepal_length\", color_continuous_scale=colorscale)\n\n columns = [{\"name\": 'Day', \"id\": 'days'},\n {'name': select_var, 'id': 'ntrips'}]\n\n t_mean = \"Mean Trips per day: \" + str(trip_set['ntrips'].mean())\n\n try:\n table_data = trip_set.to_dict('records')\n except TypeError:\n table_data = trip_set.to_dict()\n\n elif select_var == 'errs_per_day':\n\n err_set = set_obj.errs_per_day(start_date, end_date)\n efig = px.scatter(err_set, y='nerrors', x='days')#, color=\"sepal_length\", color_continuous_scale=colorscale)\n\n columns = [{\"name\": 'Day', \"id\": 'days'},\n {'name': select_var, 'id': 'nerrors'}]\n\n t_mean = 'Mean errors per day ' + str(err_set['nerrors'].mean())\n\n try:\n table_data = err_set.to_dict('records')\n except TypeError:\n table_data = err_set.to_dict()\n\n elif select_var == 'sci_profs':\n\n sci_set = set_obj.sci_profiles_per_day(start_date, end_date)\n efig = px.scatter(sci_set, y='ntrips', x='days')#, color=\"sepal_length\", color_continuous_scale=colorscale)\n\n columns = [{\"name\": 'Day', \"id\": 'days'},\n {'name': select_var, 'id': 'ntrips'}]\n\n t_mean = 'Mean errors per day ' + str(sci_set['ntrips'].mean())\n\n try:\n table_data = sci_set.to_dict('records')\n except TypeError:\n table_data = sci_set.to_dict()\n\n #elif select_var in list(new_data.columns):\n\n else:\n efig = px.scatter(new_data, y=select_var, x='time')#, color=\"sepal_length\", color_continuous_scale=colorscale)\n\n columns = [{\"name\": 'Date', \"id\": 'datetime'},\n {'name': select_var, 'id': select_var}]\n\n try:\n t_mean = 'Average ' + select_var + ': ' + str(new_data.loc[:, select_var].mean())\n except TypeError:\n t_mean = ''\n\n try:\n table_data = new_data.to_dict('records')\n except TypeError:\n table_data = new_data.to_dict()\n\n if 'depth' in select_var.lower():\n\n efig['layout']['yaxis']['autorange'] = \"reversed\"\n\n efig.update_layout(\n plot_bgcolor=colors['background'],\n paper_bgcolor=colors['background'],\n font_color=colors['text'],\n )\n\n # efig.style(\n # height=700\n # )\n\n return efig, table_data, columns, t_mean\n\n\nif __name__ == '__main__':\n #app.run_server(host='0.0.0.0', port=8050, debug=True)\n app.run_server(debug=True)","repo_name":"shjewellEDD/dashboard_toolbox","sub_path":"dashboard_archetype.py","file_name":"dashboard_archetype.py","file_ext":"py","file_size_in_byte":10732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"72396372278","text":"from django.shortcuts import render,get_object_or_404, redirect\nfrom .forms import ContactoForm\nfrom .models import Contacto\n\n\n\ndef index(request):\n contactos = Contacto.objects.all()\n \n if request.method == 'POST':\n form = ContactoForm(request.POST)\n if form.is_valid():\n email = form.cleaned_data['email']\n if Contacto.objects.filter(email=email).exists():\n form.add_error('email', 'Este email ya esta siendo usado')\n else:\n form.save()\n 
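Note: in `change_prawler()` the trailing commas on `min_date_allowed = set_obj.t_start.date(),` and the two lines after it turn the assignments into one-element tuples, which is why the return statement indexes `[0]` (while `end_date`, assigned without a comma, is returned directly). A tiny demonstration of the quirk:

```python
import datetime

t_end = datetime.datetime(2022, 5, 1)

max_date_allowed = t_end.date(),      # trailing comma -> one-element tuple
print(type(max_date_allowed))         # <class 'tuple'>
print(max_date_allowed[0])            # 2022-05-01, hence the [0] indexing

max_date_allowed = t_end.date()       # drop the comma: plain date, no [0]
```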
else:\n form = ContactoForm()\n \n return render(request, 'index.html', {'form': form, 'contactos': contactos})\n \ndef borrar_contacto(request, contacto_id):\n contacto = get_object_or_404(Contacto, id=contacto_id)\n contacto.delete()\n return redirect('index')\n\ndef editar_contacto(request, contacto_id):\n contacto = get_object_or_404(Contacto, id=contacto_id)\n \n if request.method == 'POST':\n form = ContactoForm(request.POST, instance=contacto)\n if form.is_valid():\n email = form.cleaned_data['email']\n if Contacto.objects.filter(email=email).exclude(id=contacto_id).exists():\n form.add_error('email', 'Este email ya está siendo usado')\n else:\n form.save()\n return redirect('index')\n else:\n form = ContactoForm(instance=contacto)\n \n return render(request, 'editar_contacto.html', {'form': form, 'contacto_id': contacto_id})","repo_name":"Sarabe89/django","sub_path":"Gestor_Personal/Gestor_de_Contactos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"11600434859","text":"def removeDuplicates(Array, n):\n temp = list(range(n))\n j = 0\n for i in range(0, n-1):\n if Array[i] != Array[i+1]:\n temp[j] = Array[i]\n j += 1\n temp[j] = Array[n-1]\n j += 1\n for i in range(0, j):\n Array[i] = temp[i]\n return temp[:j]\nA = [1, 2, 2, 2, 6, 6, 7, 8, 10, 10]\nB = [2, 2, 6, 8, 10, 10, 10, 10, 10, 10]\nB = removeDuplicates(B, len(B)) # O(n)\nprint(B)\nintersection = []\nj = 0\nfor i in range(0,len(A)): # O(n)\n if(A[i] == B[j]):\n intersection.append(A[i])\n j += 1\n elif(A[i] < B[j]):\n continue\n else:\n while(A[i] > B[j]):\n j += 1\n if(A[i] == B[j]):\n intersection.append(A[i])\n j += 1\n break","repo_name":"Freeassassin/ECE358","sub_path":"HW02/Problem3a.py","file_name":"Problem3a.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"6321374574","text":"from pycdas.kernels.Kernel import CDMSKernel, Kernel, KernelSpec\nfrom pycdas.cdasArray import cdmsArray\nimport cdms2, time, os, cdutil\nfrom pycdas.messageParser import mParse\nimport numpy as np\n\nclass AverageKernel(CDMSKernel):\n\n def __init__( self ):\n Kernel.__init__( self, KernelSpec(\"ave\", \"Average\", \"Averages the inputs using UVCDAT with area weighting by default\", handlesInput=True ) )\n self._debug = False\n\n def executeOperation(self, task, _input):\n self.logger.info( \"Executing AverageKernel, input metadata = \" + str(_input.metadata) )\n dset_address = _input.metadata.get(\"uri\", _input.metadata.get(\"dataPath\") )\n vname = _input.metadata.get(\"name\")\n dset = cdms2.open( dset_address )\n selector = _input.getSelector( dset[vname] )\n self.logger.info( \"exec *EXT* AverageKernel, selector: \" + str( selector ) )\n variable = dset( vname, **selector )\n axes = task.metadata.get(\"axis\",\"xy\")\n# weights = task.metadata.get( \"weights\", \"\" ).split(\",\")\n# if weights == [\"\"]: weights = [ (\"generate\" if( axis == 'y' ) else \"equal\") for axis in axes ]\n weights = task.metadata.get(\"weights\",\"generate\").split(\",\")\n if( len(weights) == 1 ): weights = weights[0]\n action = task.metadata.get(\"action\",\"average\")\n returned = 0\n result_var = cdutil.averager( variable, axis=axes, weights=weights, action=action, returned=returned )\n self.logger.info( \"Computed result, input shape = \" + str(variable.shape) + \", output shape = \" + str(result_var.shape))\n rv = 
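Note: Problem3a's merge loop keeps indexing `B[j]` after `j` has walked past the end of the deduplicated list; on the hard-coded inputs the final iteration (A[9] == 10 with j already at 4) raises `IndexError`. A bounds-checked two-pointer sketch:

```python
def sorted_intersection(a, b):
    # Two-pointer merge over both sorted lists; the loop condition bounds
    # both indices, so j can never run past b as it can in the original.
    i = j = 0
    out = []
    while i < len(a) and j < len(b):
        if a[i] == b[j]:
            out.append(a[i])
            i += 1
            j += 1
        elif a[i] < b[j]:
            i += 1
        else:
            j += 1
    return out

assert sorted_intersection([1, 2, 2, 6], [2, 6, 8]) == [2, 6]
```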
self.createResult( result_var, _input, task )\n self.logger.info( \"Result data, shape = \" + str(result_var.shape) + \", data = \" + np.array_str( rv.array() ) )\n return rv\n\nclass ZonalAverageDemo(CDMSKernel):\n\n def __init__( self ):\n Kernel.__init__( self, KernelSpec(\"zaDemo\", \"ZonalAverageDemo\", \"Zonal average from -90 to 90\", handlesInput=True ) )\n self._debug = False\n\n def executeOperation(self, task, _input):\n self.logger.info( \"Executing AverageKernel, input metadata = \" + str(_input.metadata) )\n dset_address = _input.metadata.get(\"uri\", _input.metadata.get(\"dataPath\") )\n vname = _input.metadata.get(\"name\")\n dset = cdms2.open( dset_address )\n selector = _input.getSelector( dset[vname] )\n self.logger.info( \"exec *EXT* AverageKernel, selector: \" + str( selector ) )\n variable = dset( vname, **selector )\n axisIndex = variable.getAxisIndex( 'longitude' )\n\n cdutil.times.setTimeBoundsMonthly(variable)\n djfclimatology = cdutil.times.DJF.climatology(variable)\n zonalAve = cdutil.averager( djfclimatology, axis=axisIndex, weights='equal' )\n\n return self.createResult( zonalAve, _input, task )\n","repo_name":"nasa-nccs-cds/CDAS2","sub_path":"python/src/pycdas/kernels/internal/cdmsExt.py","file_name":"cdmsExt.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"4"} +{"seq_id":"29782260597","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nchrom_driver_path = \"D:\\chromDriver\\chromedriver.exe\"\ndriver = webdriver.Chrome(executable_path=chrom_driver_path)\ndriver.get(\"https://en.wikipedia.org/wiki/Main_Page\")\n# number = driver.find_element_by_xpath('//*[@id=\"articlecount\"]/a[1]')\n# number.click()\nsearch = driver.find_element_by_name(\"search\")\nsearch.send_keys(\"Python\")\nsearch.send_keys(Keys.ENTER)\n","repo_name":"elahekarimi/python-learning","sub_path":"Day48/intraction.py","file_name":"intraction.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"3374778159","text":"\"\"\"\nre.compile()\n\"\"\"\n\nimport re\n\nmystr = '39801 356, 2102 1111'\n\n# Three-digit number followed by space followed by two-digit number\npattern = r'\\d{3} \\d{2}'\n\n# directly compile a regex strin\n# res = re.compile(pattern)\n# print(res, type(res))\n\nmatch_obj = re.compile(pattern).match(mystr, 2)\nprint(match_obj)\n\n\n\n","repo_name":"edu-athensoft/ceit4101python","sub_path":"stem1400_modules/module_14_regex/s3_re/regex_17_match.py","file_name":"regex_17_match.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"24194861680","text":"#!/usr/bin/env python\nfrom collections import namedtuple\nfrom os import path\nimport argparse\nimport random\nimport time\n\nimport pygame as pg\nfrom pygame import midi\n\nBLACK = (0, 0, 0)\nGREEN = (0, 255, 0)\nBLUE = (0, 0, 128)\nBACKGROUND_COLOR = BLACK\nNOTES_RANGE = (36, 96)\nNOTE_SPEED = 50\nWINDOW_SIZE = (150, 30)\n\ndef asset_path(name):\n return path.join(path.dirname(path.realpath(__file__)), \"assets\", name)\n\n\nclass WantQuit(Exception):\n pass\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', dest='midi_in_id', default=3, type=int,\n help=\"midi input device id\")\n parser.add_argument('-o', dest='midi_out_id', default=0, type=int,\n help=\"midi output 
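Note: regex_17_match.py above compiles `r'\d{3} \d{2}'` and calls `.match(mystr, 2)`; unlike `search()`, `match()` anchors at the given position, so from index 2 it sees "801 356..." and succeeds. A runnable check of both behaviors on the same string:

```python
import re

mystr = "39801 356, 2102 1111"
rx = re.compile(r"\d{3} \d{2}")

# match(s, pos) anchors at pos: from index 2 it matches "801 35".
print(rx.match(mystr, 2).group())   # 801 35
# search() scans forward instead, finding the same span without a pos hint.
print(rx.search(mystr).group())     # 801 35
```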
device id\")\n return parser.parse_args()\n\n\ndef note2mynoterepr(note):\n index = note - 36\n octave = index // 12 + 2\n offset = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"A\",\n \"B\"][index % 12]\n\n snote = [\"C{}\", \"C{}#\", \"D{}\", \"D{}#\", \"E{}\", \"F{}\", \"F{}#\", \"G{}\", \"G{}#\",\n \"A{}\", \"A{}#\", \"B{}\", \"B{}#\"][index % 12].format(octave)\n return f\"{snote} - {octave}{offset}\"\n\n\nclass RNWindow(object):\n def __init__(self, midiin_id, midiout_id):\n self.midiin_id = midiin_id\n self.midiout_id = midiout_id\n pg.init()\n pg.fastevent.init()\n midi.init()\n self._screen = pg.display.set_mode(WINDOW_SIZE)\n self._clock = pg.time.Clock()\n self._midin = midi.Input(midiin_id)\n self._midout = midi.Output(midiout_id)\n self._font = pg.font.Font('freesansbold.ttf', 32)\n self.note = 0\n self.render_text(\"\")\n\n def render_text(self, text):\n self._text_surf = self._font.render(text, True, GREEN, BLACK)\n\n def draw(self):\n self._screen.fill(BACKGROUND_COLOR)\n self._screen.blit(self._text_surf, (0, 0))\n pg.display.update()\n\n def update(self):\n pass\n\n def quick_play(self):\n self._midout.note_on(self.note, NOTE_SPEED)\n time.sleep(0.1)\n self._midout.note_on(self.note, NOTE_SPEED)\n\n def draw_note(self):\n pg.draw.circle(self._screen, BLACK, NOTES.get(self._note).pos, 5)\n\n def next_note(self):\n self._midout.note_off(self.note, NOTE_SPEED)\n self.note = random.randint(*NOTES_RANGE)\n self.render_text(note2mynoterepr(self.note))\n self._midout.note_on(self.note, NOTE_SPEED)\n\n def process_events(self):\n if self._midin.poll():\n rawevents = self._midin.read(10)\n mevents = midi.midis2events(rawevents, self.midiin_id)\n for mevent in mevents:\n pg.fastevent.post(mevent)\n\n for event in pg.fastevent.get():\n if event.type == midi.MIDIIN:\n print(event)\n if event.data1 == self.note:\n self.quick_play()\n self.next_note()\n if event.type == pg.QUIT:\n raise WantQuit()\n\n def quit(self):\n self._midout.note_off(self.note, NOTE_SPEED)\n pg.quit()\n\n def run(self):\n self.next_note()\n while True:\n self.process_events()\n self.update()\n self.draw()\n self._clock.tick(20)\n\n\ndef main():\n try:\n args = parse_args()\n win = RNWindow(args.midi_in_id, args.midi_out_id)\n win.run()\n except WantQuit:\n win.quit()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"EmilioPeJu/randomnote","sub_path":"randomnote/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"71952138036","text":"\"\"\"Event loop and event loop policy.\"\"\"\n\n__all__ = (\n 'AbstractEventLoopPolicy',\n 'AbstractEventLoop', 'AbstractServer',\n 'Handle', 'TimerHandle',\n 'get_event_loop_policy', 'set_event_loop_policy',\n 'get_event_loop', 'set_event_loop', 'new_event_loop',\n 'get_child_watcher', 'set_child_watcher',\n '_set_running_loop', 'get_running_loop',\n '_get_running_loop',\n)\n\nimport contextvars\nimport os\nimport signal\nimport socket\nimport subprocess\nimport sys\nimport threading\n\nfrom . 
import format_helpers\n\n\nclass Handle:\n \"\"\"Object returned by callback registration methods.\"\"\"\n\n __slots__ = ('_callback', '_args', '_cancelled', '_loop',\n '_source_traceback', '_repr', '__weakref__',\n '_context')\n\n def __init__(self, callback, args, loop, context=None):\n if context is None:\n context = contextvars.copy_context()\n self._context = context\n self._loop = loop\n self._callback = callback\n self._args = args\n self._cancelled = False\n self._repr = None\n if self._loop.get_debug():\n self._source_traceback = format_helpers.extract_stack(\n sys._getframe(1))\n else:\n self._source_traceback = None\n\n def _repr_info(self):\n info = [self.__class__.__name__]\n if self._cancelled:\n info.append('cancelled')\n if self._callback is not None:\n info.append(format_helpers._format_callback_source(\n self._callback, self._args))\n if self._source_traceback:\n frame = self._source_traceback[-1]\n info.append(f'created at {frame[0]}:{frame[1]}')\n return info\n\n def __repr__(self):\n if self._repr is not None:\n return self._repr\n info = self._repr_info()\n return '<{}>'.format(' '.join(info))\n\n def get_context(self):\n return self._context\n\n def cancel(self):\n if not self._cancelled:\n self._cancelled = True\n if self._loop.get_debug():\n # Keep a representation in debug mode to keep callback and\n # parameters. For example, to log the warning\n # \"Executing <Handle...> took 2.5 second\"\n self._repr = repr(self)\n self._callback = None\n self._args = None\n\n def cancelled(self):\n return self._cancelled\n\n def _run(self):\n try:\n self._context.run(self._callback, *self._args)\n except (SystemExit, KeyboardInterrupt):\n raise\n except BaseException as exc:\n cb = format_helpers._format_callback_source(\n self._callback, self._args)\n msg = f'Exception in callback {cb}'\n context = {\n 'message': msg,\n 'exception': exc,\n 'handle': self,\n }\n if self._source_traceback:\n context['source_traceback'] = self._source_traceback\n self._loop.call_exception_handler(context)\n self = None # Needed to break cycles when an exception occurs.\n\n\nclass TimerHandle(Handle):\n \"\"\"Object returned by timed callback registration methods.\"\"\"\n\n __slots__ = ['_scheduled', '_when']\n\n def __init__(self, when, callback, args, loop, context=None):\n super().__init__(callback, args, loop, context)\n if self._source_traceback:\n del self._source_traceback[-1]\n self._when = when\n self._scheduled = False\n\n def _repr_info(self):\n info = super()._repr_info()\n pos = 2 if self._cancelled else 1\n info.insert(pos, f'when={self._when}')\n return info\n\n def __hash__(self):\n return hash(self._when)\n\n def __lt__(self, other):\n if isinstance(other, TimerHandle):\n return self._when < other._when\n return NotImplemented\n\n def __le__(self, other):\n if isinstance(other, TimerHandle):\n return self._when < other._when or self.__eq__(other)\n return NotImplemented\n\n def __gt__(self, other):\n if isinstance(other, TimerHandle):\n return self._when > other._when\n return NotImplemented\n\n def __ge__(self, other):\n if isinstance(other, TimerHandle):\n return self._when > other._when or self.__eq__(other)\n return NotImplemented\n\n def __eq__(self, other):\n if isinstance(other, TimerHandle):\n return (self._when == other._when and\n self._callback == other._callback and\n self._args == other._args and\n self._cancelled == other._cancelled)\n return NotImplemented\n\n def cancel(self):\n if not self._cancelled:\n self._loop._timer_handle_cancelled(self)\n 
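Note: `Handle.__init__` snapshots `contextvars.copy_context()` at registration time and `_run()` replays the callback inside that snapshot, so context mutations made after the handle is created are invisible to the callback. A standalone demonstration of the pattern:

```python
import contextvars

request_id = contextvars.ContextVar("request_id", default=None)

def log():
    print("request:", request_id.get())

ctx = contextvars.copy_context()   # snapshot, as Handle.__init__ does
request_id.set("set-after-capture")
ctx.run(log)                       # prints "request: None" - the snapshot wins
```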
super().cancel()\n\n def when(self):\n \"\"\"Return a scheduled callback time.\n\n The time is an absolute timestamp, using the same time\n reference as loop.time().\n \"\"\"\n return self._when\n\n\nclass AbstractServer:\n \"\"\"Abstract server returned by create_server().\"\"\"\n\n def close(self):\n \"\"\"Stop serving. This leaves existing connections open.\"\"\"\n raise NotImplementedError\n\n def get_loop(self):\n \"\"\"Get the event loop the Server object is attached to.\"\"\"\n raise NotImplementedError\n\n def is_serving(self):\n \"\"\"Return True if the server is accepting connections.\"\"\"\n raise NotImplementedError\n\n async def start_serving(self):\n \"\"\"Start accepting connections.\n\n This method is idempotent, so it can be called when\n the server is already being serving.\n \"\"\"\n raise NotImplementedError\n\n async def serve_forever(self):\n \"\"\"Start accepting connections until the coroutine is cancelled.\n\n The server is closed when the coroutine is cancelled.\n \"\"\"\n raise NotImplementedError\n\n async def wait_closed(self):\n \"\"\"Coroutine to wait until service is closed.\"\"\"\n raise NotImplementedError\n\n async def __aenter__(self):\n return self\n\n async def __aexit__(self, *exc):\n self.close()\n await self.wait_closed()\n\n\nclass AbstractEventLoop:\n \"\"\"Abstract event loop.\"\"\"\n\n # Running and stopping the event loop.\n\n def run_forever(self):\n \"\"\"Run the event loop until stop() is called.\"\"\"\n raise NotImplementedError\n\n def run_until_complete(self, future):\n \"\"\"Run the event loop until a Future is done.\n\n Return the Future's result, or raise its exception.\n \"\"\"\n raise NotImplementedError\n\n def stop(self):\n \"\"\"Stop the event loop as soon as reasonable.\n\n Exactly how soon that is may depend on the implementation, but\n no more I/O callbacks should be scheduled.\n \"\"\"\n raise NotImplementedError\n\n def is_running(self):\n \"\"\"Return whether the event loop is currently running.\"\"\"\n raise NotImplementedError\n\n def is_closed(self):\n \"\"\"Returns True if the event loop was closed.\"\"\"\n raise NotImplementedError\n\n def close(self):\n \"\"\"Close the loop.\n\n The loop should not be running.\n\n This is idempotent and irreversible.\n\n No other methods should be called after this one.\n \"\"\"\n raise NotImplementedError\n\n async def shutdown_asyncgens(self):\n \"\"\"Shutdown all active asynchronous generators.\"\"\"\n raise NotImplementedError\n\n async def shutdown_default_executor(self):\n \"\"\"Schedule the shutdown of the default executor.\"\"\"\n raise NotImplementedError\n\n # Methods scheduling callbacks. 
All these return Handles.\n\n def _timer_handle_cancelled(self, handle):\n \"\"\"Notification that a TimerHandle has been cancelled.\"\"\"\n raise NotImplementedError\n\n def call_soon(self, callback, *args, context=None):\n return self.call_later(0, callback, *args, context=context)\n\n def call_later(self, delay, callback, *args, context=None):\n raise NotImplementedError\n\n def call_at(self, when, callback, *args, context=None):\n raise NotImplementedError\n\n def time(self):\n raise NotImplementedError\n\n def create_future(self):\n raise NotImplementedError\n\n # Method scheduling a coroutine object: create a task.\n\n def create_task(self, coro, *, name=None, context=None):\n raise NotImplementedError\n\n # Methods for interacting with threads.\n\n def call_soon_threadsafe(self, callback, *args, context=None):\n raise NotImplementedError\n\n def run_in_executor(self, executor, func, *args):\n raise NotImplementedError\n\n def set_default_executor(self, executor):\n raise NotImplementedError\n\n # Network I/O methods returning Futures.\n\n async def getaddrinfo(self, host, port, *,\n family=0, type=0, proto=0, flags=0):\n raise NotImplementedError\n\n async def getnameinfo(self, sockaddr, flags=0):\n raise NotImplementedError\n\n async def create_connection(\n self, protocol_factory, host=None, port=None,\n *, ssl=None, family=0, proto=0,\n flags=0, sock=None, local_addr=None,\n server_hostname=None,\n ssl_handshake_timeout=None,\n ssl_shutdown_timeout=None,\n happy_eyeballs_delay=None, interleave=None):\n raise NotImplementedError\n\n async def create_server(\n self, protocol_factory, host=None, port=None,\n *, family=socket.AF_UNSPEC,\n flags=socket.AI_PASSIVE, sock=None, backlog=100,\n ssl=None, reuse_address=None, reuse_port=None,\n ssl_handshake_timeout=None,\n ssl_shutdown_timeout=None,\n start_serving=True):\n \"\"\"A coroutine which creates a TCP server bound to host and port.\n\n The return value is a Server object which can be used to stop\n the service.\n\n If host is an empty string or None all interfaces are assumed\n and a list of multiple sockets will be returned (most likely\n one for IPv4 and another one for IPv6). The host parameter can also be\n a sequence (e.g. list) of hosts to bind to.\n\n family can be set to either AF_INET or AF_INET6 to force the\n socket to use IPv4 or IPv6. If not set it will be determined\n from host (defaults to AF_UNSPEC).\n\n flags is a bitmask for getaddrinfo().\n\n sock can optionally be specified in order to use a preexisting\n socket object.\n\n backlog is the maximum number of queued connections passed to\n listen() (defaults to 100).\n\n ssl can be set to an SSLContext to enable SSL over the\n accepted connections.\n\n reuse_address tells the kernel to reuse a local socket in\n TIME_WAIT state, without waiting for its natural timeout to\n expire. If not specified will automatically be set to True on\n UNIX.\n\n reuse_port tells the kernel to allow this endpoint to be bound to\n the same port as other existing endpoints are bound to, so long as\n they all set this flag when being created. This option is not\n supported on Windows.\n\n ssl_handshake_timeout is the time in seconds that an SSL server\n will wait for completion of the SSL handshake before aborting the\n connection. Default is 60s.\n\n ssl_shutdown_timeout is the time in seconds that an SSL server\n will wait for completion of the SSL shutdown procedure\n before aborting the connection. 
Default is 30s.\n\n start_serving set to True (default) causes the created server\n to start accepting connections immediately. When set to False,\n the user should await Server.start_serving() or Server.serve_forever()\n to make the server to start accepting connections.\n \"\"\"\n raise NotImplementedError\n\n async def sendfile(self, transport, file, offset=0, count=None,\n *, fallback=True):\n \"\"\"Send a file through a transport.\n\n Return an amount of sent bytes.\n \"\"\"\n raise NotImplementedError\n\n async def start_tls(self, transport, protocol, sslcontext, *,\n server_side=False,\n server_hostname=None,\n ssl_handshake_timeout=None,\n ssl_shutdown_timeout=None):\n \"\"\"Upgrade a transport to TLS.\n\n Return a new transport that *protocol* should start using\n immediately.\n \"\"\"\n raise NotImplementedError\n\n async def create_unix_connection(\n self, protocol_factory, path=None, *,\n ssl=None, sock=None,\n server_hostname=None,\n ssl_handshake_timeout=None,\n ssl_shutdown_timeout=None):\n raise NotImplementedError\n\n async def create_unix_server(\n self, protocol_factory, path=None, *,\n sock=None, backlog=100, ssl=None,\n ssl_handshake_timeout=None,\n ssl_shutdown_timeout=None,\n start_serving=True):\n \"\"\"A coroutine which creates a UNIX Domain Socket server.\n\n The return value is a Server object, which can be used to stop\n the service.\n\n path is a str, representing a file system path to bind the\n server socket to.\n\n sock can optionally be specified in order to use a preexisting\n socket object.\n\n backlog is the maximum number of queued connections passed to\n listen() (defaults to 100).\n\n ssl can be set to an SSLContext to enable SSL over the\n accepted connections.\n\n ssl_handshake_timeout is the time in seconds that an SSL server\n will wait for the SSL handshake to complete (defaults to 60s).\n\n ssl_shutdown_timeout is the time in seconds that an SSL server\n will wait for the SSL shutdown to finish (defaults to 30s).\n\n start_serving set to True (default) causes the created server\n to start accepting connections immediately. When set to False,\n the user should await Server.start_serving() or Server.serve_forever()\n to make the server to start accepting connections.\n \"\"\"\n raise NotImplementedError\n\n async def connect_accepted_socket(\n self, protocol_factory, sock,\n *, ssl=None,\n ssl_handshake_timeout=None,\n ssl_shutdown_timeout=None):\n \"\"\"Handle an accepted connection.\n\n This is used by servers that accept connections outside of\n asyncio, but use asyncio to handle connections.\n\n This method is a coroutine. When completed, the coroutine\n returns a (transport, protocol) pair.\n \"\"\"\n raise NotImplementedError\n\n async def create_datagram_endpoint(self, protocol_factory,\n local_addr=None, remote_addr=None, *,\n family=0, proto=0, flags=0,\n reuse_address=None, reuse_port=None,\n allow_broadcast=None, sock=None):\n \"\"\"A coroutine which creates a datagram endpoint.\n\n This method will try to establish the endpoint in the background.\n When successful, the coroutine returns a (transport, protocol) pair.\n\n protocol_factory must be a callable returning a protocol instance.\n\n socket family AF_INET, socket.AF_INET6 or socket.AF_UNIX depending on\n host (or family if specified), socket type SOCK_DGRAM.\n\n reuse_address tells the kernel to reuse a local socket in\n TIME_WAIT state, without waiting for its natural timeout to\n expire. 
If not specified it will automatically be set to True on\n UNIX.\n\n reuse_port tells the kernel to allow this endpoint to be bound to\n the same port as other existing endpoints are bound to, so long as\n they all set this flag when being created. This option is not\n supported on Windows and some UNIX's. If the\n :py:data:`~socket.SO_REUSEPORT` constant is not defined then this\n capability is unsupported.\n\n allow_broadcast tells the kernel to allow this endpoint to send\n messages to the broadcast address.\n\n sock can optionally be specified in order to use a preexisting\n socket object.\n \"\"\"\n raise NotImplementedError\n\n # Pipes and subprocesses.\n\n async def connect_read_pipe(self, protocol_factory, pipe):\n \"\"\"Register read pipe in event loop. Set the pipe to non-blocking mode.\n\n protocol_factory should instantiate object with Protocol interface.\n pipe is a file-like object.\n Return pair (transport, protocol), where transport supports the\n ReadTransport interface.\"\"\"\n # The reason to accept file-like object instead of just file descriptor\n # is: we need to own pipe and close it at transport finishing\n # Can got complicated errors if pass f.fileno(),\n # close fd in pipe transport then close f and vice versa.\n raise NotImplementedError\n\n async def connect_write_pipe(self, protocol_factory, pipe):\n \"\"\"Register write pipe in event loop.\n\n protocol_factory should instantiate object with BaseProtocol interface.\n Pipe is file-like object already switched to nonblocking.\n Return pair (transport, protocol), where transport support\n WriteTransport interface.\"\"\"\n # The reason to accept file-like object instead of just file descriptor\n # is: we need to own pipe and close it at transport finishing\n # Can got complicated errors if pass f.fileno(),\n # close fd in pipe transport then close f and vice versa.\n raise NotImplementedError\n\n async def subprocess_shell(self, protocol_factory, cmd, *,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n **kwargs):\n raise NotImplementedError\n\n async def subprocess_exec(self, protocol_factory, *args,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n **kwargs):\n raise NotImplementedError\n\n # Ready-based callback registration methods.\n # The add_*() methods return None.\n # The remove_*() methods return True if something was removed,\n # False if there was nothing to delete.\n\n def add_reader(self, fd, callback, *args):\n raise NotImplementedError\n\n def remove_reader(self, fd):\n raise NotImplementedError\n\n def add_writer(self, fd, callback, *args):\n raise NotImplementedError\n\n def remove_writer(self, fd):\n raise NotImplementedError\n\n # Completion based I/O methods returning Futures.\n\n async def sock_recv(self, sock, nbytes):\n raise NotImplementedError\n\n async def sock_recv_into(self, sock, buf):\n raise NotImplementedError\n\n async def sock_recvfrom(self, sock, bufsize):\n raise NotImplementedError\n\n async def sock_recvfrom_into(self, sock, buf, nbytes=0):\n raise NotImplementedError\n\n async def sock_sendall(self, sock, data):\n raise NotImplementedError\n\n async def sock_sendto(self, sock, data, address):\n raise NotImplementedError\n\n async def sock_connect(self, sock, address):\n raise NotImplementedError\n\n async def sock_accept(self, sock):\n raise NotImplementedError\n\n async def sock_sendfile(self, sock, file, offset=0, count=None,\n *, fallback=None):\n raise NotImplementedError\n\n # Signal handling.\n\n def 
add_signal_handler(self, sig, callback, *args):\n raise NotImplementedError\n\n def remove_signal_handler(self, sig):\n raise NotImplementedError\n\n # Task factory.\n\n def set_task_factory(self, factory):\n raise NotImplementedError\n\n def get_task_factory(self):\n raise NotImplementedError\n\n # Error handlers.\n\n def get_exception_handler(self):\n raise NotImplementedError\n\n def set_exception_handler(self, handler):\n raise NotImplementedError\n\n def default_exception_handler(self, context):\n raise NotImplementedError\n\n def call_exception_handler(self, context):\n raise NotImplementedError\n\n # Debug flag management.\n\n def get_debug(self):\n raise NotImplementedError\n\n def set_debug(self, enabled):\n raise NotImplementedError\n\n\nclass AbstractEventLoopPolicy:\n \"\"\"Abstract policy for accessing the event loop.\"\"\"\n\n def get_event_loop(self):\n \"\"\"Get the event loop for the current context.\n\n Returns an event loop object implementing the AbstractEventLoop interface,\n or raises an exception in case no event loop has been set for the\n current context and the current policy does not specify to create one.\n\n It should never return None.\"\"\"\n raise NotImplementedError\n\n def set_event_loop(self, loop):\n \"\"\"Set the event loop for the current context to loop.\"\"\"\n raise NotImplementedError\n\n def new_event_loop(self):\n \"\"\"Create and return a new event loop object according to this\n policy's rules. If there's need to set this loop as the event loop for\n the current context, set_event_loop must be called explicitly.\"\"\"\n raise NotImplementedError\n\n # Child processes handling (Unix only).\n\n def get_child_watcher(self):\n \"Get the watcher for child processes.\"\n raise NotImplementedError\n\n def set_child_watcher(self, watcher):\n \"\"\"Set the watcher for child processes.\"\"\"\n raise NotImplementedError\n\n\nclass BaseDefaultEventLoopPolicy(AbstractEventLoopPolicy):\n \"\"\"Default policy implementation for accessing the event loop.\n\n In this policy, each thread has its own event loop. However, we\n only automatically create an event loop by default for the main\n thread; other threads by default have no event loop.\n\n Other policies may have different rules (e.g. 
a single global\n event loop, or automatically creating an event loop per thread, or\n using some other notion of context to which an event loop is\n associated).\n \"\"\"\n\n _loop_factory = None\n\n class _Local(threading.local):\n _loop = None\n _set_called = False\n\n def __init__(self):\n self._local = self._Local()\n\n def get_event_loop(self):\n \"\"\"Get the event loop for the current context.\n\n Returns an instance of EventLoop or raises an exception.\n \"\"\"\n if (self._local._loop is None and\n not self._local._set_called and\n threading.current_thread() is threading.main_thread()):\n stacklevel = 2\n try:\n f = sys._getframe(1)\n except AttributeError:\n pass\n else:\n # Move up the call stack so that the warning is attached\n # to the line outside asyncio itself.\n while f:\n module = f.f_globals.get('__name__')\n if not (module == 'asyncio' or module.startswith('asyncio.')):\n break\n f = f.f_back\n stacklevel += 1\n import warnings\n warnings.warn('There is no current event loop',\n DeprecationWarning, stacklevel=stacklevel)\n self.set_event_loop(self.new_event_loop())\n\n if self._local._loop is None:\n raise RuntimeError('There is no current event loop in thread %r.'\n % threading.current_thread().name)\n\n return self._local._loop\n\n def set_event_loop(self, loop):\n \"\"\"Set the event loop.\"\"\"\n self._local._set_called = True\n if loop is not None and not isinstance(loop, AbstractEventLoop):\n raise TypeError(f\"loop must be an instance of AbstractEventLoop or None, not '{type(loop).__name__}'\")\n self._local._loop = loop\n\n def new_event_loop(self):\n \"\"\"Create a new event loop.\n\n You must call set_event_loop() to make this the current event\n loop.\n \"\"\"\n return self._loop_factory()\n\n\n# Event loop policy. The policy itself is always global, even if the\n# policy's rules say that there is an event loop per thread (or other\n# notion of context). The default policy is installed by the first\n# call to get_event_loop_policy().\n_event_loop_policy = None\n\n# Lock for protecting the on-the-fly creation of the event loop policy.\n_lock = threading.Lock()\n\n\n# A TLS for the running event loop, used by _get_running_loop.\nclass _RunningLoop(threading.local):\n loop_pid = (None, None)\n\n\n_running_loop = _RunningLoop()\n\n\ndef get_running_loop():\n \"\"\"Return the running event loop. Raise a RuntimeError if there is none.\n\n This function is thread-specific.\n \"\"\"\n # NOTE: this function is implemented in C (see _asynciomodule.c)\n loop = _get_running_loop()\n if loop is None:\n raise RuntimeError('no running event loop')\n return loop\n\n\ndef _get_running_loop():\n \"\"\"Return the running event loop or None.\n\n This is a low-level function intended to be used by event loops.\n This function is thread-specific.\n \"\"\"\n # NOTE: this function is implemented in C (see _asynciomodule.c)\n running_loop, pid = _running_loop.loop_pid\n if running_loop is not None and pid == os.getpid():\n return running_loop\n\n\ndef _set_running_loop(loop):\n \"\"\"Set the running event loop.\n\n This is a low-level function intended to be used by event loops.\n This function is thread-specific.\n \"\"\"\n # NOTE: this function is implemented in C (see _asynciomodule.c)\n _running_loop.loop_pid = (loop, os.getpid())\n\n\ndef _init_event_loop_policy():\n global _event_loop_policy\n with _lock:\n if _event_loop_policy is None: # pragma: no branch\n from . 
import DefaultEventLoopPolicy\n _event_loop_policy = DefaultEventLoopPolicy()\n\n\ndef get_event_loop_policy():\n \"\"\"Get the current event loop policy.\"\"\"\n if _event_loop_policy is None:\n _init_event_loop_policy()\n return _event_loop_policy\n\n\ndef set_event_loop_policy(policy):\n \"\"\"Set the current event loop policy.\n\n If policy is None, the default policy is restored.\"\"\"\n global _event_loop_policy\n if policy is not None and not isinstance(policy, AbstractEventLoopPolicy):\n raise TypeError(f\"policy must be an instance of AbstractEventLoopPolicy or None, not '{type(policy).__name__}'\")\n _event_loop_policy = policy\n\n\ndef get_event_loop():\n \"\"\"Return an asyncio event loop.\n\n When called from a coroutine or a callback (e.g. scheduled with call_soon\n or similar API), this function will always return the running event loop.\n\n If there is no running event loop set, the function will return\n the result of `get_event_loop_policy().get_event_loop()` call.\n \"\"\"\n # NOTE: this function is implemented in C (see _asynciomodule.c)\n current_loop = _get_running_loop()\n if current_loop is not None:\n return current_loop\n return get_event_loop_policy().get_event_loop()\n\n\ndef set_event_loop(loop):\n \"\"\"Equivalent to calling get_event_loop_policy().set_event_loop(loop).\"\"\"\n get_event_loop_policy().set_event_loop(loop)\n\n\ndef new_event_loop():\n \"\"\"Equivalent to calling get_event_loop_policy().new_event_loop().\"\"\"\n return get_event_loop_policy().new_event_loop()\n\n\ndef get_child_watcher():\n \"\"\"Equivalent to calling get_event_loop_policy().get_child_watcher().\"\"\"\n return get_event_loop_policy().get_child_watcher()\n\n\ndef set_child_watcher(watcher):\n \"\"\"Equivalent to calling\n get_event_loop_policy().set_child_watcher(watcher).\"\"\"\n return get_event_loop_policy().set_child_watcher(watcher)\n\n\n# Alias pure-Python implementations for testing purposes.\n_py__get_running_loop = _get_running_loop\n_py__set_running_loop = _set_running_loop\n_py_get_running_loop = get_running_loop\n_py_get_event_loop = get_event_loop\n\n\ntry:\n # get_event_loop() is one of the most frequently called\n # functions in asyncio. 
Pure Python implementation is\n # about 4 times slower than C-accelerated.\n from _asyncio import (_get_running_loop, _set_running_loop,\n get_running_loop, get_event_loop)\nexcept ImportError:\n pass\nelse:\n # Alias C implementations for testing purposes.\n _c__get_running_loop = _get_running_loop\n _c__set_running_loop = _set_running_loop\n _c_get_running_loop = get_running_loop\n _c_get_event_loop = get_event_loop\n\n\nif hasattr(os, 'fork'):\n def on_fork():\n # Reset the loop and wakeupfd in the forked child process.\n if _event_loop_policy is not None:\n _event_loop_policy._local = BaseDefaultEventLoopPolicy._Local()\n _set_running_loop(None)\n signal.set_wakeup_fd(-1)\n\n os.register_at_fork(after_in_child=on_fork)\n","repo_name":"python/cpython","sub_path":"Lib/asyncio/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":29125,"program_lang":"python","lang":"en","doc_type":"code","stars":56926,"dataset":"github-code","pt":"4"} +{"seq_id":"73222055478","text":"from Try.hyperparameter_optimization import BATCH_SIZE, EPOCHS\nfrom MNIST_Dataset.load_data import load_mnist\nimport sys\nimport os\nsys.path.append(os.pardir)\n\nimport numpy as np\nimport pickle\n\nfrom Common.trainer import Trainer\nfrom Optimizers.optimizers import SGD, Momentum, AdaGrad, Adam, RMSProp\nfrom Convolutional_neural_networks.cnns import Convolution, Maxpooling\nfrom Overfit_solutions.overfit_solutions import Dropout\nfrom BP.multilayer import AffineLyaer, SigmoidLayer, ReLULayer, SoftmaxWithLoss\nfrom Numerical_differentiation.numerical_differentiation import numerical_gradient\n\n\nBATCH_SIZE = 128\nEPOCHS = 20\n\n\nclass DeepConvNet:\n \"\"\"\n conv - relu - conv- relu - pool -\n conv - relu - conv- relu - pool -\n conv - relu - conv- relu - pool -\n affine - relu - dropout - affine - dropout - softmax\n \"\"\"\n def __init__(self, input_dim=(1, 28, 28),\n conv_param_1 = {'filter_num':16, 'filter_size':3, 'pad':1, 'stride':1},\n conv_param_2 = {'filter_num':16, 'filter_size':3, 'pad':1, 'stride':1},\n conv_param_3 = {'filter_num':32, 'filter_size':3, 'pad':1, 'stride':1},\n conv_param_4 = {'filter_num':32, 'filter_size':3, 'pad':2, 'stride':1},\n conv_param_5 = {'filter_num':64, 'filter_size':3, 'pad':1, 'stride':1},\n conv_param_6 = {'filter_num':64, 'filter_size':3, 'pad':1, 'stride':1},\n hidden_size=50, output_size=10):\n # weight initialization\n # number of neurons of each layer\n pre_node_nums = np.array([1*3*3, 16*3*3, 16*3*3, 32*3*3, 32*3*3, 64*3*3, 64*4*4, hidden_size])\n weight_init_scales = np.sqrt(2.0 / pre_node_nums) # He initialization\n \n self.params = {}\n pre_channel_num = input_dim[0]\n for idx, conv_param in enumerate([conv_param_1, conv_param_2, conv_param_3, conv_param_4, conv_param_5, conv_param_6]):\n self.params['W' + str(idx+1)] = weight_init_scales[idx] * np.random.randn(conv_param['filter_num'], pre_channel_num, conv_param['filter_size'], conv_param['filter_size'])\n self.params['b' + str(idx+1)] = np.zeros(conv_param['filter_num'])\n pre_channel_num = conv_param['filter_num']\n self.params['W7'] = weight_init_scales[6] * np.random.randn(64*4*4, hidden_size)\n self.params['b7'] = np.zeros(hidden_size)\n self.params['W8'] = weight_init_scales[7] * np.random.randn(hidden_size, output_size)\n self.params['b8'] = np.zeros(output_size)\n\n # build network\n self.layers = []\n self.layers.append(Convolution(self.params['W1'], self.params['b1'], \n conv_param_1['stride'], conv_param_1['pad']))\n self.layers.append(ReLULayer())\n 
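# block 1 (cont.): a second 3x3 conv (pad=1 keeps 28x28); the pool below halves the feature map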
\n        self.layers.append(Convolution(self.params['W2'], self.params['b2'], \n                           conv_param_2['stride'], conv_param_2['pad']))\n        self.layers.append(ReLULayer())\n        self.layers.append(Maxpooling(pool_h=2, pool_w=2, stride=2))\n        self.layers.append(Convolution(self.params['W3'], self.params['b3'], \n                           conv_param_3['stride'], conv_param_3['pad']))\n        self.layers.append(ReLULayer())\n        self.layers.append(Convolution(self.params['W4'], self.params['b4'],\n                           conv_param_4['stride'], conv_param_4['pad']))\n        self.layers.append(ReLULayer())\n        self.layers.append(Maxpooling(pool_h=2, pool_w=2, stride=2))\n        self.layers.append(Convolution(self.params['W5'], self.params['b5'],\n                           conv_param_5['stride'], conv_param_5['pad']))\n        self.layers.append(ReLULayer())\n        self.layers.append(Convolution(self.params['W6'], self.params['b6'],\n                           conv_param_6['stride'], conv_param_6['pad']))\n        self.layers.append(ReLULayer())\n        self.layers.append(Maxpooling(pool_h=2, pool_w=2, stride=2))\n        self.layers.append(AffineLyaer(self.params['W7'], self.params['b7']))\n        self.layers.append(ReLULayer())\n        self.layers.append(Dropout(0.5))\n        self.layers.append(AffineLyaer(self.params['W8'], self.params['b8']))\n        self.layers.append(Dropout(0.5))\n        \n        self.last_layer = SoftmaxWithLoss()\n\n    def predict(self, x, train_flg=False):\n        for layer in self.layers:\n            if isinstance(layer, Dropout):\n                x = layer.forward(x, train_flg)\n            else:\n                x = layer.forward(x)\n        return x\n\n    def loss(self, x, t):\n        y = self.predict(x, train_flg=True)\n        return self.last_layer.forward(y, t)\n\n    def accuracy(self, x, t, batch_size=100):\n        if t.ndim != 1 : t = np.argmax(t, axis=1)\n\n        acc = 0.0\n\n        for i in range(int(x.shape[0] / batch_size)):\n            tx = x[i*batch_size:(i+1)*batch_size]\n            tt = t[i*batch_size:(i+1)*batch_size]\n            y = self.predict(tx, train_flg=False)\n            y = np.argmax(y, axis=1)\n            acc += np.sum(y == tt)\n\n        return acc / x.shape[0]\n\n    def gradient(self, x, t):\n        # forward\n        self.loss(x, t)\n\n        # backward\n        dout = 1\n        dout = self.last_layer.backward(dout)\n\n        tmp_layers = self.layers.copy()\n        tmp_layers.reverse()\n        for layer in tmp_layers:\n            dout = layer.backward(dout)\n\n        # collect the gradients of the layers that hold weights, by their index in self.layers\n        grads = {}\n        for i, layer_idx in enumerate((0, 2, 5, 7, 10, 12, 15, 18)):\n            grads['W' + str(i+1)] = self.layers[layer_idx].dW\n            grads['b' + str(i+1)] = self.layers[layer_idx].db\n\n        return grads\n\n    def save_params(self, file_name=\"params.pkl\"):\n        params = {}\n        for key, val in self.params.items():\n            params[key] = val\n        with open(file_name, 'wb') as f:\n            pickle.dump(params, f)\n\n    def load_params(self, file_name=\"params.pkl\"):\n        with open(file_name, 'rb') as f:\n            params = pickle.load(f)\n        for key, val in params.items():\n            self.params[key] = val\n\n        for i, layer_idx in enumerate((0, 2, 5, 7, 10, 12, 15, 18)):\n            self.layers[layer_idx].W = self.params['W' + str(i+1)]\n            self.layers[layer_idx].b = self.params['b' + str(i+1)]\n\n\ndef main():\n    (train_data, train_labels), (test_data, test_labels) = load_mnist(normalize=True, flatten=False, one_hot_label=False)\n    network = DeepConvNet()\n    trainer = Trainer(train_data, train_labels, test_data, test_labels,\n                      network, batch_size=BATCH_SIZE, epochs=EPOCHS, \n                      optimizer=\"SGD\", optimizer_params={\"lr\": 0.001}, to_save=False)\n    trainer.train()\n\n\nif __name__ == \"__main__\":\n    main()","repo_name":"Chenkehan21/My-deep-learning-from-scratch","sub_path":"Try/try_cnn.py","file_name":"try_cnn.py","file_ext":"py","file_size_in_byte":6663,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"}
{"seq_id":"25935662747","text":"# coding: 
utf8\nimport os\nfrom os import environ\n#IF THERE IS ANDROID_BOOTLOGO IN DICT NAME ENVIRON SO IT WILL BE ANDROID\nif 'ANDROID_BOOTLOGO' in environ:\n os.environ['KIVY_AUDIO'] = 'android'\nelse:\n #IF NOT IT IS NOT ANDROID\n print(\"!Android\")\n\nfrom kivymd.app import MDApp\nfrom kivy.lang import Builder\nfrom kivy.utils import platform\n\nfrom oscpy.client import OSCClient\nfrom oscpy.server import OSCThreadServer\nfrom kivy.core.window import Window\nfrom kivy.factory import Factory\nfrom kivy.properties import ObjectProperty\n\nfrom mainscreen.audio import player\n\n\nfrom mainscreen.mainscreen import MainScreen\n\n\n\nif platform == 'android':\n from android.permissions import request_permissions, Permission\n\n\n\n\n request_permissions([Permission.READ_EXTERNAL_STORAGE, Permission.WRITE_EXTERNAL_STORAGE])\nfrom jnius import autoclass\n\n\n\nif platform == 'macosx':\n Window.size = (450,750)\n\n\nSERVICE_NAME = u'{packagename}.Service{servicename}'.format(\n packagename=u'org.kivy.oscservice',\n servicename=u'Pong'\n)\n\nfrom kivymd.uix.slider import MDSlider\n\nclass MySlider(MDSlider):\n sound = ObjectProperty(None)\n\n def on_touch_up(self, touch):\n if touch.grab_current == self:\n # call super method and save its return\n ret_val = super(MySlider, self).on_touch_up(touch)\n\n # adjust position of sound\n self.sound.seek(self.max * self.value_normalized)\n\n # if sound is stopped, restart it\n if self.sound.state == 'stop':\n MDApp.get_running_app().start_play()\n\n # return the saved return value\n return ret_val\n else:\n return super(MySlider, self).on_touch_up(touch)\n\nclass ClientServerApp(MDApp):\n a = 0\n b = 0\n pilihan = 0\n def build(self):\n self.theme_cls.theme_style = \"Dark\"\n self.service = None\n # self.start_service()\n\n self.server = server = OSCThreadServer()\n server.listen(\n address=b'localhost',\n port=3002,\n default=True,\n )\n\n\n self.client = OSCClient(b'localhost', 3000)\n self.screen = Builder.load_file('main.kv')\n self.start_service()\n self.asw = ''\n\n return self.screen\n\n\n def choose_next(self):\n try:\n if platform == 'android':\n from random import choice\n from glob import glob\n dir = glob('/sdcard/*.wav')\n selected = choice(dir)\n else:\n from random import choice\n from glob import glob\n dir = glob('*.wav')\n selected =choice(dir)\n try:\n self.screen.ids.mainscreen.ids.screen1.ids.container.remove_widget(self.slider)\n except AttributeError:\n pass\n\n try:\n\n player.load(selected)\n\n self.slider = MySlider(min=0, max=player.loader.length, value=0, sound=player.loader,\n pos_hint={'center_x': 0.50, 'center_y': 0.6},\n size_hint=(0.6, 0.1))\n\n self.screen.ids.mainscreen.ids.screen1.ids.container.add_widget(self.slider)\n\n self.updater = None\n self.start_play()\n\n\n except AttributeError:\n\n print(player.loader.length)\n except:\n pass\n\n\n\n\n def recent(self):\n try:\n try:\n self.screen.ids.mainscreen.ids.screen1.ids.container.remove_widget(self.slider)\n except AttributeError:\n pass\n\n\n\n\n\n\n\n\n\n try:\n\n\n player.load(player.filename)\n self.slider = MySlider(min=0, max=player.loader.length, value=0, sound=player.loader,\n pos_hint={'center_x': 0.50, 'center_y': 0.6},\n size_hint=(0.6, 0.1))\n\n self.screen.ids.mainscreen.ids.screen1.ids.container.add_widget(self.slider)\n\n self.updater = None\n self.start_play()\n\n\n except AttributeError:\n\n print(player.loader.length)\n except:\n pass\n def selected(self, filename, asw, budi):\n for i in range(1):\n #Making For I in range to use continue\n\n if self.a == 
0:\n self.asw = asw[0]\n\n self.popup = Factory.CustomPopup()\n\n self.popup.text = asw[0]\n\n self.popup.open()\n self.a = 1\n #so if self.a == 0 so self .a will be 1 and if 1 it will be executed again so i used continue\n\n continue\n\n\n\n\n if self.a == 1 :\n\n if self.asw == asw[0]:\n self.popup = Factory.CustomPopup()\n\n self.popup.text = asw[0]\n\n self.popup.open()\n\n\n if self.asw != asw[0]:\n self.asw = asw[0]\n\n\n\n self.popup = Factory.CustomPopup()\n\n self.popup.text = asw[0]\n\n self.popup.open()\n\n\n\n def start_service(self):\n if platform == 'android':\n service = autoclass(SERVICE_NAME)\n self.mActivity = autoclass(u'org.kivy.android.PythonActivity').mActivity\n argument = ''\n service.start(self.mActivity, argument)\n self.service = service\n\n elif platform in ('linux', 'linux2', 'macosx', 'win'):\n from runpy import run_path\n from threading import Thread\n self.service = Thread(\n target=run_path,\n args=['service.py'],\n kwargs={'run_name': '__main__'},\n daemon=True\n )\n self.service.start()\n else:\n raise NotImplementedError(\n \"service start not implemented on this platform\"\n )\n\n def stop_service(self):\n if self.service:\n if platform == \"android\":\n self.service.stop(self.mActivity)\n elif platform in ('linux', 'linux2', 'macos', 'win'):\n # The below method will not work.\n # Need to develop a method like\n # https://www.oreilly.com/library/view/python-cookbook/0596001673/ch06s03.html\n self.service.stop()\n else:\n raise NotImplementedError(\n \"service start not implemented on this platform\"\n )\n self.service = None\n def send(self,argumen):\n self.display_message(argumen)\n\n def set_loop(self):\n self.b+=1\n from kivymd.toast import toast\n if self.b % 2 != 0:\n\n player.loader.loop = True\n toast(\"Loop Set To True\")\n else:\n player.loader.loop = False\n toast(\"Loop Set To False\")\n\n\n def play_again(self):\n\n player.play()\n player.loader.seek(self.last)\n\n\n def pause(self):\n\n player.loader.stop()\n self.last = player.loader.get_pos()\n\n def start_play(self, *args):\n # play the sound\n from kivy.clock import Clock\n player.loader.play()\n\n if self.updater is None:\n # schedule updates to the slider\n self.updater = Clock.schedule_interval(self.update_slider, 0.5)\n\n def update_slider(self, dt):\n # update slider\n try:\n self.slider.value = player.loader.get_pos()\n\n # if the sound has finished, stop the updating\n if player.loader.state == 'stop':\n self.updater.cancel()\n self.updater = None\n except:\n pass\n\n def display_message(self, message):\n\n try:\n self.screen.ids.mainscreen.ids.screen1.ids.container.remove_widget(self.slider)\n except AttributeError:\n pass\n\n\n\n\n\n\n\n\n\n try:\n\n\n player.load(message)\n self.slider = MySlider(min=0, max=player.loader.length, value=0, sound=player.loader,\n pos_hint={'center_x': 0.50, 'center_y': 0.6},\n size_hint=(0.6, 0.1))\n\n self.screen.ids.mainscreen.ids.screen1.ids.container.add_widget(self.slider)\n\n self.updater = None\n self.start_play()\n\n\n except AttributeError:\n\n print(player.loader.length)\n\n\n\n\nif __name__ == '__main__':\n ClientServerApp().run()","repo_name":"will702/AUDIO-PROJECT","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"26325597080","text":"from rest_framework import generics, status\nfrom rest_framework.response import Response\nfrom . models import NewUser\nfrom . 
serializers import personDataSerializer\nfrom rest_framework.views import APIView\nfrom django.http import Http404\n\nclass personData_list(APIView):\n    def get(self, request, format=None):\n        pessoa = NewUser.objects.all()\n        serializer = personDataSerializer(pessoa, many=True)\n        return Response(serializer.data)\n\n    def post(self, request, format=None):\n        serializer = personDataSerializer(data=request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data, status=status.HTTP_201_CREATED)\n        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\nclass personData_detail(APIView):\n    def get_object(self, pk):\n        try:\n            return NewUser.objects.get(pk=pk)\n        except NewUser.DoesNotExist:\n            raise Http404\n    \n    def get(self, request, pk, format=None):\n        pessoa = self.get_object(pk)\n        serializer = personDataSerializer(pessoa)\n        return Response(serializer.data)\n    \n    def put(self, request, pk, format=None):\n        pessoa = self.get_object(pk)\n        serializer = personDataSerializer(pessoa, data=request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data)\n        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n    \n    def delete(self, request, pk, format=None):\n        pessoa = self.get_object(pk)\n        pessoa.delete()\n        return Response(status=status.HTTP_204_NO_CONTENT)\n\n\n    \n","repo_name":"AndreLCSilva/TesteAPI","sub_path":"pessoa/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
{"seq_id":"22068295628","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport time\nimport re\nimport os\n\n\ndef yzm(driver):\n\n    driver.switch_to.frame('fc-iframe-wrap') # switch into the captcha iframe (index 0, CaptchaFrame / fc-iframe-wrap)\n\n    driver.find_element_by_class_name('fc_meta_audio_btn').click()\n    print('2')\n    time.sleep(1)\n    try:\n        for attempt in range(5): # try up to five times to recognize the audio captcha\n\n            download_time = time.time()\n\n            driver.find_element_by_id('audio_download').click() # download the digit audio clip\n\n            driver2 = webdriver.Chrome()\n\n            driver2.get('http://www.iflyrec.com/html/addMachineOrder.html') # iFlytek online speech recognition page\n\n            time.sleep(3) # wait for the page to load\n\n            b = os.listdir('e:\\\\sound') # scan the download directory\n\n            driver2.find_element_by_name('file').send_keys('E:\\\\sound\\\\{0}'.format(b[0])) # upload the file\n\n            time.sleep(10) # wait for recognition; increase this on a slow network\n\n            a = driver2.find_element_by_id('t_WU_FILE_0').text # fetch the recognized text\n\n            sound_text = str_clear(a) # clean up the text\n\n            driver2.quit() # close the iFlytek recognition browser\n\n            driver.find_element_by_class_name('response_field').click()\n            driver.find_element_by_class_name('response_field').send_keys(sound_text)\n\n            driver.find_element_by_id('audio_submit').click() # submit for verification\n\n            os.remove('e:\\\\sound\\\\{0}'.format(b[0])) # delete the audio file\n\n            time.sleep(1)\n\n            audio_error = driver.find_element_by_xpath('//*[@id=\"audio_error\"]/p').text # error label\n\n            print(audio_error)\n\n            if not audio_error: # no error label means verification passed, leave the loop\n                break\n            if attempt == 4: # give up after five failed attempts\n                return 0\n\n        return 1\n\n    except:\n        return 0\n\ndef is_yzm(driver, out_time=10):\n    try:\n        WebDriverWait(driver, out_time - 5).until(EC.presence_of_element_located((By.ID, 'distilCaptchaForm'))) # check whether the FunCAPTCHA form loads\n        time.sleep(2)\n        yzm(driver) # crack the captcha and continue to the next step\n        return 1\n    except:\n        return 0\n\n\n# clean up the text returned by iFlytek speech recognition; adjust the mappings to match the audio\ndef str_clear(mystr):\n    mystr = re.sub('。|!|!', '', mystr)\n    mystr = re.sub('零', '0', mystr)\n    mystr = re.sub('一', '1', mystr)\n    mystr = re.sub('二', '2', mystr)\n    mystr = re.sub('三', '3', mystr)\n    mystr = re.sub('四|是', '4', mystr)\n    mystr = re.sub('五', '5', mystr)\n    mystr = re.sub('六', '6', mystr)\n    mystr = re.sub('七', '7', mystr)\n    mystr = re.sub('八', '8', mystr)\n    mystr = re.sub('九|酒', '9', mystr)\n    return mystr\n\n\ndef main():\n\n    myPath = 'e:\\\\sound\\\\' # directory where the audio files are stored\n\n    options = webdriver.ChromeOptions()\n    prefs = {'profile.default_content_settings.popups': 0, 'download.default_directory': myPath} # set the download path: the first key disables the prompt, the second sets the directory\n    options.add_experimental_option('prefs', prefs)\n    # options.add_argument(\"--headless\")\n\n    driver = webdriver.Chrome(chrome_options=options)\n\n    driver.get('your url of distil networks')\n\n    is_yzm(driver) # recognize the captcha\n\n\nif __name__ == '__main__':\n    main()","repo_name":"wc110302/My-spider","sub_path":"破解distil-networks语音验证码/distil-netwoks.py","file_name":"distil-netwoks.py","file_ext":"py","file_size_in_byte":3373,"program_lang":"python","lang":"en","doc_type":"code","stars":116,"dataset":"github-code","pt":"4"}
{"seq_id":"17418579431","text":"import numpy as np\nimport sklearn.preprocessing as prep\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport matplotlib.pyplot as plt\n\n# Xavier initialization: keep the weights neither too large nor too small\n##fan_in is the number of input node\n##fan_out is the number of output node\ndef xavier_init(fan_in, fan_out, constant = 1):\n    low = -constant * np.sqrt(6.0 / (fan_in + fan_out))\n    high = constant * np.sqrt(6.0 / (fan_in + fan_out))\n    ### how tf.random_uniform works:\n    ###--tf.random_uniform((x,y),minval=low,maxval=high,dtype=tf.float32)))\n    ###-- return a (x*y)matrix, the value is between low and high, the values are uniformly distributed\n    return tf.random_uniform((fan_in, fan_out),\n                             minval = low, maxval = high,\n                             dtype = tf.float32)\n\n\nclass AdditiveGaussianNoiseAutoencoder(object):\n    ##n_input is the number of the variable\n    ##n_hidden is the Hidden layer node number\n    ##transfer_function is the Hidden layer's Activation function\n    ##optimizer is the optimizer used to minimize the cost\n    ##scale is the coefficient of the noise\n    def __init__(self, n_input, n_hidden, transfer_function=tf.nn.softplus,\n                 optimizer = tf.train.AdamOptimizer(), scale=0.1):\n        self.n_input = n_input\n        self.n_hidden = n_hidden\n        self.transfer = transfer_function\n        self.scale = tf.placeholder(tf.float32)\n        self.training_scale = scale\n        network_weights = self._initializer_weights()\n        self.weigths = network_weights\n\n        # x is the trainData\n        ##placeholder as the place for data input\n        self.x = tf.placeholder(tf.float32,[None, self.n_input])\n        ##tf.matmul is Matrix multiplication\n        ##tf.add just is add\n        self.hidden = self.transfer(tf.add(tf.matmul(\n            self.x+scale * tf.random_normal((n_input,)),\n            self.weigths['w1']), self.weigths['b1']))\n        self.reconstruction = tf.add(tf.matmul(self.hidden,\n                                               self.weigths['w2']), self.weigths['b2'])\n\n        # define a loss function\n        ##the Squared Error as the cost\n        self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(\n            self.reconstruction, self.x), 2.0)) #tf.reduce_mean\n        ##use the optimizer to optimize the cost\n        self.optimizer = optimizer.minimize(self.cost)\n\n        ##create a Session and init all variables of the model\n        init = tf.global_variables_initializer()\n        self.sess = tf.Session()\n        self.sess.run(init)\n\n\n    # initialize the weights\n    def _initializer_weights(self):\n        ##dict can create a dictionary and return a dictionary\n        ###dict(a='a', b='b', t='t') # keyword arguments\n        ###{'a': 'a', 'b': 'b', 't': 't'}\n        all_weights = dict()\n        all_weights['w1'] = tf.Variable(xavier_init(self.n_input,\n                                                    self.n_hidden))\n        ##tf.zeros(shape[], dtype=tf.float32, name=None)\n        all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden],\n                                                 dtype = tf.float32))\n        all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden,\n                                                  self.n_input], dtype = tf.float32))\n        all_weights['b2'] = tf.Variable(tf.zeros([self.n_input],\n                                                 dtype = tf.float32))\n\n        return all_weights\n\n    # calculate the cost and run the optimizer\n    def partial_fit(self, X):\n        ##get the self.cost and self.optimizer as result of the feed_dict is the {self.x: X, self.scale: self.training_scale}\n        cost, opt = self.sess.run((self.cost, self.optimizer),\n                                  feed_dict = {self.x: X, self.scale: self.training_scale})\n        return cost\n\n    # calculate the cost\n    def calc_total_cost(self, X):\n        return self.sess.run(self.cost, feed_dict = {self.x: X,\n                                                     self.scale: self.training_scale})\n\n    def transform(self, X):\n        return self.sess.run(self.hidden, feed_dict={self.x: X,\n                                                     self.scale: self.training_scale})\n\n    def generate(self, hidden = None):\n        if hidden is None:\n            # sample hidden activations from a standard normal, one value per hidden unit\n            hidden = np.random.normal(size=self.n_hidden)\n        return self.sess.run(self.reconstruction,\n                             feed_dict={self.hidden: hidden})\n\n    def reconstruct(self, X):\n        return self.sess.run(self.reconstruction, feed_dict={self.x: X,\n                                                             self.scale: self.training_scale})\n\n    def getWeights(self):\n        return self.sess.run(self.weigths['w1'])\n\n    def getBiases(self):\n        return self.sess.run(self.weigths['b1'])\n\n\n\n\n\n\n#here is test code\nmnist = input_data.read_data_sets('MNIST_data',one_hot=True)\n\n##preprocessing the data about the train and the test\ndef standard_scale(X_train, X_test):\n    preprocesser = prep.StandardScaler().fit(X_train)\n    X_train = preprocesser.transform(X_train)\n    X_test = preprocesser.transform(X_test)\n    return X_train, X_test\n\n##get a random contiguous block of the data\n##note: successive calls may overlap, so this is random block sampling, not sampling without replacement\ndef get_random_block_from_data(data, batch_size):\n    start_index = np.random.randint(0, len(data) - batch_size)\n    return data[start_index:(start_index+batch_size)]\n\n##get the data about the train and the test\nX_train, X_test = standard_scale(mnist.train.images, mnist.test.images)\n\n##get the number of training samples\nn_samples = int(mnist.train.num_examples)\n##the max number of training epochs\ntraining_epochs = 20\n##any random block size\nbatch_size = 128\n##every epoch display the cost once\ndisplay_step = 1\n##create an AGN, and put the variables....\nautoencoder = AdditiveGaussianNoiseAutoencoder(n_input = 784,\n                                               n_hidden = 200,\n                                               transfer_function = tf.nn.softplus,\n                                               optimizer = tf.train.AdamOptimizer(learning_rate=0.001),\n                                               scale = 0.01)\n\nfor epoch in range(training_epochs):\n    avg_cost = 0\n    total_batch = int(n_samples / batch_size)\n    for i in range(total_batch):\n        batch_xs = get_random_block_from_data(X_train, batch_size) # X_train\n\n        cost = autoencoder.partial_fit(batch_xs)\n        avg_cost += cost / n_samples * batch_size\n\n    if epoch % display_step == 0:\n        print(\"Epoch:\",'%04d' % (epoch + 1), \"cost=\",\n              \"{:.9f}\".format(avg_cost))\n\nprint(\"Total cost: \" + str(autoencoder.calc_total_cost(X_test))) #X_test\n\n\nencode_decode = autoencoder.reconstruct(X_test)\nf, a = plt.subplots(2, 10, figsize=(10, 2))\nfor i in range(10):\n    a[0][i].imshow(np.reshape(mnist.test.images[i], (28, 28)))\n    a[1][i].imshow(np.reshape(encode_decode[i], (28,
 28)))\nplt.show()\n\n\n","repo_name":"iajqs/Mytensorflow","sub_path":"mylearn/pratice/Autoencoder/autoencoder.py","file_name":"autoencoder.py","file_ext":"py","file_size_in_byte":6964,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"}
{"seq_id":"28259569889","text":"import datetime\nimport json\nimport logging\nimport os\nimport pathlib\nimport pydoc\nimport sys\nfrom itertools import chain\n\nimport datasets\nimport fire\nimport peft\nimport torch\nimport yaml\n\nimport transformers\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger()\n\n\ndef load_dataset(train_file: str, valid_size: float = 0.1, seed: int = 42):\n    dataset = datasets.load_dataset(\"json\", data_files=train_file, split=\"train\")\n    return dataset.train_test_split(valid_size, shuffle=True, seed=seed)\n\n\ndef group_texts(examples, block_size: int):\n    concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}\n\n    total_length = len(concatenated_examples[list(examples.keys())[0]])\n    total_length = (total_length // block_size) * block_size\n    result = {\n        k: [t[i : i + block_size] for i in range(0, total_length, block_size)] for k, t in concatenated_examples.items()\n    }\n    result[\"labels\"] = result[\"input_ids\"].copy()\n    return result\n\n\ndef preprocess(examples, tokenizer, input_template: str, block_size: int = 512, num_proc: int = 4):\n    prompts = examples.map(lambda x: {\"text\": input_template.format_map(x)})\n    tokenized = prompts.map(\n        lambda x: tokenizer(x[\"text\"]), batched=True, num_proc=num_proc, remove_columns=prompts[\"train\"].features\n    )\n    grouped = tokenized.map(lambda x: group_texts(x, block_size), batched=True, num_proc=num_proc)\n    return grouped\n\n\ndef main(config_file: str, model_name: str = None):\n    # Load the config file\n    with open(config_file, \"r\") as i_:\n        config = yaml.safe_load(i_)\n\n    # config['model'] holds the parameters passed to AutoModelForCausalLM.from_pretrained\n    # the model name can be given as a CLI argument instead of in the config file, so set it on config['model'] here\n    if model_name is not None:\n        config[\"model\"][\"pretrained_model_name_or_path\"] = model_name\n\n    # Set up the output directory\n    output_dir = pathlib.Path(os.path.expandvars(config[\"outputs\"][\"dirname\"]))\n    output_dir.mkdir(parents=True, exist_ok=True)\n    # create a directory named after the model inside the output directory and save the training results there\n    model_output_dir = output_dir.joinpath(model_name)\n    config[\"training\"][\"output_dir\"] = model_output_dir\n\n    # save the final config values in the output directory\n    with open(output_dir.joinpath(\"config.yaml\"), \"w\") as o_:\n        yaml.dump(config, o_)\n\n    # Load the dataset\n    logger.info(f\"load datasets\")\n    dataset = load_dataset(**config[\"data\"])\n\n    logger.info(f\"load tokenizer\")\n    tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)\n    if tokenizer.pad_token is None:\n        tokenizer.pad_token = tokenizer.eos_token\n        logger.info(f\"set pad_token to {tokenizer.pad_token}\")\n\n    logger.info(f\"load model: {config['model']}\")\n    # convert torch_dtype from a string to the actual type\n    if \"torch_dtype\" in config[\"model\"]:\n        config[\"model\"][\"torch_dtype\"] = pydoc.locate(config[\"model\"][\"torch_dtype\"])\n    model = transformers.AutoModelForCausalLM.from_pretrained(**config[\"model\"])\n\n    # Select the weights to train\n    # weights not listed in config['finetuning']['trainables'] get requires_grad = False and are excluded from training\n    if \"trainables\" in config.get(\"finetuning\", {}):\n        trainable_params = config[\"finetuning\"][\"trainables\"]\n        for name, param in model.named_parameters():\n            trainable = False\n            for trainable_param in trainable_params:\n                if trainable_param in name:
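\n                    # any configured name fragment that matches marks this parameter as trainable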
\n                    trainable = True\n            if not trainable:\n                param.requires_grad = False\n    # keep trainable weights in float32 for training stability\n    for name, param in model.named_parameters():\n        if param.requires_grad:\n            if param.dtype != torch.float32:\n                param.data = param.data.to(torch.float32)\n                print(\"convert to float32\")\n            print(f\"tune {name} with {param.numel()} params\")\n\n    # provisional workaround: some models raise the following error\n    # AttributeError: 'function' object has no attribute '__func__'. Did you mean: '__doc__'?\n    if not hasattr(model.forward, \"__func__\"):\n        logger.info(\"add peft_model.forward.__func__\")\n        model.forward.__func__ = model.__class__.forward\n\n    # Preprocess the dataset, converting it into the following format\n    # {'input_ids': [token1, token2, ...]}\n    logger.info(\"convert datasets with input_template\")\n    lm_dataset = preprocess(dataset, tokenizer, config[\"input_template\"])\n    # the data_collator prepares the data for training\n    # DataCollatorForLanguageModeling sets 'input_ids' as 'labels'\n    data_collator = transformers.DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)\n    training_args = transformers.TrainingArguments(**config[\"training\"])\n    # set use_cache = False to avoid a warning\n    model.config.use_cache = False\n    trainer = transformers.Trainer(\n        model=model,\n        tokenizer=tokenizer,\n        train_dataset=lm_dataset[\"train\"],\n        eval_dataset=lm_dataset[\"test\"],\n        args=training_args,\n        data_collator=data_collator,\n    )\n\n    with torch.autocast(\"cuda\"):\n        result = trainer.train()\n    model.save_pretrained(model_output_dir)\n    logger.info(\"successfully finished finetuning.\")\n\n\nif __name__ == \"__main__\":\n    fire.Fire(main)\n","repo_name":"ohtaman/abci-examples","sub_path":"202307/src/finetune.py","file_name":"finetune.py","file_ext":"py","file_size_in_byte":5677,"program_lang":"python","lang":"ja","doc_type":"code","stars":20,"dataset":"github-code","pt":"4"}
{"seq_id":"1950232081","text":"# Three phase AC waveforms, voltage between two phases\n\nfrom pylab import *\nt = linspace(0, .05, 300) # 300 point array, from 0 to .05 seconds\nf = 50 # 50 Hz AC\nVm = 230 * sqrt(2)\ny1 = Vm * sin(2*pi*f*t) \t# phase 1\ny2 = Vm * sin(2*pi*f*t + 120*pi/180) # phase 2, 120 degree out of phase\ny3 = Vm * sin(2*pi*f*t + 240*pi/180) # phase 3, 120 degree out of phase\n\nplot(t, y1)\t\t\nplot(t, y2)\t\t\nplot(t, y3)\t\t\nplot(t, y2-y1, color='black')\t\t\nshow()\n","repo_name":"sposh-science/pycode-browser","sub_path":"Code/Python for Schools/3phase-ac.py","file_name":"3phase-ac.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"4"}
{"seq_id":"33224425029","text":"import app\nfrom app import app\nfrom app.models import User, Poster, Stat, QuestionOption\nimport unittest\n\nclass TestBD(unittest.TestCase):\n\n    def testInsertUpdateRemoveUser(self):\n        # Insert User\n        u1 = User(username='testUser5', email='testuser5@gmail.com')\n        u1.addUser()\n        u2 = User.getUserByUsername('testUser5')\n        # compare string representations (print() returns None, so print(u1)==print(u2) would always pass)\n        self.assertTrue(repr(u1)==repr(u2))\n\n        # Update User\n        u1 = User.getUserByUsername('testUser5')\n        u1.email = 'testusermodified@gmail.com'\n        u1.updateUser()\n        u2 = User.getUserByUsername('testUser5')\n        self.assertTrue(repr(u1)==repr(u2))\n\n        #Remove User\n        u1 = User.getUserByUsername('testUser5')\n        u1.removeUser()\n        u2 = User.getUserByUsername('testUser5')\n        self.assertTrue(u2==None)\n\n    def testInsertUpdateRemovePoster(self):\n        # Insert Poster\n        p1 = Poster(id=65535, id_usuario=1, info='Info Test')\n        p1.addPoster()\n        p2 = Poster.getPosterById(65535)\n        self.assertTrue(repr(p1)==repr(p2))\n\n        # Update Poster
\n        p1 = Poster.getPosterById(65535)\n        p1.info = 'Info modified'\n        p1.updatePoster()\n        p2 = Poster.getPosterById(65535)\n        self.assertTrue(repr(p1)==repr(p2))\n\n        # Remove Poster\n        p1 = Poster.getPosterById(65535)\n        p1.removePoster()\n        p2 = Poster.getPosterById(65535)\n        self.assertTrue(p2==None)\n\n    def testInsertUpdateRemoveStat(self):\n        # Insert Stat\n        s1 = Stat(id=65535, id_usuario=1, dato_estadistico_1='Dato 1 Test', dato_estadistico_2='Dato 2 Test')\n        s1.addStat()\n        s2 = Stat.getStatById(65535)\n        self.assertTrue(repr(s1)==repr(s2))\n\n        # Update Stat\n        s1 = Stat.getStatById(65535)\n        s1.dato_estadistico_1 = 'Dato 1 modified'\n        s1.updateStat()\n        s2 = Stat.getStatById(65535)\n        self.assertTrue(repr(s1)==repr(s2))\n\n        # Remove Stat\n        s1 = Stat.getStatById(65535)\n        s1.removeStat()\n        s2 = Stat.getStatById(65535)\n        self.assertTrue(s2==None)\n\nif __name__ == '__main__':\n    unittest.main()","repo_name":"jorgegene/sis-inf-project","sub_path":"test/testDB.py","file_name":"testDB.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
{"seq_id":"14089327831","text":"from flask import Flask, render_template, url_for, request, redirect\nfrom collections import Counter\nfrom twitter import *\nimport json, math, sys, datetime\nfrom google.appengine.ext import db\n\nclass Query(db.Model):\n    query = db.StringProperty()\n    result = db.StringProperty(required = True)\n    timestamp = db.DateTimeProperty(required = True)\n\napp = Flask(__name__)\napp.config.from_pyfile(\"tweetsneak.py\")\n\n@app.route(\"/\")\ndef index():\n    return render_template(\"index.html\")\n\n@app.route(\"/search\")\ndef search():\n    q = request.args.get(\"q\")\n    if q is None:\n        q = \"\"\n    \n    BEARER_TOKEN = oauth2_dance(app.config[\"CONSUMER_KEY\"], app.config[\"CONSUMER_SECRET\"])\n    t = Twitter(auth=OAuth2(bearer_token = BEARER_TOKEN))\n    \n    tweets = []\n    max_id = 0\n    \n    while len(tweets) < app.config[\"MAX_TWEETS\"]:\n        try:\n            for tweet in iter(t.search.tweets(q=q, count=app.config[\"MAX_TWEETS\"], max_id=max_id)[\"statuses\"]):\n                tweets.append(tweet)\n            if max_id == tweets[-1][\"id\"]:\n                break\n            else:\n                max_id = tweets[-1][\"id\"]\n        except TwitterHTTPError:\n            break\n        except IndexError:\n            break\n    \n    transtab = dict((ord(char), None) for char in u\"-=+|!@#$%^&*()`~[]{};:'\\\",<.>\\\\/?\") #trans table for removing punctuation\n    word_list = Counter()\n    \n    for tweet in tweets:\n        for word in tweet[\"text\"].translate(transtab).split():\n            word_list[word.lower()] += 1\n    \n    most_common = word_list.most_common(10)\n    most_common_json = json.dumps(most_common)\n    \n    entity = Query(query=q, result=most_common_json, timestamp=datetime.datetime.utcnow())\n    entity.put()\n    \n    most_common = enumerate(most_common)\n    num_pages = int(math.ceil(float(len(tweets)) / app.config[\"RPP\"]))\n    if num_pages == 0:\n        num_pages = 1\n    \n    return render_template(\"search.html\", q = q, tweets = map(json.dumps, tweets), most_common = most_common, rpp = app.config[\"RPP\"], num_pages = num_pages)\n    \n@app.errorhandler(404)\ndef notfound(e):\n    return render_template(\"404.html\"), 404","repo_name":"DerekDuchesne/tweetsneak","sub_path":"tweetsneak_python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2148,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"}
{"seq_id":"37302351900","text":"__author__ = 'mactep'\n\nimport socketserver\n\n\nclass MyTCPHandler(socketserver.BaseRequestHandler):\n\n    def handle(self):
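\n        # minimal echo handler: read one chunk, log it, then send the same bytes back
        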
data = self.request.recv(1024).strip()\n print(\"Some input: %s\" % data.decode())\n to_send = data\n self.request.send(to_send)\n\nif __name__ == \"__main__\":\n HOST, PORT = \"localhost\", 9999\n\n server = socketserver.TCPServer((HOST, PORT), MyTCPHandler)\n server.serve_forever()\n","repo_name":"zmactep/ig-pipeline","sub_path":"ig-frontend/test_server.py","file_name":"test_server.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"} +{"seq_id":"698494320","text":"import docdeid as dd\n\n\nclass DeduceMergeAdjacentAnnotations(dd.process.MergeAdjacentAnnotations):\n \"\"\"\n Merge adjacent tags, according to deduce logic:\n\n - adjacent annotations with mixed patient/person tags are replaced with a patient annotation\n \"\"\"\n\n def _tags_match(self, left_tag: str, right_tag: str) -> bool:\n \"\"\"\n Define whether two tags match. This is the case when they are equal strings, and additionally patient and person\n tags are also regarded as equal.\n\n Args:\n left_tag: The left tag.\n right_tag: The right tag.\n\n Returns:\n ``True`` if tags match, ``False`` otherwise.\n \"\"\"\n\n return (left_tag == right_tag) or {left_tag, right_tag} == {\n \"patient\",\n \"persoon\",\n }\n\n def _adjacent_annotations_replacement(\n self,\n left_annotation: dd.Annotation,\n right_annotation: dd.Annotation,\n text: str,\n ) -> dd.Annotation:\n \"\"\"\n Replace two annotations that have equal tags with a new annotation.\n\n If one of the two annotations has the patient tag, the new annotation will also be tagged patient. In other\n cases, the tags are already equal.\n \"\"\"\n\n if left_annotation.tag != right_annotation.tag:\n replacement_tag = \"patient\"\n else:\n replacement_tag = left_annotation.tag\n\n return dd.Annotation(\n text=text[left_annotation.start_char : right_annotation.end_char],\n start_char=left_annotation.start_char,\n end_char=right_annotation.end_char,\n tag=replacement_tag,\n )\n\n\nclass PersonAnnotationConverter(dd.process.AnnotationProcessor):\n \"\"\"\n Responsible for processing the annotations produced by all name annotators (regular and context-based).\n\n Resolves overlap between them, and then maps the tags to either \"patient\" or \"persoon\", based on whether \"patient\"\n is in the tag (e.g. 
voornaam_patient => patient, achternaam_onbekend => persoon).\n \"\"\"\n\n def __init__(self) -> None:\n self._overlap_resolver = dd.process.OverlapResolver(\n sort_by=[\"tag\", \"length\"],\n sort_by_callbacks={\"tag\": lambda x: \"patient\" not in x, \"length\": lambda x: -x},\n )\n\n def process_annotations(self, annotations: dd.AnnotationSet, text: str) -> dd.AnnotationSet:\n\n new_annotations = self._overlap_resolver.process_annotations(annotations, text=text)\n\n return dd.AnnotationSet(\n dd.Annotation(\n text=annotation.text,\n start_char=annotation.start_char,\n end_char=annotation.end_char,\n tag=\"patient\" if \"patient\" in annotation.tag else \"persoon\",\n )\n for annotation in new_annotations\n )\n","repo_name":"jacob-rousseau/deduce","sub_path":"deduce/process/annotation_processing.py","file_name":"annotation_processing.py","file_ext":"py","file_size_in_byte":2829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"43238294355","text":"# request_utils.py\n# ~~~~~~~~~~~~~~~~~~~~~\n\nimport functools\nimport inspect\nimport typing\nfrom abc import ABCMeta, abstractmethod\nfrom collections import defaultdict\nfrom enum import Enum\nfrom typing import Any, Dict, Iterable, Union\n\nfrom requests.hooks import dispatch_hook\nfrom requests.models import Request, Response\nfrom requests.sessions import PreparedRequest\nfrom requests.sessions import Session as _Session\n\n\nclass HookEvent(str, Enum):\n RESPONSE = \"response\"\n REQUEST = \"request\"\n\n\nclass IResponseHook(metaclass=ABCMeta):\n @abstractmethod\n def __call__(self, response: Response, **kwds: Any) -> Any:\n ...\n\n\nclass IRequestHook(metaclass=ABCMeta):\n @abstractmethod\n def __call__(self, prepare_req: PreparedRequest, **kwds) -> Any:\n ...\n\n\nclass Session(_Session):\n \"\"\"\n Session Class\n \"\"\"\n\n def request(\n self,\n method,\n url,\n params=None,\n data=None,\n headers=None,\n cookies=None,\n files=None,\n auth=None,\n timeout=None,\n allow_redirects=True,\n proxies=None,\n hooks=None,\n stream=None,\n verify=None,\n cert=None,\n json=None,\n ):\n request_hooks = []\n if hooks:\n hooks = typing.cast(typing.Dict, hooks)\n if HookEvent.REQUEST in hooks:\n request_hooks = hooks.pop(HookEvent.REQUEST)\n req = Request(\n method=method.upper(),\n url=url,\n headers=headers,\n files=files,\n data=data or {},\n json=json,\n params=params or {},\n auth=auth,\n cookies=cookies,\n hooks=hooks,\n )\n prep = self.prepare_request(req)\n\n if request_hooks:\n # handle request hooks\n prep = dispatch_hook(\n HookEvent.REQUEST,\n {HookEvent.REQUEST: request_hooks},\n prep,\n )\n\n proxies = proxies or {}\n\n settings = self.merge_environment_settings(\n prep.url, proxies, stream, verify, cert\n )\n\n send_kwargs = {\n \"timeout\": timeout,\n \"allow_redirects\": allow_redirects,\n }\n send_kwargs.update(settings)\n resp = self.send(prep, **send_kwargs)\n\n return resp\n\n\nclass RequestHookMixin:\n def __init__(self) -> None:\n self._hooks: Dict = defaultdict(list)\n\n def iter_check_hooks(self, hooks):\n if hasattr(hooks, \"__call__\"):\n hooks = [hooks]\n for hook in hooks:\n if not inspect.isclass(hook):\n continue\n if issubclass(hook, IResponseHook):\n yield hook\n elif issubclass(hook, IRequestHook):\n yield hook\n continue\n\n def register_hooks(\n self,\n hook_event: HookEvent,\n hooks: Union[Iterable[typing.Callable], typing.Callable],\n ):\n for valid_hook in self.iter_check_hooks(hooks):\n if valid_hook not in self._hooks[hook_event]:\n 
self._hooks[hook_event].append(valid_hook)\n\n def deregister_hooks(\n self,\n hook_event: HookEvent,\n hooks: Union[Iterable[typing.Callable], typing.Callable],\n ):\n if hook_event not in self._hooks:\n return\n event_hooks = []\n for register_hook in self._hooks[hook_event]:\n if register_hook not in self.iter_check_hooks(hooks):\n event_hooks.append(register_hook)\n self._hooks[hook_event] = event_hooks\n\n @property\n def hooks(self):\n return self._hooks\n\n @hooks.setter\n def hooks(self, hook_map: dict):\n self._hooks = defaultdict(list)\n for k, v in hook_map.items():\n if not isinstance(v, Iterable):\n v = [v]\n self.register_hooks(k, v)\n\n @hooks.deleter\n def hooks(self):\n self._hooks = defaultdict(list)\n\n\nclass RequestsCtx:\n def __enter__(self):\n self.session = Session()\n return self\n\n def __exit__(self, *args: typing.Any):\n del self.session\n\n\nclass Requests(RequestsCtx, RequestHookMixin):\n \"\"\"\n Requests Class\n \"\"\"\n\n def __init__(\n self,\n url=None,\n headers=None,\n proxies=None,\n cookies=None,\n json=None,\n params=None,\n **kwgs: Dict,\n ) -> None:\n self.persist_settions = {\n \"url\": url,\n \"headers\": headers,\n \"proxies\": proxies,\n \"cookies\": cookies,\n \"json\": json,\n \"params\": params,\n **kwgs,\n }\n return super().__init__()\n\n def request(self, method, url=None, **kwargs):\n _hooks = {}\n for event_key in self.hooks:\n _hooks[event_key] = [_h() for _h in self.hooks[event_key]]\n if not url:\n url = self.persist_settions.get(\"url\") or kwargs.get(\n \"url\"\n )\n settings = {}\n settings.update(self.persist_settions)\n settings.update(kwargs)\n settings.pop(\"url\", 0)\n url = typing.cast(str, url)\n request_func = functools.partial(\n self.session.request, method, url\n )\n return request_func(hooks=_hooks, **settings)\n\n def get(self, url=None, *args, **kwargs) -> Response:\n return self.request(\"GET\", url, *args, **kwargs)\n\n def post(self, url=None, data=None, json=None, **kwargs):\n return self.request(\n \"post\", url, data=data, json=json, **kwargs\n )\n","repo_name":"mapyJJJ/simple-starlette","sub_path":"simple_starlette/http_client/request_utils.py","file_name":"request_utils.py","file_ext":"py","file_size_in_byte":5579,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"} +{"seq_id":"74026762997","text":"import logging\nfrom dataclasses import asdict, dataclass\nfrom http import HTTPStatus\nfrom typing import Callable, Optional, Type, Union\n\nfrom flask import Flask\n\nfrom app.exceptions.application_errors import (\n ApplicationError,\n ParameterError,\n ResourceConflictError,\n ResourceNotFoundError,\n)\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass()\nclass ResponseBase:\n detail: Optional[str] = None\n status: Optional[int] = None\n title: Optional[str] = None\n type: Optional[str] = None\n code: Optional[str] = None\n\n\ndef handle_application_error(e: ApplicationError):\n http_status = HTTPStatus.INTERNAL_SERVER_ERROR\n logger.exception(\"application error.\")\n return (\n asdict(\n ResponseBase(\n detail=repr(e),\n status=http_status,\n title=http_status.phrase,\n code=e.code,\n )\n ),\n http_status,\n )\n\n\ndef handle_resource_conflict_error(e: ResourceConflictError):\n http_status = HTTPStatus.CONFLICT\n logger.exception(\"resource conflict error.\")\n return (\n asdict(\n ResponseBase(\n detail=repr(e),\n status=http_status,\n title=http_status.phrase,\n code=e.code,\n )\n ),\n http_status,\n )\n\n\ndef handle_parameter_error(e: ParameterError):\n 
http_status = HTTPStatus.BAD_REQUEST\n logger.exception(\"parameter error.\")\n return (\n asdict(\n ResponseBase(\n detail=repr(e),\n status=http_status,\n title=http_status.phrase,\n code=e.code,\n )\n ),\n http_status,\n )\n\n\ndef handle_resource_not_found_error(e: ResourceNotFoundError):\n http_status = HTTPStatus.NOT_FOUND\n logger.exception(\"resource not found.\")\n return (\n asdict(\n ResponseBase(\n detail=repr(e),\n status=http_status,\n title=http_status.phrase,\n code=e.code,\n )\n ),\n http_status,\n )\n\n\ndef handle_exception(e: Exception):\n http_status = HTTPStatus.INTERNAL_SERVER_ERROR\n logger.exception(\"internal error.\")\n return (\n asdict(\n ResponseBase(\n detail=repr(e),\n status=http_status,\n title=http_status.phrase,\n )\n ),\n http_status,\n )\n\n\nexception_to_handler: dict[Union[Type[Exception], int], Callable] = {\n ApplicationError: handle_application_error,\n ResourceConflictError: handle_resource_conflict_error,\n ParameterError: handle_parameter_error,\n ResourceNotFoundError: handle_resource_not_found_error,\n Exception: handle_exception,\n}\n\n\ndef register_error_handlers(app: Flask):\n for exception, handler in exception_to_handler.items():\n app.register_error_handler(exception, handler)\n","repo_name":"itacode/python-microservice-starter","sub_path":"app/exceptions/error_handlers.py","file_name":"error_handlers.py","file_ext":"py","file_size_in_byte":2924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"7318839937","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n__author__ = \"Ivo Marvan\"\n__email__ = \"ivo@marvan.cz\"\n__description__ = '''\n Storage for image in directory\n Implements ImgProcessor, ImgStorageBase interfaces.\n'''\n\nimport sys\nimport os\n\n# root of project repository\nTHE_FILE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)))\nPROJECT_ROOT = os.path.abspath(os.path.join(THE_FILE_DIR, '../..', '..', '..'))\nsys.path.append(PROJECT_ROOT)\n\nfrom src.img.processor.storage.base import ImgStorageBase\nfrom src.img.container.image import Image\nfrom src.img.processor.types import DirectoryType\n\nclass ImgStorageDir(ImgStorageBase):\n \"\"\"\n File directory as storage for result images.\n \"\"\"\n\n def __init__(\n self,\n path: DirectoryType(descr='The path for storing images.', must_exists=False),\n ):\n super().__init__('dir.' 
+ path)\n self._path = path\n\n def store(self, img: Image, params=None, extension: str = None) -> bool:\n '''\n @see src.img.storage.base.ImgStorageBase.store\n\n extension == None means keep stored extension or use default\n '''\n img.store_to_file(path=self._path, params=params, extension=extension)\n return img\n","repo_name":"ivomarvan/quick_faces","sub_path":"src/img/processor/storage/dir.py","file_name":"dir.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"28786710357","text":"\"\"\"\nOneNoteAnalyzer\n\nAssemblyline service using the OneNoteAnalzer tool to analyze OneNote files.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport os\nimport subprocess\n\nfrom pathlib import Path\nfrom collections import defaultdict\n\nfrom PIL import UnidentifiedImageError\n\nfrom assemblyline.common import forge\nfrom assemblyline.common.str_utils import safe_str\nfrom assemblyline_v4_service.common.balbuzard.patterns import PatternMatch\nfrom assemblyline_v4_service.common.base import ServiceBase\n\nfrom assemblyline_v4_service.common.extractor.ocr import detections\nfrom assemblyline_v4_service.common.request import ServiceRequest\nfrom assemblyline_v4_service.common.result import (\n Heuristic,\n KVSectionBody,\n Result,\n ResultImageSection,\n ResultSection,\n)\nfrom assemblyline_v4_service.common.utils import extract_passwords\n\n\nclass OneNoteAnalyzer(ServiceBase):\n \"\"\"OneNoteAnalyzer Service\"\"\"\n\n LAUNCHABLE_EXTENSIONS = {\n \".ade\",\n \".adp\",\n \".as\", # Adobe ActionScript\n \".bat\", # DOS/Windows batch file\n \".chm\",\n \".cmd\", # Windows command\n \".com\", # DOS command\n \".cpl\",\n \".exe\", # DOS/Windows executable\n \".dll\", # Windows library\n \".hta\",\n \".inf\", # Windows autorun file\n \".ins\",\n \".isp\",\n \".jar\", # Java JAR\n \".jse\",\n \".js\", # Javascript\n \".lib\",\n \".lnk\", # Windows shortcut\n \".mde\",\n \".msc\",\n \".msp\",\n \".mst\",\n \".pif\",\n \".py\", # Python script\n \".scr\", # Windows screen saver\n \".sct\",\n \".shb\",\n \".sys\",\n \".url\", # Windows URL Shortcut\n \".vb\", # VB Script\n \".vbe\", # Encrypted VB script\n \".vbs\", # VB Script\n \".vxd\",\n \".wsc\",\n \".wsf\",\n \".wsh\",\n }\n\n LAUNCHABLE_TYPE = {\n \"code/batch\",\n \"code/ps1\",\n \"code/python\",\n \"code/vbs\",\n }\n\n LAUNCHABLE_TYPE_PREFIX = {\"executable\", \"shortcut\"}\n\n def __init__(self, config: dict | None = None) -> None:\n super().__init__(config)\n self.identify = forge.get_identify(use_cache=os.environ.get(\"PRIVILEGED\", \"false\").lower() == \"true\")\n\n def execute(self, request: ServiceRequest) -> None:\n subprocess.run(\n [\n \"wine\",\n \"OneNoteAnalyzer/OneNoteAnalyzer.exe\",\n \"--file\",\n request.file_path,\n ],\n capture_output=True,\n check=False,\n )\n output_dir = Path(f\"{request.file_path}_content/\")\n if output_dir.exists():\n request.result = Result([section for section in self._make_results(request, output_dir) if section])\n else:\n request.result = Result()\n\n def _make_results(\n self, request: ServiceRequest, output_dir: Path\n ) -> tuple[ResultSection | None, ResultSection | None, ResultSection | None, ResultSection | None]:\n self._make_hyperlinks_section(request, output_dir / \"OneNoteHyperLinks\")\n return (\n self._make_attachments_section(request, output_dir / \"OneNoteAttachments\"),\n self._make_preview_section(request, output_dir / f\"ConvertImage_{Path(request.file_path).stem}.png\"),\n 
self._make_images_section(request, output_dir / \"OneNoteImages\"),\n self._make_text_section(request, output_dir / \"OneNoteText\"),\n )\n\n def _make_attachments_section(self, request: ServiceRequest, attachments_dir: Path) -> ResultSection | None:\n if not attachments_dir.exists():\n return None\n executable_attachments: list[str] = []\n attachments: list[str] = []\n for file_path in attachments_dir.iterdir():\n if not file_path.is_file():\n continue\n request.add_extracted(\n str(file_path),\n file_path.name,\n \"attachment extracted from onenote.\",\n )\n file_type: str = self.identify.fileinfo(str(file_path))[\"type\"]\n if (\n file_path.suffix in self.LAUNCHABLE_EXTENSIONS\n or file_type in self.LAUNCHABLE_TYPE\n or file_type.split(\"/\", 1)[0] in self.LAUNCHABLE_TYPE_PREFIX\n ):\n executable_attachments.append(file_path.name)\n else:\n attachments.append(file_path.name)\n\n if not attachments and not executable_attachments:\n return None\n return ResultSection(\n \"OneNote Attachments\",\n body=\"Executables:\\n\" + \"\\n\".join(executable_attachments) + \"Other:\\n\" + \"\\n\".join(attachments),\n heuristic=Heuristic(1) if executable_attachments else None,\n )\n\n def _make_preview_section(self, request: ServiceRequest, preview_path: Path) -> ResultImageSection | None:\n if not (preview_path.exists() and preview_path.stat().st_size):\n return None\n try:\n preview_section = ResultImageSection(request, \"OneNote File Image Preview.\")\n preview_section.add_image(\n str(preview_path),\n name=preview_path.name,\n description=\"OneNote file converted to PNG.\",\n )\n return preview_section\n except UnidentifiedImageError:\n request.add_supplementary(\n str(preview_path),\n name=preview_path.name,\n description=\"OneNote file converted to PNG.\",\n )\n return ResultSection(\n \"OneNote File Image Preview.\",\n body=(\n \"Preview was generated but could not be displayed.\"\n f\"\\nSee supplimentary file [{preview_path.name}]\"\n ),\n )\n\n def _make_images_section(self, request: ServiceRequest, images_dir: Path) -> ResultImageSection | None:\n def add_image(section: ResultImageSection, path: Path) -> bool:\n \"\"\"Helper function for error handling ResultImageSection.add_image()\"\"\"\n try:\n section.add_image(\n str(path),\n name=path.name,\n description=\"image extracted from OneNote.\",\n )\n return True\n except UnidentifiedImageError:\n return False\n\n if not images_dir.exists():\n return None\n images_section = ResultImageSection(request, \"OneNote Embedded Images\")\n if any(\n add_image(images_section, image_path)\n for image_path in images_dir.iterdir()\n if image_path.is_file() and image_path.stat().st_size\n ):\n return images_section\n return None\n\n def _make_text_section(self, request: ServiceRequest, text_dir: Path) -> ResultSection | None:\n if not text_dir.exists():\n return None\n patterns = PatternMatch()\n results: dict[str, list[str]] = defaultdict(list)\n tags: dict[str, list[str]] = defaultdict(list)\n for page in text_dir.iterdir():\n if not page.is_file():\n continue\n with page.open(\"r\") as f:\n text = f.read()\n if page.name.startswith(\"1_\"): # Keep potential passwords from the first page\n passwords = extract_passwords(text)\n if \"passwords\" in request.temp_submission_data:\n request.temp_submission_data[\"passwords\"].extend(passwords)\n else:\n request.temp_submission_data[\"passwords\"] = passwords\n tag_type: str\n values: set[bytes]\n for tag_type, values in patterns.ioc_match(text.encode(), True, True).items():\n 
tags[tag_type].extend(safe_str(tag) for tag in values)\n for detection_type, indicators in detections(text).items():\n results[detection_type].extend(indicators)\n\n if not results and not tags:\n return None\n text_section = ResultSection(\"OneNote Text\")\n if results:\n text_section.add_subsection(\n ResultSection(\n \"Suspicious strings found in OneNote Text\",\n KVSectionBody(**results),\n heuristic=Heuristic(2, signatures={f\"{k}_strings\": len(v) for k, v in results.items()}),\n )\n )\n if tags:\n text_section.add_subsection(\n ResultSection(\n \"Network Indicators found in OneNote Text\",\n KVSectionBody(**tags),\n heuristic=Heuristic(3, signatures={k.replace(\".\", \"_\"): 1 for k, _ in tags.items()}),\n tags=tags,\n )\n )\n return text_section\n\n def _make_hyperlinks_section(self, request: ServiceRequest, hyperlinks_dir: Path) -> None:\n # I have no idea what hyperlinks is supposed to be, adding as supplimentary so we can monitor it\n if not hyperlinks_dir.exists():\n return\n expected_file = hyperlinks_dir / \"onenote_hyperlinks.txt\"\n if not expected_file.exists() or not expected_file.is_file():\n return\n request.add_supplementary(\n str(expected_file), request.sha256[:8] + \"_onenote_hyperlinks.txt\", \"OneNoteAnalyzer Hyperlinks file\"\n )\n","repo_name":"CybercentreCanada/assemblyline-service-onenoteanalyzer","sub_path":"onenoteanalyzer/onenoteanalyzer.py","file_name":"onenoteanalyzer.py","file_ext":"py","file_size_in_byte":9394,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"} +{"seq_id":"7318821107","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n__author__ = \"Ivo Marvan\"\n__email__ = \"ivo@marvan.cz\"\n__description__ = '''\n Normalized result for landmarks detector (prediktor)\n'''\n\nimport sys\nimport os\n\n# root of project repository\nTHE_FILE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)))\nPROJECT_ROOT = os.path.abspath(os.path.join(THE_FILE_DIR, '..', '..', '..', '..', '..'))\nsys.path.append(PROJECT_ROOT)\n\nfrom src.img.container.result import ImageProcessorResult\nfrom src.img.processor.processor import ImgProcessor\n\nclass SquereCropResult(ImageProcessorResult):\n\n def __init__(self, processor: ImgProcessor, time_ms: int = None, img_scale: float = 1.0):\n super().__init__(processor=processor, time_ms=time_ms)\n self._img_scale = img_scale\n\n def get_img_scale(self) -> float:\n return self._img_scale\n\n def __str__(self):\n s = super().__str__()\n s += f'(scale:{self.get_img_scale()})'\n return s","repo_name":"ivomarvan/quick_faces","sub_path":"src/img/processor/reformat/squere_crop/result.py","file_name":"result.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"18817365922","text":"import json\r\n\r\n\r\n#Open and load data.json into wordList\r\nfile = open('data.json')\r\ndata = json.load(file)\r\nwordList = []\r\nfor i in data:\r\n wordList.append(i)\r\n \r\n#Input Prompt\r\nword = []\r\nwhile(1):\r\n letter = input(\"Enter a letter: \")\r\n word.append(letter)\r\n word2 = \"\".join(word)\r\n if (len(word)>3) and (word2 in wordList):\r\n print(\"You lost! 
Better luck next time.\")\r\n break\r\n\r\n","repo_name":"TristanT25/Word-Off","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"75249738995","text":"\"\"\"Light for Shelly.\"\"\"\nfrom __future__ import annotations\n\nimport asyncio\nimport logging\nfrom typing import Any\n\nfrom aioshelly import Block\nimport async_timeout\n\nfrom openpeerpower.components.light import (\n ATTR_BRIGHTNESS,\n ATTR_COLOR_TEMP,\n ATTR_EFFECT,\n ATTR_RGB_COLOR,\n ATTR_RGBW_COLOR,\n COLOR_MODE_BRIGHTNESS,\n COLOR_MODE_COLOR_TEMP,\n COLOR_MODE_ONOFF,\n COLOR_MODE_RGB,\n COLOR_MODE_RGBW,\n SUPPORT_EFFECT,\n LightEntity,\n brightness_supported,\n)\nfrom openpeerpower.core import callback\nfrom openpeerpower.util.color import (\n color_temperature_kelvin_to_mired,\n color_temperature_mired_to_kelvin,\n)\n\nfrom . import ShellyDeviceWrapper\nfrom .const import (\n AIOSHELLY_DEVICE_TIMEOUT_SEC,\n COAP,\n DATA_CONFIG_ENTRY,\n DOMAIN,\n KELVIN_MAX_VALUE,\n KELVIN_MIN_VALUE_COLOR,\n KELVIN_MIN_VALUE_WHITE,\n SHBLB_1_RGB_EFFECTS,\n STANDARD_RGB_EFFECTS,\n)\nfrom .entity import ShellyBlockEntity\nfrom .utils import async_remove_shelly_entity\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def async_setup_entry(opp, config_entry, async_add_entities):\n \"\"\"Set up lights for device.\"\"\"\n wrapper = opp.data[DOMAIN][DATA_CONFIG_ENTRY][config_entry.entry_id][COAP]\n\n blocks = []\n for block in wrapper.device.blocks:\n if block.type == \"light\":\n blocks.append(block)\n elif block.type == \"relay\":\n appliance_type = wrapper.device.settings[\"relays\"][int(block.channel)].get(\n \"appliance_type\"\n )\n if appliance_type and appliance_type.lower() == \"light\":\n blocks.append(block)\n unique_id = (\n f'{wrapper.device.shelly[\"mac\"]}-{block.type}_{block.channel}'\n )\n await async_remove_shelly_entity(opp, \"switch\", unique_id)\n\n if not blocks:\n return\n\n async_add_entities(ShellyLight(wrapper, block) for block in blocks)\n\n\nclass ShellyLight(ShellyBlockEntity, LightEntity):\n \"\"\"Switch that controls a relay block on Shelly devices.\"\"\"\n\n def __init__(self, wrapper: ShellyDeviceWrapper, block: Block) -> None:\n \"\"\"Initialize light.\"\"\"\n super().__init__(wrapper, block)\n self.control_result = None\n self.mode_result = None\n self._supported_color_modes = set()\n self._supported_features = 0\n self._min_kelvin = KELVIN_MIN_VALUE_WHITE\n self._max_kelvin = KELVIN_MAX_VALUE\n\n if hasattr(block, \"red\") and hasattr(block, \"green\") and hasattr(block, \"blue\"):\n self._min_kelvin = KELVIN_MIN_VALUE_COLOR\n if hasattr(block, \"white\"):\n self._supported_color_modes.add(COLOR_MODE_RGBW)\n else:\n self._supported_color_modes.add(COLOR_MODE_RGB)\n\n if hasattr(block, \"colorTemp\"):\n self._supported_color_modes.add(COLOR_MODE_COLOR_TEMP)\n\n if not self._supported_color_modes:\n if hasattr(block, \"brightness\") or hasattr(block, \"gain\"):\n self._supported_color_modes.add(COLOR_MODE_BRIGHTNESS)\n else:\n self._supported_color_modes.add(COLOR_MODE_ONOFF)\n\n if hasattr(block, \"effect\"):\n self._supported_features |= SUPPORT_EFFECT\n\n @property\n def supported_features(self) -> int:\n \"\"\"Supported features.\"\"\"\n return self._supported_features\n\n @property\n def is_on(self) -> bool:\n \"\"\"If light is on.\"\"\"\n if self.control_result:\n return self.control_result[\"ison\"]\n\n return self.block.output\n\n @property\n def mode(self) -> str | 
None:\n \"\"\"Return the color mode of the light.\"\"\"\n if self.mode_result:\n return self.mode_result[\"mode\"]\n\n if hasattr(self.block, \"mode\"):\n return self.block.mode\n\n if (\n hasattr(self.block, \"red\")\n and hasattr(self.block, \"green\")\n and hasattr(self.block, \"blue\")\n ):\n return \"color\"\n\n return \"white\"\n\n @property\n def brightness(self) -> int | None:\n \"\"\"Return the brightness of this light between 0..255.\"\"\"\n if self.mode == \"color\":\n if self.control_result:\n brightness_pct = self.control_result[\"gain\"]\n else:\n brightness_pct = self.block.gain\n else:\n if self.control_result:\n brightness_pct = self.control_result[\"brightness\"]\n else:\n brightness_pct = self.block.brightness\n\n return round(255 * brightness_pct / 100)\n\n @property\n def color_mode(self) -> str | None:\n \"\"\"Return the color mode of the light.\"\"\"\n if self.mode == \"color\":\n if hasattr(self.block, \"white\"):\n return COLOR_MODE_RGBW\n return COLOR_MODE_RGB\n\n if hasattr(self.block, \"colorTemp\"):\n return COLOR_MODE_COLOR_TEMP\n\n if hasattr(self.block, \"brightness\") or hasattr(self.block, \"gain\"):\n return COLOR_MODE_BRIGHTNESS\n\n return COLOR_MODE_ONOFF\n\n @property\n def rgb_color(self) -> tuple[int, int, int]:\n \"\"\"Return the rgb color value [int, int, int].\"\"\"\n if self.control_result:\n red = self.control_result[\"red\"]\n green = self.control_result[\"green\"]\n blue = self.control_result[\"blue\"]\n else:\n red = self.block.red\n green = self.block.green\n blue = self.block.blue\n return (red, green, blue)\n\n @property\n def rgbw_color(self) -> tuple[int, int, int, int]:\n \"\"\"Return the rgbw color value [int, int, int, int].\"\"\"\n if self.control_result:\n white = self.control_result[\"white\"]\n else:\n white = self.block.white\n\n return (*self.rgb_color, white)\n\n @property\n def color_temp(self) -> int | None:\n \"\"\"Return the CT color value in mireds.\"\"\"\n if self.control_result:\n color_temp = self.control_result[\"temp\"]\n else:\n color_temp = self.block.colorTemp\n\n color_temp = min(self._max_kelvin, max(self._min_kelvin, color_temp))\n\n return int(color_temperature_kelvin_to_mired(color_temp))\n\n @property\n def min_mireds(self) -> int:\n \"\"\"Return the coldest color_temp that this light supports.\"\"\"\n return int(color_temperature_kelvin_to_mired(self._max_kelvin))\n\n @property\n def max_mireds(self) -> int:\n \"\"\"Return the warmest color_temp that this light supports.\"\"\"\n return int(color_temperature_kelvin_to_mired(self._min_kelvin))\n\n @property\n def supported_color_modes(self) -> set | None:\n \"\"\"Flag supported color modes.\"\"\"\n return self._supported_color_modes\n\n @property\n def effect_list(self) -> list[str] | None:\n \"\"\"Return the list of supported effects.\"\"\"\n if not self.supported_features & SUPPORT_EFFECT:\n return None\n\n if self.wrapper.model == \"SHBLB-1\":\n return list(SHBLB_1_RGB_EFFECTS.values())\n\n return list(STANDARD_RGB_EFFECTS.values())\n\n @property\n def effect(self) -> str | None:\n \"\"\"Return the current effect.\"\"\"\n if not self.supported_features & SUPPORT_EFFECT:\n return None\n\n if self.control_result:\n effect_index = self.control_result[\"effect\"]\n else:\n effect_index = self.block.effect\n\n if self.wrapper.model == \"SHBLB-1\":\n return SHBLB_1_RGB_EFFECTS[effect_index]\n\n return STANDARD_RGB_EFFECTS[effect_index]\n\n async def async_turn_on(self, **kwargs) -> None:\n \"\"\"Turn on light.\"\"\"\n if self.block.type == \"relay\":\n 
self.control_result = await self.set_state(turn=\"on\")\n self.async_write_op_state()\n return\n\n set_mode = None\n supported_color_modes = self._supported_color_modes\n params: dict[str, Any] = {\"turn\": \"on\"}\n\n if ATTR_BRIGHTNESS in kwargs and brightness_supported(supported_color_modes):\n brightness_pct = int(100 * (kwargs[ATTR_BRIGHTNESS] + 1) / 255)\n if hasattr(self.block, \"gain\"):\n params[\"gain\"] = brightness_pct\n if hasattr(self.block, \"brightness\"):\n params[\"brightness\"] = brightness_pct\n\n if ATTR_COLOR_TEMP in kwargs and COLOR_MODE_COLOR_TEMP in supported_color_modes:\n color_temp = color_temperature_mired_to_kelvin(kwargs[ATTR_COLOR_TEMP])\n color_temp = min(self._max_kelvin, max(self._min_kelvin, color_temp))\n # Color temperature change - used only in white mode, switch device mode to white\n set_mode = \"white\"\n params[\"temp\"] = int(color_temp)\n\n if ATTR_RGB_COLOR in kwargs and COLOR_MODE_RGB in supported_color_modes:\n # Color channels change - used only in color mode, switch device mode to color\n set_mode = \"color\"\n (params[\"red\"], params[\"green\"], params[\"blue\"]) = kwargs[ATTR_RGB_COLOR]\n\n if ATTR_RGBW_COLOR in kwargs and COLOR_MODE_RGBW in supported_color_modes:\n # Color channels change - used only in color mode, switch device mode to color\n set_mode = \"color\"\n (params[\"red\"], params[\"green\"], params[\"blue\"], params[\"white\"]) = kwargs[\n ATTR_RGBW_COLOR\n ]\n\n if ATTR_EFFECT in kwargs:\n # Color effect change - used only in color mode, switch device mode to color\n set_mode = \"color\"\n if self.wrapper.model == \"SHBLB-1\":\n effect_dict = SHBLB_1_RGB_EFFECTS\n else:\n effect_dict = STANDARD_RGB_EFFECTS\n if kwargs[ATTR_EFFECT] in effect_dict.values():\n params[\"effect\"] = [\n k for k, v in effect_dict.items() if v == kwargs[ATTR_EFFECT]\n ][0]\n else:\n _LOGGER.error(\n \"Effect '%s' not supported by device %s\",\n kwargs[ATTR_EFFECT],\n self.wrapper.model,\n )\n\n if await self.set_light_mode(set_mode):\n self.control_result = await self.set_state(**params)\n\n self.async_write_op_state()\n\n async def async_turn_off(self, **kwargs) -> None:\n \"\"\"Turn off light.\"\"\"\n self.control_result = await self.set_state(turn=\"off\")\n self.async_write_op_state()\n\n async def set_light_mode(self, set_mode):\n \"\"\"Change device mode color/white if mode has changed.\"\"\"\n if set_mode is None or self.mode == set_mode:\n return True\n\n _LOGGER.debug(\"Setting light mode for entity %s, mode: %s\", self.name, set_mode)\n try:\n async with async_timeout.timeout(AIOSHELLY_DEVICE_TIMEOUT_SEC):\n self.mode_result = await self.wrapper.device.switch_light_mode(set_mode)\n except (asyncio.TimeoutError, OSError) as err:\n _LOGGER.error(\n \"Setting light mode for entity %s failed, state: %s, error: %s\",\n self.name,\n set_mode,\n repr(err),\n )\n self.wrapper.last_update_success = False\n return False\n\n return True\n\n @callback\n def _update_callback(self):\n \"\"\"When device updates, clear control & mode result that overrides state.\"\"\"\n self.control_result = None\n self.mode_result = None\n super()._update_callback()\n","repo_name":"OpenPeerPower/core","sub_path":"openpeerpower/components/shelly/light.py","file_name":"light.py","file_ext":"py","file_size_in_byte":11484,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"5574840302","text":"import logging\nimport warnings\nfrom types import ModuleType\n\n__all__ = [\n \"ATOMNS\",\n \"DEFAULT_NAME_SPACES\",\n 
\"FORCE3D\",\n \"GXNS\",\n \"KMLNS\",\n \"register_namespaces\",\n \"set_default_namespaces\",\n \"set_etree_implementation\",\n]\n\ntry: # pragma: no cover\n from lxml import etree\n\nexcept ImportError: # pragma: no cover\n warnings.warn(\"Package `lxml` missing. Pretty print will be disabled\")\n import xml.etree.ElementTree as etree # type: ignore[no-redef] # noqa: N813\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef set_etree_implementation(implementation: ModuleType) -> None:\n \"\"\"Set the etree implementation to use.\"\"\"\n global etree\n etree = implementation\n\n\nKMLNS = \"{http://www.opengis.net/kml/2.2}\" # noqa: FS003\nATOMNS = \"{http://www.w3.org/2005/Atom}\" # noqa: FS003\nGXNS = \"{http://www.google.com/kml/ext/2.2}\" # noqa: FS003\n\nDEFAULT_NAME_SPACES = {\n \"kml\": KMLNS[1:-1],\n \"atom\": ATOMNS[1:-1],\n \"gx\": GXNS[1:-1],\n}\n\n\ndef register_namespaces(**namespaces: str) -> None:\n \"\"\"Register namespaces for use in etree.ElementTree.parse().\"\"\"\n try:\n for prefix, uri in namespaces.items():\n etree.register_namespace(prefix, uri)\n except AttributeError: # pragma: no cover\n logger.warning(\"Namespaces were not registered.\")\n\n\ndef set_default_namespaces() -> None:\n \"\"\"Register the default namespaces for use in etree.ElementTree.parse().\"\"\"\n register_namespaces(**DEFAULT_NAME_SPACES)\n\n\nset_default_namespaces()\n\nFORCE3D = False\n","repo_name":"cleder/fastkml","sub_path":"fastkml/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","stars":198,"dataset":"github-code","pt":"4"} +{"seq_id":"35930957772","text":"from typing import Any, Dict, Optional\n\nfrom pymongo.mongo_client import MongoClient\nfrom pymongo.server_api import ServerApi\n\nfrom app_config import AppConfig\nfrom app_logger import AppLogger\n\n\nclass MongoDBClient:\n app_config: AppConfig\n app_logger: AppLogger\n client: Optional[MongoClient] = None\n\n def __init__(self, app_config: AppConfig, app_logger: AppLogger):\n self.app_config = app_config\n self.app_logger = app_logger\n\n def connect(self):\n if not self.client:\n uri = self.app_config.get(\"DEFAULT\", \"uri\")\n self.client = MongoClient(uri, server_api=ServerApi(\"1\"))\n\n try:\n self.client.admin.command(\"ping\")\n\n except Exception as error:\n raise Exception from error\n\n def database_command(self, database_name: str, command: dict):\n database = self.client[database_name]\n\n database.command(command)\n\n def collection_create(self, database_name: str, collection_name: str) -> None:\n database = self.client[database_name]\n\n database.create_collection(\n collection_name\n )\n\n def collection_drop(self, database_name: str, collection_name: str) -> None:\n database = self.client[database_name]\n collection = database[collection_name]\n\n collection.drop()\n\n def collection_find_one(self, database_name: str, collection_name: str, filter: dict):\n try:\n database = self.client[database_name]\n collection = database[collection_name]\n\n return collection.find_one(filter)\n\n except Exception as error:\n raise ValueError from error\n\n def collection_list(self, database_name: str, collection_name: str) -> list:\n database = self.client[database_name]\n collection = database[collection_name]\n\n all_documents = collection.find({})\n\n return list(all_documents)\n\n def collection_insert(self, database_name: str, collection_name: str, data: Dict[str, Any]):\n try:\n database = self.client[database_name]\n collection = 
database[collection_name]\n\n return collection.insert_one(data).inserted_id\n\n except Exception as error:\n raise ValueError from error\n\n def wix_order_replicate(self, database_name: str, collection_name: str, data_list: list) -> None:\n database = self.client[database_name]\n collection = database[collection_name]\n\n for obj in data_list:\n # Check if the document with the same ID exists in MongoDB\n existing_document = collection.find_one({\"id\": obj[\"id\"]})\n\n if existing_document:\n # Compare lastUpdated dates\n if obj[\"lastUpdated\"] > existing_document[\"lastUpdated\"]:\n # Update the document in MongoDB\n collection.update_one(\n {\"_id\": existing_document[\"_id\"]}, {\"$set\": obj})\n else:\n # Insert as a new document in MongoDB\n collection.insert_one(obj)\n","repo_name":"igorpuorro/ecommerce","sub_path":"mongodb_client.py","file_name":"mongodb_client.py","file_ext":"py","file_size_in_byte":3080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"15914081260","text":"import os\nimport unittest\n\nfrom email.iterators import _structure as structure\nfrom email.mime.text import MIMEText\nfrom io import StringIO\nfrom mailman.app.lifecycle import create_list\nfrom mailman.config import config\nfrom mailman.email.message import Message\nfrom mailman.interfaces.member import DeliveryMode\nfrom mailman.interfaces.template import ITemplateManager\nfrom mailman.runners.digest import DigestRunner\nfrom mailman.testing.helpers import (\n LogFileMark, digest_mbox, get_queue_messages, make_digest_messages,\n make_testable_runner, message_from_string,\n specialized_message_from_string as mfs,\n subscribe)\nfrom mailman.testing.layers import ConfigLayer\nfrom string import Template\nfrom tempfile import TemporaryDirectory\nfrom zope.component import getUtility\n\n\nclass TestDigest(unittest.TestCase):\n \"\"\"Test the digest runner.\"\"\"\n\n layer = ConfigLayer\n maxDiff = None\n\n def setUp(self):\n self._mlist = create_list('test@example.com')\n self._mlist.send_welcome_message = False\n self._mlist.digest_size_threshold = 1\n self._digestq = config.switchboards['digest']\n self._shuntq = config.switchboards['shunt']\n self._virginq = config.switchboards['virgin']\n self._runner = make_testable_runner(DigestRunner, 'digest')\n self._process = config.handlers['to-digest'].process\n\n def _check_virgin_queue(self):\n # There should be two messages in the virgin queue: the digest as\n # plain-text and as multipart.\n items = get_queue_messages('virgin', expected_count=2)\n self.assertEqual(\n sorted(item.msg.get_content_type() for item in items),\n ['multipart/mixed', 'text/plain'])\n for item in items:\n self.assertEqual(item.msg['subject'],\n 'Test Digest, Vol 1, Issue 1')\n\n def test_simple_message(self):\n # Subscribe some users receiving digests.\n anne = subscribe(self._mlist, 'Anne')\n anne.preferences.delivery_mode = DeliveryMode.mime_digests\n bart = subscribe(self._mlist, 'Bart')\n bart.preferences.delivery_mode = DeliveryMode.plaintext_digests\n make_digest_messages(self._mlist)\n self._check_virgin_queue()\n # The digest mbox and all intermediary mboxes must have been removed\n # (GL #259).\n self.assertEqual(os.listdir(self._mlist.data_path), [])\n\n def test_non_ascii_message(self):\n # Subscribe some users receiving digests.\n anne = subscribe(self._mlist, 'Anne')\n anne.preferences.delivery_mode = DeliveryMode.mime_digests\n bart = subscribe(self._mlist, 'Bart')\n bart.preferences.delivery_mode = 
DeliveryMode.plaintext_digests\n msg = Message()\n msg['From'] = 'anne@example.org'\n msg['To'] = 'test@example.com'\n msg['Content-Type'] = 'multipart/mixed'\n msg.attach(MIMEText('message with non-ascii chars: \\xc3\\xa9',\n 'plain', 'utf-8'))\n mbox = digest_mbox(self._mlist)\n mbox.add(msg.as_string())\n # Use any error logs as the error message if the test fails.\n error_log = LogFileMark('mailman.error')\n make_digest_messages(self._mlist, msg)\n # The runner will send the file to the shunt queue on exception.\n self.assertEqual(len(self._shuntq.files), 0, error_log.read())\n self._check_virgin_queue()\n\n def test_mime_digest_format(self):\n # Make sure that the format of the MIME digest is as expected.\n self._mlist.digest_size_threshold = 0.6\n self._mlist.volume = 1\n self._mlist.next_digest_number = 1\n self._mlist.send_welcome_message = False\n # Subscribe some users receiving digests.\n anne = subscribe(self._mlist, 'Anne')\n anne.preferences.delivery_mode = DeliveryMode.mime_digests\n bart = subscribe(self._mlist, 'Bart')\n bart.preferences.delivery_mode = DeliveryMode.plaintext_digests\n # Fill the digest.\n process = config.handlers['to-digest'].process\n size = 0\n for i in range(1, 5):\n text = Template(\"\"\"\\\nFrom: aperson@example.com\nTo: xtest@example.com\nSubject: Test message $i\nList-Post: <test@example.com>\n\nHere is message $i\n\"\"\").substitute(i=i)\n msg = message_from_string(text)\n process(self._mlist, msg, {})\n size += len(text)\n if size >= self._mlist.digest_size_threshold * 1024:\n break\n # Run the digest runner to create the MIME and RFC 1153 digests.\n runner = make_testable_runner(DigestRunner)\n runner.run()\n items = get_queue_messages('virgin', expected_count=2)\n # Find the MIME one.\n mime_digest = None\n for item in items:\n if item.msg.is_multipart():\n assert mime_digest is None, 'We got two MIME digests'\n mime_digest = item.msg\n fp = StringIO()\n # Verify the structure is what we expect.\n structure(mime_digest, fp)\n self.assertMultiLineEqual(fp.getvalue(), \"\"\"\\\nmultipart/mixed\n text/plain\n text/plain\n multipart/digest\n message/rfc822\n text/plain\n message/rfc822\n text/plain\n message/rfc822\n text/plain\n message/rfc822\n text/plain\n text/plain\n\"\"\")\n\n def test_issue141(self):\n # Currently DigestMode.summary_digests are equivalent to mime_digests.\n # This also tests GL issue 234.\n self._mlist.send_welcome_message = False\n bart = subscribe(self._mlist, 'Bart')\n bart.preferences.delivery_mode = DeliveryMode.summary_digests\n make_digest_messages(self._mlist)\n # There should be one message in the outgoing queue, destined for\n # Bart, formatted as a MIME digest.\n items = get_queue_messages('virgin', expected_count=1)\n # Bart is the only recipient.\n self.assertEqual(items[0].msgdata['recipients'],\n set(['bperson@example.com']))\n # The message is a MIME digest, with the structure we expect.\n fp = StringIO()\n structure(items[0].msg, fp)\n self.assertMultiLineEqual(fp.getvalue(), \"\"\"\\\nmultipart/mixed\n text/plain\n text/plain\n multipart/digest\n message/rfc822\n text/plain\n text/plain\n\"\"\")\n\n def test_issue141_one_last_digest(self):\n # Currently DigestMode.summary_digests are equivalent to mime_digests.\n # Also tests issue 234.\n self._mlist.send_welcome_message = False\n bart = subscribe(self._mlist, 'Bart')\n self._mlist.send_one_last_digest_to(\n bart.address, DeliveryMode.summary_digests)\n make_digest_messages(self._mlist)\n # There should be one message in the outgoing queue, destined for\n 
# Bart, formatted as a MIME digest.\n items = get_queue_messages('virgin', expected_count=1)\n # Bart is the only recipient.\n self.assertEqual(items[0].msgdata['recipients'],\n set(['bperson@example.com']))\n # The message is a MIME digest, with the structure we expect.\n fp = StringIO()\n structure(items[0].msg, fp)\n self.assertMultiLineEqual(fp.getvalue(), \"\"\"\\\nmultipart/mixed\n text/plain\n text/plain\n multipart/digest\n message/rfc822\n text/plain\n text/plain\n\"\"\")\n\n\nclass TestI18nDigest(unittest.TestCase):\n layer = ConfigLayer\n maxDiff = None\n\n def setUp(self):\n config.push('french', \"\"\"\n [mailman]\n default_language: fr\n \"\"\")\n self.addCleanup(config.pop, 'french')\n self._mlist = create_list('test@example.com')\n self._mlist.send_welcome_message = False\n self._mlist.preferred_language = 'fr'\n self._mlist.digest_size_threshold = 0\n self._process = config.handlers['to-digest'].process\n self._runner = make_testable_runner(DigestRunner)\n # Add a French version of the digest masthead.\n tempdir = TemporaryDirectory()\n self.addCleanup(tempdir.cleanup)\n french_path = os.path.join(tempdir.name, 'fr', 'masthead.txt')\n os.makedirs(os.path.dirname(french_path))\n with open(french_path, 'w', encoding='utf-8') as fp:\n print(\"\"\"\\\nEnvoyez vos messages pour la liste $display_name à\n\\t$got_list_email\n\nPour vous (dés)abonner par courriel, envoyez un message avec « help » dans\nle corps ou dans le sujet à\n\\t$got_request_email\n\nVous pouvez contacter l'administrateur de la liste à l'adresse\n\\t$got_owner_email\n\nSi vous répondez, n'oubliez pas de changer l'objet du message afin\nqu'il soit plus spécifique que « Re: Contenu du groupe de $display_name...\n\"\"\", file=fp)\n getUtility(ITemplateManager).set(\n 'list:member:digest:masthead', self._mlist.list_id,\n 'file:///{}/$language/masthead.txt'.format(tempdir.name))\n\n def test_multilingual_digest(self):\n # When messages come in with a content-type character set different\n # than that of the list's preferred language, recipients will get an\n # internationalized digest.\n #\n # Subscribe some users receiving digests.\n anne = subscribe(self._mlist, 'Anne')\n anne.preferences.delivery_mode = DeliveryMode.mime_digests\n bart = subscribe(self._mlist, 'Bart')\n bart.preferences.delivery_mode = DeliveryMode.plaintext_digests\n msg = mfs(\"\"\"\\\nFrom: aperson@example.org\nTo: test@example.com\nSubject: =?iso-2022-jp?b?GyRCMGxIVhsoQg==?=\nMIME-Version: 1.0\nContent-Type: text/plain; charset=iso-2022-jp\nContent-Transfer-Encoding: 7bit\n\n\\x1b$B0lHV\\x1b(B\n\"\"\")\n self._process(self._mlist, msg, {})\n self._runner.run()\n # There are two digests in the virgin queue; one is the MIME digest\n # and the other is the RFC 1153 digest.\n items = get_queue_messages('virgin', expected_count=2)\n if items[0].msg.is_multipart():\n mime, rfc1153 = items[0].msg, items[1].msg\n else:\n rfc1153, mime = items[0].msg, items[1].msg\n # The MIME version contains a mix of French and Japanese. 
The digest\n # chrome added by Mailman is in French.\n self.assertEqual(mime['subject'].encode(),\n '=?iso-8859-1?q?Groupe_Test=2C_Vol_1=2C_Parution_1?=')\n self.assertEqual(str(mime['subject']),\n 'Groupe Test, Vol 1, Parution 1')\n # The first subpart contains the iso-8859-1 masthead.\n masthead = mime.get_payload(0).get_payload(decode=True).decode(\n 'iso-8859-1')\n self.assertMultiLineEqual(masthead.splitlines()[0],\n 'Envoyez vos messages pour la liste Test à')\n # The second subpart contains the utf-8 table of contents.\n self.assertEqual(mime.get_payload(1)['content-description'],\n \"Today's Topics (1 messages)\")\n toc = mime.get_payload(1).get_payload(decode=True).decode('utf-8')\n self.assertMultiLineEqual(toc.splitlines()[0], 'Thèmes du jour :')\n # The third subpart is a multipart/digest part and its first subpart\n # contains the posted message in Japanese.\n self.assertEqual(mime.get_payload(2).get_content_type(),\n 'multipart/digest')\n self.assertEqual(mime.get_payload(2).get_payload(0).get_content_type(),\n 'message/rfc822')\n post = mime.get_payload(2).get_payload(0).get_payload(0)\n self.assertEqual(post['subject'], '=?iso-2022-jp?b?GyRCMGxIVhsoQg==?=')\n # Compare the bytes so that this module doesn't contain string\n # literals in multiple incompatible character sets.\n self.assertEqual(post.get_payload(decode=True), b'\\x1b$B0lHV\\x1b(B\\n')\n # The RFC 1153 digest will have the same subject, but its payload will\n # be recast into utf-8.\n self.assertEqual(str(rfc1153['subject']),\n 'Groupe Test, Vol 1, Parution 1')\n self.assertEqual(rfc1153.get_charset(), 'utf-8')\n lines = rfc1153.get_payload(decode=True).decode('utf-8').splitlines()\n self.assertEqual(lines[0], 'Envoyez vos messages pour la liste Test à')\n","repo_name":"masomel/py-import-analysis","sub_path":"libs/Mailman/mailman/runners/tests/test_digest.py","file_name":"test_digest.py","file_ext":"py","file_size_in_byte":12268,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"} +{"seq_id":"29128752085","text":"from flask import Flask, request, render_template\nimport os\nimport uuid\nimport json\nfrom coco import *\napp = Flask(__name__)\n\n@app.route('/upload_img', methods=['GET', 'POST'])\ndef upload():\n if request.method == 'POST':\n file = request.files['file']\n extension = os.path.splitext(file.filename)[1]\n f_name = str(uuid.uuid4()) + extension\n file.save(os.path.join(\"\", f_name))\n translate(f_name)\n return json.dumps(translate(f_name))\n\nif __name__ == '__main__':\n app.debug = True\n app.run(host='0.0.0.0', port=5001)","repo_name":"jhihwei/Story_Book_for_AI","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"3176815528","text":"string = list(input())\nsize = int(input())\nmatrix = [[j for j in list(input())] for i in range(size)]\nnumber = int(input())\n\nplayer_x, player_y = next([i, j] for i in range(size) for j in range(size) if matrix[i][j] == \"P\")\n\nmovement = {\n 'up': lambda x, y: (x - 1, y),\n 'down': lambda x, y: (x + 1, y),\n 'left': lambda x, y: (x, y - 1),\n 'right': lambda x, y: (x, y + 1),\n}\n\nfor n in range(number):\n direction = input()\n p_x, p_y = movement[direction](player_x, player_y)\n\n if p_x not in range(size) or p_y not in range(size) and string:\n string.pop()\n continue\n\n hit = matrix[p_x][p_y]\n if hit.isalpha():\n string.append(hit)\n\n matrix[player_x][player_y] = '-'\n 
player_x, player_y = p_x, p_y\n matrix[player_x][player_y] = 'P'\n\nprint(\"\".join(string))\n\nfor i in matrix:\n print(*i, sep='')\n","repo_name":"simonen/PythonAdvanced","sub_path":"Python Advanced - Exams/02. Game of Words.py","file_name":"02. Game of Words.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"32638155674","text":"from config.dbconfig import rg_config\nimport psycopg2\nclass RequestsDAO:\n def __init__(self):\n\n connection_url = \"dbname=%s user=%s password=%s\" % (rg_config['dbname'],\n rg_config['user'],\n rg_config['passwd'])\n self.conn = psycopg2._connect(connection_url)\n\n def confirmAResource(self, rid):\n cursor = self.conn.cursor()\n query = \"select rname from resources where rid = %s;\"\n cursor.execute(query, (rid,))\n result = cursor.fetchone()\n return result\n\n def getARequestedResource(self, rid):\n cursor = self.conn.cursor()\n query = \"select rid, rname, rprice, qty from resources natural inner join requests where rid = %s;\"\n cursor.execute(query, (rid,))\n result = cursor.fetchone()\n return result\n\n def getRequestedResources(self):\n cursor = self.conn.cursor()\n query = \"select rid, rname, rprice, qty from resources natural inner join requests;\"\n cursor.execute(query)\n result = []\n for row in cursor:\n result.append(row)\n return result\n\n def getRequestByKeyword(self, keyword):\n cursor = self.conn.cursor()\n query = \"select rid, rname, rprice, qty from resources natural inner join requests where rname = %s order by rname;\"\n cursor.execute(query, (keyword,))\n result = cursor.fetchone()\n return result\n\n def addRequestedResource(self, uid, rid, qty):\n cursor = self.conn.cursor()\n query = \"insert into requests(uid, rid, qty) values (%s, %s, %s);\"\n cursor.execute(query, (uid, rid, qty,))\n self.conn.commit()\n return uid\n\n def addPurchase(self, sellerid, uid, cid, ptotal, pdate):\n cursor = self.conn.cursor()\n query = \"insert into purchases(sellerid, uid, cid, ptotal, pdate) values (%s, %s, %s, %s, %s) returning pid;\"\n cursor.execute(query, (sellerid, uid, cid, ptotal, pdate,))\n pid = cursor.fetchone()[0]\n self.conn.commit()\n return pid\n\n def addResourceSales(self, pid, rid, qty, pprice):\n cursor = self.conn.cursor()\n query = \"insert into resourcesales(pid, rid, qty, pprice) values (%s, %s, %s, %s);\"\n cursor.execute(query, (pid, rid, qty, pprice,))\n self.conn.commit()\n return pid\n\n def getResourcesPrice(self, uid, rid):\n cursor = self.conn.cursor()\n query = \"select pprice from supplies where uid = %s and rid = %s;\"\n cursor.execute(query, (uid, rid,))\n result = cursor.fetchone()\n return result\n\n def getSellersQty(self, uid, rid):\n cursor = self.conn.cursor()\n query = \"select qty from supplies where uid = %s and rid = %s;\"\n cursor.execute(query, (uid, rid,))\n result = cursor.fetchone()\n return result\n\n def updateSellersQty(self, sub, uid, rid):\n cursor = self.conn.cursor()\n query = \"update supplies set qty = (qty - %s) where uid = %s and rid = %s;\"\n cursor.execute(query, (sub, uid, rid,))\n self.conn.commit()\n return rid","repo_name":"Orlando-RL/Phase-1-ICOM5016-","sub_path":"dao/requests.py","file_name":"requests.py","file_ext":"py","file_size_in_byte":3181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"39082376163","text":"import torch\n\n\n# Base class for RL tasks\nclass BaseEnv:\n\n def __init__(self, cfg):\n self.envs 
= None\n self.num_envs = cfg.env.num_envs\n self.num_obs = cfg.env.num_observations\n self.num_privileged_obs = cfg.env.num_privileged_obs\n self.num_actions = cfg.env.num_actions\n self.device = cfg.env.device\n self.dtype = cfg.env.dtype\n\n # allocate buffers\n self.obs_buf = torch.zeros(self.num_envs, self.num_obs, device=self.device, dtype=self.dtype)\n self.rew_buf = torch.zeros(self.num_envs, device=self.device, dtype=self.dtype)\n self.reset_buf = torch.ones(self.num_envs, device=self.device, dtype=torch.long)\n self.episode_length_buf = torch.zeros(self.num_envs, device=self.device, dtype=torch.long)\n self.time_out_buf = torch.zeros(self.num_envs, device=self.device, dtype=torch.bool)\n\n if self.num_privileged_obs is not None:\n self.privileged_obs_buf = torch.zeros(self.num_envs, self.num_privileged_obs, device=self.device,\n dtype=self.dtype)\n else:\n self.privileged_obs_buf = None\n # self.num_privileged_obs = self.num_obs\n\n self.extras = {}\n\n # create envs, sim and viewer\n self.create_sim()\n\n def create_sim(self):\n raise NotImplementedError\n\n def get_observations(self):\n return self.obs_buf\n\n def get_privileged_observations(self):\n return self.privileged_obs_buf\n\n def reset_idx(self, env_ids):\n \"\"\"Reset selected robots\"\"\"\n raise NotImplementedError\n\n def reset(self):\n \"\"\" Reset all robots\"\"\"\n self.envs.reset()\n obs, privileged_obs, _, _, _ = self.step(\n torch.zeros(self.num_envs, self.num_actions, device=self.device, requires_grad=False))\n return obs, privileged_obs\n\n def step(self, actions):\n raise NotImplementedError\n","repo_name":"clearlab-sustech/GaitFreeTemplateModel","sub_path":"tm_gym/envs/base/base_env.py","file_name":"base_env.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"44701121640","text":"\"\"\"Телеграм бот по 3 книге Билла Вильямса Торговый хаос 2\"\"\"\r\nimport json\r\nimport time\r\nimport datetime\r\nimport requests\r\nimport numpy as np\r\nimport websocket\r\nimport pandas as pd\r\n\r\n\r\ntrade_symbol = \"1INCHUSDT\" #торговая пара\r\ntrade_symbol_low = trade_symbol.lower() #торговая пара в нижнем регистре для передечи в websocket\r\nbar_interval = \"5m\" #таймфрейм бара для анализа\r\n\r\n# функция определяет пересекает MA бар\r\ndef sma_crossing_bar(high,low,ma):\r\n if ma < high and ma >low:\r\n return True\r\n else:\r\n return False\r\n\r\n# функция отправки сообщения в Telegram\r\ndef send_telegram(text: str):\r\n token = \"\"\r\n url = \"https://api.telegram.org/bot\"\r\n channel_id = \"\"\r\n url += token\r\n method = url + \"/sendMessage\"\r\n\r\n r = requests.post(method, data={ \"chat_id\": channel_id, \"text\": text})\r\n\r\n if r.status_code != 200:\r\n print (r.status_code)\r\n raise Exception(\"post_text error\")\r\n\r\ndef get_AROON(ohlc_df, lookback=25):\r\n \r\n if len(ohlc_df) < lookback:\r\n return [np.nan]*len(ohlc_df)\r\n \r\n aroon_up = 100 * ohlc_df.high.rolling(lookback + 1).apply(lambda x: x.argmax()) / lookback\r\n aroon_down = 100 * ohlc_df.low.rolling(lookback + 1).apply(lambda x: x.argmin()) / lookback\r\n \r\n return aroon_up, aroon_down\r\n\r\ndef get_RSI(ohlc_df, lookback = 14, ema = True):\r\n \r\n if len(ohlc_df) < lookback:\r\n return [np.nan]*len(ohlc_df)\r\n\r\n close_delta = ohlc_df.close.diff()\r\n\r\n # Make two series: one for lower closes and one for higher closes\r\n up = close_delta.clip(lower=0)\r\n down = -1 * close_delta.clip(upper=0)\r\n \r\n if ema == 
\r\n        # Use exponential moving average\r\n        ma_up = up.ewm(com=lookback - 1, adjust=True, min_periods=lookback).mean()\r\n        ma_down = down.ewm(com=lookback - 1, adjust=True, min_periods=lookback).mean()\r\n    else:\r\n        # Use simple moving average (rolling() takes no 'adjust' argument)\r\n        ma_up = up.rolling(window=lookback).mean()\r\n        ma_down = down.rolling(window=lookback).mean()\r\n\r\n    rsi = ma_up / ma_down\r\n    rsi = 100 - (100/(1 + rsi))\r\n    return rsi\r\n\r\n# main handler, called every time a message arrives on the socket\r\ndef on_message(ws, message):\r\n\r\n    trade = json.loads(message)\r\n\r\n    if trade['e'] == "kline":\r\n        is_this_kline_closed = trade['k']['x']\r\n\r\n        if is_this_kline_closed:\r\n\r\n            time.sleep(0.2)\r\n            # determine the trend with Aroon; for trading on the 5m timeframe the trend is usually taken from the 4h timeframe\r\n            jsonKlines_4h = requests.get("https://fapi.binance.com/fapi/v1/klines?symbol=" + trade_symbol + "&interval=4h&limit=102").json()\r\n            dfKlines_4h = pd.DataFrame(jsonKlines_4h, columns=['open_time','open','high','low','close','volume','close_time','quote_volume','trades','buy_asset_volume','buy_quote_volume','ignore'])\r\n            dfKlines_4h = dfKlines_4h.astype(float)\r\n            dfKlines_4h.set_index(keys=pd.to_datetime(dfKlines_4h['open_time'], unit='ms'), inplace=True)\r\n            dfKlines_4h.drop(columns=['open_time','volume','close_time','quote_volume','trades','buy_asset_volume','buy_quote_volume','ignore'],inplace=True)\r\n\r\n            # determine the trend on the 4h timeframe\r\n            aroon_up_4h, aroon_down_4h = get_AROON(dfKlines_4h, 25)\r\n            aroon_trend_up_4h = True if aroon_up_4h.iloc[-1] > 90 and aroon_down_4h.iloc[-1] < 30 else False\r\n            aroon_trend_down_4h = True if aroon_down_4h.iloc[-1] > 90 and aroon_up_4h.iloc[-1] < 30 else False\r\n\r\n            # fetch the latest candles for the strategy calculation\r\n            jsonKlines = requests.get("https://fapi.binance.com/fapi/v1/klines?symbol=" + trade_symbol + "&interval=" + bar_interval + "&limit=102").json()\r\n            dfKlines = pd.DataFrame(jsonKlines, columns=['open_time','open','high','low','close','volume','close_time','quote_volume','trades','buy_asset_volume','buy_quote_volume','ignore'])\r\n            dfKlines = dfKlines.astype(float)\r\n            # median price of each bar\r\n            dfAveragePrice = (dfKlines.high + dfKlines.low)/2\r\n            # Awesome Oscillator\r\n            sma5 = dfAveragePrice.rolling(window=5).mean()\r\n            sma34 = dfAveragePrice.rolling(window=34).mean()\r\n            ao = sma5 - sma34\r\n\r\n            # RSI\r\n            dfRSI_5m = get_RSI(dfKlines)\r\n\r\n            close_in_interval = 0 # sometimes the interval detection below does not fire; pre-set the value to avoid an undefined-variable error\r\n            # determine whether the bar is a divergent bar\r\n            interval = (dfKlines.high.iloc[-2] - dfKlines.low.iloc[-2]) / 2\r\n\r\n            if dfKlines.high.iloc[-2] > dfKlines.close.iloc[-2] and dfKlines.close.iloc[-2] > dfKlines.high.iloc[-2] - interval or dfKlines.high.iloc[-2] == dfKlines.close.iloc[-2]:\r\n                close_in_interval = 1\r\n            if dfKlines.close.iloc[-2] > dfKlines.low.iloc[-2] + interval and dfKlines.high.iloc[-2] - interval > dfKlines.close.iloc[-2]:\r\n                close_in_interval = 2\r\n            if dfKlines.close.iloc[-2] > dfKlines.low.iloc[-2] and dfKlines.low.iloc[-2] + interval > dfKlines.close.iloc[-2] or dfKlines.close.iloc[-2] == dfKlines.low.iloc[-2]:\r\n                close_in_interval = 3\r\n            # Bollinger bands\r\n            sma20 = dfAveragePrice.rolling(window=20).mean()\r\n            bb_up = sma20 + dfKlines.close.rolling(window=20).std()*2\r\n            bb_low = sma20 - dfKlines.close.rolling(window=20).std()*2\r\n            # did the bar cross the lower Bollinger band\r\n            bb_low_crossing_bar = sma_crossing_bar(dfKlines.high.iloc[-2],dfKlines.low.iloc[-2],bb_low.iloc[-2])\r\n            # did the bar cross the upper Bollinger band\r\n            bb_up_crossing_bar = sma_crossing_bar(dfKlines.high.iloc[-2],dfKlines.low.iloc[-2],bb_up.iloc[-2])\r\n\r\n            print(aroon_up_4h.iloc[-1],aroon_down_4h.iloc[-1],dfRSI_5m.iloc[-1])\r\n\r\n            # long entry condition\r\n            if (close_in_interval == 1\r\n                    and ao.iloc[-3] > ao.iloc[-2]\r\n                    and ao.iloc[-2] < 0\r\n                    and bb_low_crossing_bar\r\n                    and dfRSI_5m.iloc[-1] < 30\r\n                    and aroon_trend_up_4h):\r\n\r\n                print(trade_symbol, "LONG entry condition met!")\r\n                send_telegram(trade_symbol + " ENTRY: " + str(dfKlines.close.iloc[-2]) + " LONG TP 2% / SL 2%")\r\n\r\n            # short entry condition\r\n            if (close_in_interval == 3\r\n                    and ao.iloc[-2] > ao.iloc[-3]\r\n                    and ao.iloc[-2] > 0\r\n                    and bb_up_crossing_bar\r\n                    and dfRSI_5m.iloc[-1] > 70\r\n                    and aroon_trend_down_4h):\r\n\r\n                print(trade_symbol, "SHORT entry condition met!")\r\n                send_telegram(trade_symbol + " ENTRY: " + str(dfKlines.close.iloc[-2]) + " SHORT TP 2% / SL 2%")\r\n\r\ndef on_error(ws, error):\r\n    print("### error ###")\r\n    print(error)\r\n    time.sleep(5)\r\n    binance_socket()\r\n\r\ndef on_close(ws):\r\n    print("### closed ###")\r\n    time.sleep(5)\r\n    binance_socket()\r\n\r\ndef on_open(ws):\r\n    print("### connected ###")\r\n\r\n#if __name__ == "__main__":\r\ndef binance_socket():\r\n    ws = websocket.WebSocketApp("wss://fstream.binance.com/ws/" + trade_symbol_low + "@kline_" + bar_interval,\r\n                                on_message = on_message,\r\n                                on_error = on_error,\r\n                                on_close = on_close)\r\n    ws.on_open = on_open\r\n    ws.run_forever()\r\n\r\nbinance_socket()\r\n","repo_name":"pavkapopov/BILL_WILLIAMS_TRADING_CHAOS_BOOK3","sub_path":"telegram_signal_bot/telegram_signal_future_divbar_ao_bb_rsi_aroon.py","file_name":"telegram_signal_future_divbar_ao_bb_rsi_aroon.py","file_ext":"py","file_size_in_byte":7969,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"10694112057","text":"import unittest\nimport astrodate\nimport earth\n\nclass Test_Earth(unittest.TestCase):\n    def setUp(self):\n        pass\n\n    def test_nutation(self):\n        d = astrodate.AstroDate().alloc_with_tuple((1987, 4, 10, 0, 0, 0.0, u'td'))\n        jde = d.get_julian()\n        n = earth.Nutation()\n        n.calculate_with_julianTD(jde)\n        nutation_in_longitude = n.get_nutation_in_longitude()\n        nutation_in_obliquity = n.get_nutation_in_obliquity()\n        mean_obliquity = n.get_mean_obliquity()\n        true_obliquity = n.get_true_obliquity()\n        
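# note: exact float equality is brittle for derived quantities like these;\n        # assertAlmostEqual (used for latitude below) is the safer pattern.\n        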
self.assertEqual(nutation_in_longitude, -0.001052332550403307)\n self.assertEqual(nutation_in_obliquity, 0.002622947155040129)\n self.assertEqual(mean_obliquity, 23.440946290957324)\n self.assertEqual(true_obliquity, 23.443569238112364)\n\n def test_position(self):\n p = earth.Position(earth.TERMS_VSOP87D)\n\n jde = astrodate.calculate_julian(1998, 1, 1)\n p.calculate_with_julianTD(jde)\n latitude = p.get_latitude()\n longitude = p.get_longitude()\n radius = p.get_radius()\n self.assertAlmostEqual(latitude, 2.1651204901319734e-05, 14)\n self.assertEqual(longitude, 100.363323142225)\n self.assertEqual(radius, 0.9833333560780444)\n\n jde = astrodate.calculate_julian(2008, 1, 1)\n p.calculate_with_julianTD(jde)\n latitude = p.get_latitude()\n longitude = p.get_longitude()\n radius = p.get_radius()\n self.assertAlmostEqual(latitude, 0.00011901318043573914, 14)\n self.assertEqual(longitude, 99.92973010197568)\n self.assertEqual(radius, 0.983289338510011)\n\n jde = astrodate.calculate_julian(2018, 1, 1)\n p.calculate_with_julianTD(jde)\n latitude = p.get_latitude()\n longitude = p.get_longitude()\n radius = p.get_radius()\n self.assertAlmostEqual(latitude, 4.95339099701003e-05, 14)\n self.assertEqual(longitude, 100.51614552278916)\n self.assertEqual(radius, 0.9833010058152097)\n\n\nif __name__ == '__main__':\n\n unittest.main()\n","repo_name":"markhamilton1/AstroScript","sub_path":"astrocore/earthtests.py","file_name":"earthtests.py","file_ext":"py","file_size_in_byte":2065,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"38426336185","text":"class TableDesc:\n from_cell = [0, 0]\n to_cell = [0, 0]\n empty = False\n content = ''\n content_len = 0\n bg_color = '#FFFFFF'\n border_black = False # default or black\n border_up = False\n border_right = False\n border_down = False\n border_left = False\n '''\n default: style=\"border:1px solid #aaa”\n 上右下左 style=\"border-width:2px 2px 2px 0; border-style:solid; border-color:black;\"\n '''\n\n def __init__(self,\n from_cell: tuple = (0, 0),\n to_cell: tuple = (0, 0),\n empty=False,\n content: str = '',\n bg_color: str = '#FFFFFF',\n border_black: bool = False,\n border_active: tuple = (False, False, False, False),\n content_len=-1\n ):\n self.from_cell = list(from_cell)\n self.to_cell = list(to_cell)\n self.empty = empty\n self.content = content\n self.content_len = content_len if content_len != -1 else len(content)\n self.bg_color = bg_color\n self.border_black = border_black\n self.border_up = border_active[0]\n self.border_right = border_active[1]\n self.border_down = border_active[2]\n self.border_left = border_active[3]\n\n def __str__(self):\n out_str_item = [\n str(self.from_cell),\n str(self.to_cell),\n str(self.empty),\n self.content,\n str(self.content_len),\n self.bg_color,\n str(self.border_black),\n str(self.border_up),\n str(self.border_right),\n str(self.border_down),\n str(self.border_left),\n ]\n return \",\".join(out_str_item)\n\n def shift(self, row_shift: int, col_shift: int):\n self.from_cell[0] += row_shift\n self.to_cell[0] += row_shift\n self.from_cell[1] += col_shift\n self.to_cell[1] += col_shift\n\n\ndef union_table(in_table_list_list: list) -> list:\n column_limit = 0\n for in_table_list in in_table_list_list:\n column_limit_item = max([in_table.to_cell[1] for in_table in in_table_list]) + 1\n if column_limit < column_limit_item:\n column_limit = column_limit_item\n row_limit = 0\n for in_table_list in in_table_list_list:\n row_limit_item = max([in_table.to_cell[0] 
for in_table in in_table_list]) + 1\n        row_limit += row_limit_item\n    for in_table_list in in_table_list_list:\n        this_column_limit = max([in_table.to_cell[1] for in_table in in_table_list]) + 1\n        for cell in in_table_list:\n            cell.shift(0, column_limit - this_column_limit)\n\n    row_current = 0\n    for in_table_list in in_table_list_list:\n        for cell in in_table_list:\n            cell.shift(row_current, 0)\n        row_current += max([in_table.to_cell[0] for in_table in in_table_list]) + 2\n    return_result = []\n    for in_table_list in in_table_list_list:\n        for cell in in_table_list:\n            return_result.append(cell)\n    return_result = padding_0(return_result)\n    return_result.sort(key=lambda cell_1: (cell_1.from_cell[0], cell_1.from_cell[1]))\n    return return_result\n\n\ndef padding_0(table_pos_all: list) -> list:\n    occupied_grid = []\n    row_limit = max([cell.to_cell[0] for cell in table_pos_all]) + 1\n    column_limit = max([cell.to_cell[1] for cell in table_pos_all]) + 1\n    for i in range(row_limit):\n        occupied_grid.append([])\n        for j in range(column_limit):\n            occupied_grid[i].append(False)\n    for cell in table_pos_all:\n        from_row = cell.from_cell[0]\n        from_col = cell.from_cell[1]\n        to_row = cell.to_cell[0] + 1\n        to_col = cell.to_cell[1] + 1\n        for i in range(from_row, to_row):\n            for j in range(from_col, to_col):\n                occupied_grid[i][j] = True\n    for i in range(row_limit):\n        j = 0\n        while j < column_limit:\n            if not occupied_grid[i][j]:\n                k = j\n                while k < column_limit - 1:\n                    if occupied_grid[i][k + 1]:\n                        break\n                    k += 1\n                t7 = TableDesc(\n                    (i, j),\n                    (i, k),\n                    True,\n                )\n                table_pos_all.append(t7)\n                j = k + 1\n            else:\n                j += 1\n    table_pos_all.sort(key=lambda cell: (cell.from_cell[0], cell.from_cell[1]))\n    return table_pos_all\n","repo_name":"ibicdlcod/shogimatches","sub_path":"src/metastruct/table_desc.py","file_name":"table_desc.py","file_ext":"py","file_size_in_byte":4476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"24308231581","text":"# Exercise 102\n# Program with a fatorial() function that takes two parameters:\n# the first is the number whose factorial to compute, and the second, called show,\n# is an optional boolean indicating whether the step-by-step calculation\n# should be printed on screen (the exercise also asks for a docstring)\ndef fatorial(n, show=False):\n    \"\"\"Print the factorial of n.\n\n    :param n: the number whose factorial is computed\n    :param show: when True, print each multiplication step\n    \"\"\"\n    f = 1\n    for c in range(n, 0, -1):\n        f *= c\n        if show:\n            if c == 1:\n                print(f'{c} =', end=' ')\n            else:\n                print(f'{c} x', end=' ')\n    print(f)\n\n\nn = int(input('Enter a number: '))\nfatorial(n)\n","repo_name":"LucasLCarreira/Python","sub_path":"ex102.py","file_name":"ex102.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"36860689278","text":"from typing import List, Tuple\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom bluemira.optimisation import optimise\n\n\ndef f_objective(x: np.ndarray) -> float:\n    \"\"\"Objective function to minimise.\"\"\"\n    return np.sqrt(x[1])\n\n\ndef df_objective(x: np.ndarray) -> np.ndarray:\n    \"\"\"Gradient of the objective function.\"\"\"\n    return np.array([0.0, 0.5 / np.sqrt(x[1])])\n\n\ndef f_constraint(x: np.ndarray, a: float, b: float) -> np.ndarray:\n    \"\"\"Inequality constraint function.\"\"\"\n    return (a * x[0] + b) ** 3 - x[1]\n\n\ndef df_constraint(x: np.ndarray, a: float, b: float) -> np.ndarray:\n    \"\"\"Inequality constraint gradient.\"\"\"\n    return np.array([3 * a * (a * x[0] + b) * (a * x[0] + b), -1.0])\n\n\nresult = optimise(\n    f_objective,\n    
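# keep_history=True stores every iterate so the optimiser's path can be plotted later in this example\n    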
x0=np.array([0.8, 2.5]),\n algorithm=\"SLSQP\",\n df_objective=df_objective,\n opt_conditions={\"ftol_rel\": 1e-12, \"max_eval\": 200},\n keep_history=True,\n bounds=(np.array([-np.inf, 0]), np.array([np.inf, np.inf])),\n ineq_constraints=[\n {\n \"f_constraint\": lambda x: f_constraint(x, 2, 0),\n \"df_constraint\": lambda x: df_constraint(x, 2, 0),\n \"tolerance\": np.array([1e-8]),\n },\n {\n \"f_constraint\": lambda x: f_constraint(x, -1, 1),\n \"df_constraint\": lambda x: df_constraint(x, -1, 1),\n \"tolerance\": np.array([1e-8]),\n },\n ],\n)\nprint(result)\n\n# %% [markdown]\n# ## Visualising the Optimisation\n# Using the history of the optimiser result,\n# we can plot the route the optimiser took to get to the minimum.\n#\n# The code below produces an image of the optimisation space,\n# with the constrained areas shaded in grey.\n# The path the optimiser took is shown by the plotted points,\n# which get smaller and darker at each iteration.\n\n\n# %%\n# %matplotlib inline\ndef c1(x1):\n \"\"\"Line drawn by limit of first constraint.\"\"\"\n return 8 * x1**3\n\n\ndef c2(x1):\n \"\"\"Line drawn by limit of second constraint.\"\"\"\n return (1 - x1) ** 3\n\n\nmesh_resolution = 201 # points per dimension\nx = np.linspace(-0.5, 1, mesh_resolution)\ny = np.linspace(0, 3, mesh_resolution)\nxx, yy = np.meshgrid(x, y)\nzz = f_objective(np.vstack((xx.ravel(), yy.ravel()))).reshape(xx.shape)\n\nfig, ax = plt.subplots()\ncolor_mesh = ax.pcolormesh(x, y, zz, cmap=\"viridis_r\")\ncbar = fig.colorbar(color_mesh, ax=ax)\ncbar.set_label(\"$f(x_1, x_2)$\")\nax.fill_between(x, c1(x), color=\"k\", alpha=0.2)\nax.fill_between(x, c2(x), color=\"k\", alpha=0.2)\nfor i, (x0, _) in enumerate(result.history):\n alpha = 0.5 + (0.5 * (i + 1)) / len(result.history)\n size = 8 - (8 * i / len(result.history))\n ax.plot(*x0, \"go\", markersize=size, alpha=alpha, markeredgecolor=\"k\")\nax.plot(*result.x, \"rx\", label=\"Feasible Minimum\")\nax.set_title(\"Optimiser History Visualisation\")\nax.set_xlabel(\"$x_1$\")\nax.set_ylabel(\"$x_2$\")\nax.set_ylim(0, y.max())\nax.legend()\nplt.show()\n\n\n# %% [markdown]\n# You'll notice SLSQP reaches the correct area very fast,\n# before searching a smaller area in order to satisfy the tolerance.\n# If you zoom in on the optimum, you will see that it actually lies just\n# inside the infeasible region.\n# This is mostly due to the resolution used in the plotting,\n# but also because the point may lie inside the region\n# within the constraint tolerance.\n# If you significantly increase the resolution of the plot,\n# and shift the region to allow for the constraint tolerance,\n# the point will lie within the feasible region.\n\n\n# %% [markdown]\n# ## Using the `OptimisationProblem` Class\n# Alternatively, we can take a class-based approach to defining this\n# optimisation problem, using the `OptimisationProblem` base class.\n\n# %%\nfrom bluemira.optimisation import OptimisationProblem\nfrom bluemira.optimisation.typing import ConstraintT\n\n\nclass NonLinearConstraintOP(OptimisationProblem):\n \"\"\"Optimisation problem with non-linear constraints.\"\"\"\n\n def __init__(self, a1: float, a2: float, b1: float, b2: float):\n self.a1 = a1\n self.a2 = a2\n self.b1 = b1\n self.b2 = b2\n\n def objective(self, x: np.ndarray) -> float: # noqa: PLR6301\n \"\"\"Objective function to minimise.\"\"\"\n return np.sqrt(x[1])\n\n def df_objective(self, x: np.ndarray) -> np.ndarray: # noqa: PLR6301\n \"\"\"Gradient of the objective function.\"\"\"\n return np.array([0.0, 0.5 / 
np.sqrt(x[1])])\n\n def bounds(self) -> Tuple[np.ndarray, np.ndarray]: # noqa: PLR6301\n \"\"\"\n The lower and upper bounds of the optimisation parameters.\n\n Each set of bounds must be convertible to a numpy array of\n floats. If the lower or upper bound is a scalar value, that\n value is set as the bound for each of the optimisation\n parameters.\n \"\"\"\n return np.array([-np.inf, 0]), np.array([np.inf, np.inf])\n\n def ineq_constraints(self) -> List[ConstraintT]:\n \"\"\"The inequality constraints on the optimisation.\"\"\"\n return [\n {\n \"f_constraint\": lambda x: self.f_constraint(x, self.a1, self.b1),\n \"df_constraint\": lambda x: self.df_constraint(x, self.a1, self.b1),\n \"tolerance\": np.array([1e-8]),\n },\n {\n \"f_constraint\": lambda x: self.f_constraint(x, self.a2, self.b2),\n \"df_constraint\": lambda x: self.df_constraint(x, self.a2, self.b2),\n \"tolerance\": np.array([1e-8]),\n },\n ]\n\n def f_constraint(\n self, x: np.ndarray, a: float, b: float # noqa: PLR6301\n ) -> np.ndarray:\n \"\"\"Inequality constraint function.\"\"\"\n return (a * x[0] + b) ** 3 - x[1]\n\n @staticmethod\n def df_constraint(x: np.ndarray, a: float, b: float) -> np.ndarray:\n \"\"\"Inequality constraint gradient.\"\"\"\n return np.array([3 * a * (a * x[0] + b) * (a * x[0] + b), -1.0])\n\n\nopt_problem = NonLinearConstraintOP(2, -1, 0, 1)\nresult = opt_problem.optimise(\n x0=np.array([1, 1]),\n algorithm=\"SLSQP\",\n opt_conditions={\"xtol_rel\": 1e-10, \"max_eval\": 1000},\n keep_history=False,\n)\nprint(result)\n","repo_name":"Fusion-Power-Plant-Framework/bluemira","sub_path":"examples/optimisation/nonlinearly_constrained_problem.ex.py","file_name":"nonlinearly_constrained_problem.ex.py","file_ext":"py","file_size_in_byte":6052,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"50"} +{"seq_id":"9360749337","text":"import argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-i', help='filter3.fa', required=True)\nparser.add_argument('-n', type=int, required=True, help='number of seqs for each splitting')\nparser.add_argument('-o', default='split', help='prefix file name')\nargs = parser.parse_args()\nin_file, out_file, seq_n = args.i, args.o, args.n\n\nwith open(in_file) as f:\n seq_number = 0\n file_id = 1\n f2 = open(out_file+\"1.fa\", 'w')\n for line in f:\n if line.startswith('>'):\n seq_number += 1\n if seq_number <= seq_n:\n f2.write(line)\n else:\n f2.close()\n file_id += 1\n f2 = open(out_file+str(file_id)+\".fa\", 'w')\n f2.write(line)\n seq_number = 1\n else:\n f2.write(line)\n f2.close()\n\n","repo_name":"czheluo/lncRNA-pipeline","sub_path":"bin/bin/split_fa.py","file_name":"split_fa.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"50"} +{"seq_id":"17423252314","text":"import telebot\n\nkeyboardEN = telebot.types.ReplyKeyboardMarkup(True,)\nkeyboardEN.row('Servers', 'Storages')\nkeyboardEN.row('Russian')\n\nkeyboardServerEN = telebot.types.ReplyKeyboardMarkup(True)\nkeyboardServerEN.row('Rack', 'High-Density')\nkeyboardServerEN.row('Blade')\nkeyboardServerEN.row('Back')\n\nkeyboardServerRackEN = telebot.types.ReplyKeyboardMarkup(True)\nkeyboardServerRackEN.row('1288H V5', '2288H V5')\nkeyboardServerRackEN.row('5288 V5', '2488H V5')\nkeyboardServerRackEN.row('5885H V5')\nkeyboardServerRackEN.row('Back')\n\nkeyboardHighDensityEN = telebot.types.ReplyKeyboardMarkup(True)\nkeyboardHighDensityEN.row('X6000', 
'X6800')\nkeyboardHighDensityEN.row('Back')\n\nkeyboardX6000EN = telebot.types.ReplyKeyboardMarkup(True)\nkeyboardX6000EN.row('Data Sheet','Documentations')\nkeyboardX6000EN.row('3D-model')\nkeyboardX6000EN.row('Back')\n\nkeyboardX6800EN = telebot.types.ReplyKeyboardMarkup(True)\nkeyboardX6800EN.row('Back')\n\nkeyboardBladeEN = telebot.types.ReplyKeyboardMarkup(True)\nkeyboardBladeEN.row('E9000')\nkeyboardBladeEN.row('Back')\n\nkeyboardE9000EN = telebot.types.ReplyKeyboardMarkup(True)\nkeyboardE9000EN.row('Data Sheet','Documentations')\nkeyboardE9000EN.row('3D-model')\nkeyboardE9000EN.row('Back')\n\nkeyboardServer1288EN = telebot.types.ReplyKeyboardMarkup(True)\nkeyboardServer1288EN.row('Data Sheet','Documentations')\nkeyboardServer1288EN.row('3D-model')\nkeyboardServer1288EN.row('Back')\n\nkeyboardServer2288EN = telebot.types.ReplyKeyboardMarkup(True)\nkeyboardServer2288EN.row('Data Sheet','Documentations')\nkeyboardServer2288EN.row('3D-model')\nkeyboardServer2288EN.row('Back')\n\nkeyboardServer5288EN = telebot.types.ReplyKeyboardMarkup(True)\nkeyboardServer5288EN.row('Data Sheet','Documentations')\nkeyboardServer5288EN.row('3D-model')\nkeyboardServer5288EN.row('Back')\n\nkeyboardServer2488EN = telebot.types.ReplyKeyboardMarkup(True)\nkeyboardServer2488EN.row('Data Sheet','Documentations')\nkeyboardServer2488EN.row('3D-model')\nkeyboardServer2488EN.row('Back')\n\nkeyboardServer5885EN = telebot.types.ReplyKeyboardMarkup(True)\nkeyboardServer5885EN.row('Data Sheet','Documentations')\nkeyboardServer5885EN.row('3D-model')\nkeyboardServer5885EN.row('Back')\n\nkeyboardStorageEN = telebot.types.ReplyKeyboardMarkup(True)\nkeyboardStorageEN.row('All-Flash', 'Hybrid')\nkeyboardStorageEN.row('Back')\n\nkeyboardStorageAllFlashEN = telebot.types.ReplyKeyboardMarkup(True,)\nkeyboardStorageAllFlashEN.row('OceanStor Dorado V3','OceanStor Dorado V6')\nkeyboardStorageAllFlashEN.row('Back')\n\n\nkeyboardStorageDoradoV3EN = telebot.types.ReplyKeyboardMarkup(True)\nkeyboardStorageDoradoV3EN.row('Data Sheet','Documentations')\nkeyboardStorageDoradoV3EN.row('Description','Tests')\nkeyboardStorageDoradoV3EN.row('3D-model')\nkeyboardStorageDoradoV3EN.row('Back')\n\n\nkeyboardStorageDoradoV6EN = telebot.types.ReplyKeyboardMarkup(True)\nkeyboardStorageDoradoV6EN.row('Back')\n\n\nkeyboardStorageHybridEN = telebot.types.ReplyKeyboardMarkup(True)\nkeyboardStorageHybridEN.row('OceanStor 2200 V3','OceanStor 2600 V3')\nkeyboardStorageHybridEN.row('OceanStor 5000 V5','OceanStor 6800 V5')\nkeyboardStorageHybridEN.row('OceanStor 18000 V5')\nkeyboardStorageHybridEN.row('Back')\n\nkeyboardStorage2200EN = telebot.types.ReplyKeyboardMarkup(True)\nkeyboardStorage2200EN.row('Description','3D-model')\nkeyboardStorage2200EN.row('Back')\n\nkeyboardStorage2600EN = telebot.types.ReplyKeyboardMarkup(True)\nkeyboardStorage2600EN.row('Description','Data Sheet')\nkeyboardStorage2600EN.row('3D-model')\nkeyboardStorage2600EN.row('Back')\n\nkeyboardStorage5000EN = telebot.types.ReplyKeyboardMarkup(True)\nkeyboardStorage5000EN.row('Description','Data Sheet')\nkeyboardStorage5000EN.row('Documentations','3D-model')\nkeyboardStorage5000EN.row('Back')\n\nkeyboardStorage6800EN = telebot.types.ReplyKeyboardMarkup(True)\nkeyboardStorage6800EN.row('Description','Data Sheet')\nkeyboardStorage6800EN.row('Documentations','3D-model')\nkeyboardStorage6800EN.row('Back')\n\nkeyboardStorage18000EN = telebot.types.ReplyKeyboardMarkup(True)\nkeyboardStorage18000EN.row('Description','Data 
Sheet')\nkeyboardStorage18000EN.row('Documentations','3D-model')\nkeyboardStorage18000EN.row('Back')\n\ndef english(bot,message,type,read,i):\n\n if message.text.lower() == 'english/английский' or message.text.lower() == 'английский':\n bot.send_message(message.chat.id,\n 'Hello, ' + message.from_user.first_name + ', Welcome to HuaweiEBG_Bot ✋. Which direction are you interested in?',\n reply_markup=keyboardEN)\n\n elif message.text.lower() == 'back':\n bot.send_message(message.chat.id, 'Which direction are you interested in?', reply_markup=keyboardEN)\n\n elif message.text.lower() == 'servers':\n bot.send_message(message.chat.id, 'Choose the type of server you are interested in.',reply_markup=keyboardServerEN)\n\n elif message.text.lower() == 'rack':\n bot.send_message(message.chat.id, 'Choose the server of interest', reply_markup=keyboardServerRackEN)\n\n elif message.text.lower() == '1288h v5':\n type[i] = 8\n bot.send_message(message.chat.id, read(\"Server!B8\") +\n \"\"\"\\n\\n Для ссылки на документацию, выберете интересующий документ\n \\n\"\"\" + read(\"Server!B3\"),\n reply_markup=keyboardServer1288EN)\n elif message.text.lower() == 'data sheet' and type[i] == 8:\n bot.send_message(message.chat.id, '👇👇👇 Data Sheet 👇👇👇 \\n' + read(\"Server!B5\"),\n reply_markup=keyboardServer1288EN)\n elif message.text.lower() == 'documentations' and type[i] == 8:\n bot.send_message(message.chat.id, '👇👇👇 Documentations 👇👇👇 \\n' + read(\"Server!B7\"),\n reply_markup=keyboardServer1288EN)\n elif message.text.lower() == '3d-model' and type[i] == 8:\n bot.send_message(message.chat.id,\n '👇👇👇 3D-model 👇👇👇 \\n' + read(\"Server!B4\"),\n reply_markup=keyboardServer1288EN)\n\n elif message.text.lower() == '2288h v5':\n type[i] = 9\n bot.send_message(message.chat.id, read(\"Server!C8\") +\n \"\"\"\\n\\n Для ссылки на документацию, выберете интересующий документ\n \\n\"\"\" + read(\"Server!C3\"),\n reply_markup=keyboardServer2288EN)\n elif message.text.lower() == 'data sheet' and type[i] == 9:\n bot.send_message(message.chat.id,\n '👇👇👇 Data Sheet 👇👇👇 \\n' + read(\"Server!C5\"),\n reply_markup=keyboardServer2288EN)\n elif message.text.lower() == 'documentations' and type[i] == 9:\n bot.send_message(message.chat.id,\n '👇👇👇 Documentations 👇👇👇 \\n' + read(\"Server!C7\"),\n reply_markup=keyboardServer2288EN)\n elif message.text.lower() == '3d-model' and type[i] == 9:\n bot.send_message(message.chat.id,\n '👇👇👇 3D-model 👇👇👇 \\n' + read(\"Server!C4\"),\n reply_markup=keyboardServer2288EN)\n\n elif message.text.lower() == '5288 v5':\n type[i] = 10\n bot.send_message(message.chat.id, read(\"Server!D8\") +\n \"\"\"\\n\\n Для ссылки на документацию, выберете интересующий документ\n \\n\"\"\" + read(\"Server!D3\"),\n reply_markup=keyboardServer5288EN)\n elif message.text.lower() == 'data sheet' and type[i] == 10:\n bot.send_message(message.chat.id,\n '👇👇👇 Data Sheet 👇👇👇 \\n' + read(\"Server!D5\"),\n reply_markup=keyboardServer5288EN)\n elif message.text.lower() == 'documentations' and type[i] == 10:\n bot.send_message(message.chat.id,\n '👇👇👇 Documentations 👇👇👇 \\n' + read(\"Server!D7\"),\n reply_markup=keyboardServer5288EN)\n elif message.text.lower() == '3d-model' and type[i] == 10:\n bot.send_message(message.chat.id,\n '👇👇👇 3D-model 👇👇👇 \\n' + read(\"Server!D4\"),\n reply_markup=keyboardServer5288EN)\n\n elif message.text.lower() == '2488h v5':\n type[i] = 11\n bot.send_message(message.chat.id, read(\"Server!E8\") +\n \"\"\"\\n\\n Для ссылки на документацию, выберете интересующий документ\n \\n\"\"\" + 
read(\"Server!E3\"),\n reply_markup=keyboardServer2488EN)\n elif message.text.lower() == 'data sheet' and type[i] == 11:\n bot.send_message(message.chat.id,\n '👇👇👇 Data Sheet 👇👇👇 \\n' + read(\"Server!E5\"),\n reply_markup=keyboardServer2488EN)\n elif message.text.lower() == 'documentations' and type[i] == 11:\n bot.send_message(message.chat.id,\n '👇👇👇 Documentations 👇👇👇 \\n' + read(\"Server!E7\"),\n reply_markup=keyboardServer2488EN)\n elif message.text.lower() == '3d-model' and type[i] == 11:\n bot.send_message(message.chat.id,\n '👇👇👇 3D-model 👇👇👇 \\n' + read(\"Server!E4\"),\n reply_markup=keyboardServer2488EN)\n\n elif message.text.lower() == '5885h v5':\n type[i] = 12\n bot.send_message(message.chat.id, read(\"Server!F8\") +\n \"\"\"\\n\\n Для ссылки на документацию, выберете интересующий документ\n \\n\"\"\" + read(\"Server!F3\"),\n reply_markup=keyboardServer5885EN)\n elif message.text.lower() == 'data sheet' and type[i] == 12:\n bot.send_message(message.chat.id,\n '👇👇👇 Data sheet 👇👇👇 \\n' + read(\"Server!F5\"),\n reply_markup=keyboardServer5885EN)\n elif message.text.lower() == 'documentations' and type[i] == 12:\n bot.send_message(message.chat.id,\n '👇👇👇 Documentations 👇👇👇 \\n' + read(\"Server!F7\"),\n reply_markup=keyboardServer5885EN)\n elif message.text.lower() == '3d-model' and type[i] == 12:\n bot.send_message(message.chat.id,\n '👇👇👇 3D-model 👇👇👇 \\n' + read(\"Server!F4\"),\n reply_markup=keyboardServer5885EN)\n\n elif message.text.lower() == 'high-density':\n bot.send_message(message.chat.id, 'Выберете интересующую модель', reply_markup=keyboardHighDensityEN)\n\n elif message.text.lower() == 'x6000':\n type[i] = 13\n bot.send_message(message.chat.id, read(\"Server!G8\") +\n \"\"\"\\n\\n Для ссылки на документацию, выберете интересующий документ\n \\n\"\"\" + read(\"Server!G3\"),\n reply_markup=keyboardX6000EN)\n elif message.text.lower() == 'data sheet' and type[i] == 13:\n bot.send_message(message.chat.id,\n '👇👇👇 Data Sheet 👇👇👇 \\n' + read(\"Server!G5\"),\n reply_markup=keyboardX6000EN)\n elif message.text.lower() == 'documentations' and type[i] == 13:\n bot.send_message(message.chat.id,\n '👇👇👇 Documentations 👇👇👇 \\n' + read(\"Server!G7\"),\n reply_markup=keyboardX6000EN)\n elif message.text.lower() == '3d-model' and type[i] == 13:\n bot.send_message(message.chat.id,\n '👇👇👇 3D-model 👇👇👇 \\n' + read(\"Server!G4\"),\n reply_markup=keyboardX6000EN)\n\n elif message.text.lower() == 'x6800':\n type[i] = 14\n bot.send_message(message.chat.id, read(\"Server!H8\") +\n \"\"\"\\n\\n Для ссылки на документацию, выберете интересующий документ\n \\n\"\"\" + read(\"Server!H3\"),\n reply_markup=keyboardX6800EN)\n\n elif message.text.lower() == 'blade':\n bot.send_message(message.chat.id, 'Выберете интересующую модель', reply_markup=keyboardBladeEN)\n\n elif message.text.lower() == 'e9000':\n type[i] = 15\n bot.send_message(message.chat.id, read(\"Server!I8\") +\n \"\"\"\\n\\n Для ссылки на документацию, выберете интересующий документ\n \\n\"\"\" + read(\"Server!I3\"),\n reply_markup=keyboardE9000EN)\n elif message.text.lower() == 'data sheet' and type[i] == 15:\n bot.send_message(message.chat.id,\n '👇👇👇 Data Sheet 👇👇👇 \\n' + read(\"Server!I5\"),\n reply_markup=keyboardE9000EN)\n elif message.text.lower() == 'documentations' and type[i] == 15:\n bot.send_message(message.chat.id,\n '👇👇👇 Documentations 👇👇👇 \\n' + read(\"Server!I7\"),\n reply_markup=keyboardE9000EN)\n elif message.text.lower() == '3d-model' and type[i] == 15:\n bot.send_message(message.chat.id,\n '👇👇👇 3D-model 👇👇👇 \\n' + 
read(\"Server!I4\"),\n reply_markup=keyboardE9000EN)\n\n elif message.text.lower() == 'storages':\n bot.send_message(message.chat.id, 'Выберете тип системы хранения данных',reply_markup=keyboardStorageEN)\n\n elif message.text.lower() == 'all-flash':\n bot.send_message(message.chat.id, 'Выберете интересующий модельный ряд', reply_markup=keyboardStorageAllFlashEN)\n elif message.text.lower() == 'hybrid':\n bot.send_message(message.chat.id, 'Выберете интересующий модельный ряд', reply_markup=keyboardStorageHybridEN)\n\n elif message.text.lower() == 'oceanstor dorado v3':\n type[i] = 1\n bot.send_message(message.chat.id, read(\"Storage!C9\") +\n \"\"\"\\n\\n Для ссылки на документацию, выберете интересующий документ\n \\n\"\"\" + read(\"Storage!C3\"),\n reply_markup=keyboardStorageDoradoV3EN)\n elif message.text.lower() == 'data sheet' and type[i] == 1:\n bot.send_message(message.chat.id, '👇👇👇 Data Sheet 👇👇👇 \\n' + read(\"Storage!C5\"),\n reply_markup=keyboardStorageDoradoV3EN)\n elif message.text.lower() == 'documentations' and type[i] == 1:\n bot.send_message(message.chat.id, '👇👇👇 Documentations 👇👇👇 \\n' + read(\"Storage!C7\"),\n reply_markup=keyboardStorageDoradoV3EN)\n elif message.text.lower() == 'description' and type[i] == 1:\n bot.send_message(message.chat.id, '👇👇👇 Description 👇👇👇 \\n' + read(\"Storage!C8\"),\n reply_markup=keyboardStorageDoradoV3EN)\n elif message.text.lower() == 'tests' and type[i] == 1:\n bot.send_message(message.chat.id, '👇👇👇 Tests 👇👇👇 \\n' + read(\"Storage!C6\"),\n reply_markup=keyboardStorageDoradoV3EN)\n elif message.text.lower() == '3d-model' and type[i] == 1:\n bot.send_message(message.chat.id,\n '👇👇👇 3D-model 👇👇👇 \\n' + read(\"Storage!C4\"),\n reply_markup=keyboardStorageDoradoV3EN)\n\n elif message.text.lower() == 'oceanstor dorado v6':\n type[i] = 2\n bot.send_message(message.chat.id, read(\"Storage!B9\") +\n \"\"\"\\n\\n Для ссылки на документацию, выберете интересующий документ\n \\n\"\"\" + read(\"Storage!B3\"),\n reply_markup=keyboardStorageDoradoV6EN)\n\n elif message.text.lower() == 'oceanstor 2200 v3':\n type[i] = 3\n bot.send_message(message.chat.id, read(\"Storage!D9\") +\n \"\"\"\\n\\n Для ссылки на документацию, выберете интересующий документ\n \\n\"\"\" + read(\"Storage!D3\"),\n reply_markup=keyboardStorage2200EN)\n elif message.text.lower() == 'description' and type[i] == 3:\n bot.send_message(message.chat.id,\n '👇👇👇 Description 👇👇👇 \\n' + read(\"Storage!D8\"),\n reply_markup=keyboardStorage2200EN)\n elif message.text.lower() == '3d-model' and type[i] == 3:\n bot.send_message(message.chat.id,\n '👇👇👇 3D-model 👇👇👇 \\n' + read(\"Storage!D4\"),\n reply_markup=keyboardStorage2200EN)\n\n elif message.text.lower() == 'oceanstor 2600 v3':\n type[i] = 4\n bot.send_message(message.chat.id, read(\"Storage!E9\") +\n \"\"\"\\n\\n Для ссылки на документацию, выберете интересующий документ\n \\n\"\"\" + read(\"Storage!E3\"),\n reply_markup=keyboardStorage2600EN)\n elif message.text.lower() == 'description' and type[i] == 4:\n bot.send_message(message.chat.id,\n '👇👇👇 Description 👇👇👇 \\n' + read(\"Storage!E8\"),\n reply_markup=keyboardStorage2600EN)\n elif message.text.lower() == 'data sheet' and type[i] == 4:\n bot.send_message(message.chat.id, '👇👇👇 Data Sheet 👇👇👇 \\n' + read(\"Storage!E5\"),\n reply_markup=keyboardStorage2600EN)\n elif message.text.lower() == '3d-model' and type[i] == 4:\n bot.send_message(message.chat.id,\n '👇👇👇 3D-model 👇👇👇 \\n' + read(\"Storage!E4\"),\n reply_markup=keyboardStorage2600EN)\n\n elif message.text.lower() == 'oceanstor 5000 
v5':\n type[i] = 5\n bot.send_message(message.chat.id, read(\"Storage!F9\") +\n \"\"\"\\n\\n Для ссылки на документацию, выберете интересующий документ\n \\n\"\"\" + read(\"Storage!F3\"),\n reply_markup=keyboardStorage5000EN)\n elif message.text.lower() == 'documentations' and type[i] == 5:\n bot.send_message(message.chat.id, '👇👇👇 Documentations 👇👇👇 \\n' + read(\"Storage!F7\"),\n reply_markup=keyboardStorage5000EN)\n elif message.text.lower() == 'description' and type[i] == 5:\n bot.send_message(message.chat.id,\n '👇👇👇 Description 👇👇👇 \\n' + read(\"Storage!F8\"),\n reply_markup=keyboardStorage5000EN)\n elif message.text.lower() == 'data sheet' and type[i] == 5:\n bot.send_message(message.chat.id,\n '👇👇👇 Data Sheet 👇👇👇 \\n' + read(\"Storage!F5\"),\n reply_markup=keyboardStorage5000EN)\n elif message.text.lower() == '3d-model' and type[i] == 5:\n bot.send_message(message.chat.id,\n '👇👇👇 3D-model 👇👇👇 \\n' + read(\"Storage!F4\"),\n reply_markup=keyboardStorage5000EN)\n\n elif message.text.lower() == 'oceanstor 6800 v5':\n type[i] = 6\n bot.send_message(message.chat.id, read(\"Storage!G9\") +\n \"\"\"\\n\\n Для ссылки на документацию, выберете интересующий документ\n \\n\"\"\" + read(\"Storage!G3\"),\n reply_markup=keyboardStorage6800EN)\n elif message.text.lower() == 'documentations' and type[i] == 6:\n bot.send_message(message.chat.id, '👇👇👇 Documentations 👇👇👇 \\n' + read(\"Storage!G7\"),\n reply_markup=keyboardStorage6800EN)\n elif message.text.lower() == 'description' and type[i] == 6:\n bot.send_message(message.chat.id,\n '👇👇👇 Description 👇👇👇 \\n' + read(\"Storage!G8\"),\n reply_markup=keyboardStorage6800EN)\n elif message.text.lower() == 'data sheet' and type[i] == 6:\n bot.send_message(message.chat.id,\n '👇👇👇 Data Sheet 👇👇👇 \\n' + read(\"Storage!G5\"),\n reply_markup=keyboardStorage6800EN)\n elif message.text.lower() == '3d-model' and type[i] == 6:\n bot.send_message(message.chat.id,\n '👇👇👇 3D-model 👇👇👇 \\n' + read(\"Storage!G4\"),\n reply_markup=keyboardStorage6800EN)\n\n elif message.text.lower() == 'oceanstor 18000 v5':\n type[i] = 7\n bot.send_message(message.chat.id, read(\"Storage!H9\") +\n \"\"\"\\n\\n Для ссылки на документацию, выберете интересующий документ\n \\n\"\"\" + read(\"Storage!H3\"),\n reply_markup=keyboardStorage18000EN)\n elif message.text.lower() == 'documentations' and type[i] == 7:\n bot.send_message(message.chat.id,\n '👇👇👇 Documentations 👇👇👇 \\n' + read(\"Storage!H7\"),\n reply_markup=keyboardStorage18000EN)\n elif message.text.lower() == 'description' and type[i] == 7:\n bot.send_message(message.chat.id,\n '👇👇👇 Description 👇👇👇 \\n' + read(\"Storage!H8\"),\n reply_markup=keyboardStorage18000EN)\n elif message.text.lower() == 'data sheet' and type[i] == 7:\n bot.send_message(message.chat.id,\n '👇👇👇 Data Sheet 👇👇👇 \\n' + read(\"Storage!H5\"),\n reply_markup=keyboardStorage18000EN)\n elif message.text.lower() == '3d-model' and type[i] == 7:\n bot.send_message(message.chat.id,\n '👇👇👇 3D-model 👇👇👇 \\n' + read(\"Storage!H4\"),\n reply_markup=keyboardStorage18000EN)\n\n else:\n bot.send_message(message.chat.id,'Sorry, I do not understand you. 
Please click on the button or write /start')","repo_name":"SerhiiDemydov/HuaweiBot","sub_path":"English.py","file_name":"English.py","file_ext":"py","file_size_in_byte":22620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"7906664379","text":"from time import *\nprint(\"\"\"\n \nMultiples of 3 and 5\nProblem 1\n\n   If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3,5,6 and 9. The sum of these multiples is 23.\n\n   Find the sum of all the multiples of 3 or 5 below 1000.\n\n   Answer: 233168\n\n   \"\"\")\nwhile True:\n    factor1 = int(input("Enter first Factor: "))\n    factor2 = int(input("Enter second Factor: "))\n    limit = int(input("Enter limit: "))\n    # time.clock() was removed in Python 3.8; perf_counter() is its replacement\n    start = perf_counter()\n    multSet = set()\n    for i in range(limit):\n        if i%factor1 == 0 or i%factor2 == 0:\n            multSet.add(i)\n    multipleSum = sum(multSet)\n    print("The multiple sum of {} and {} for numbers under {} is {}".format(factor1, factor2, limit, multipleSum))\n    end = perf_counter()\n    print("Your program took {} seconds to run".format(end-start))\n","repo_name":"triplejay2013/ProjectEuler","sub_path":"1/multiples.py","file_name":"multiples.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"37390535928","text":"from typing import Iterable, List\n\nimport pandas as pd\n\nfrom ixmp4 import db\nfrom ixmp4.data.abstract import optimization as abstract\nfrom ixmp4.data.auth.decorators import guard\n\nfrom .. import base\nfrom .docs import IndexSetDocsRepository\nfrom .model import IndexSet\n\n\nclass IndexSetRepository(\n    base.Creator[IndexSet],\n    base.Retriever[IndexSet],\n    base.Enumerator[IndexSet],\n    abstract.IndexSetRepository,\n):\n    model_class = IndexSet\n\n    def __init__(self, *args, **kwargs) -> None:\n        super().__init__(*args, **kwargs)\n        self.docs = IndexSetDocsRepository(*args, **kwargs)\n\n        from .filter import OptimizationIndexSetFilter\n\n        self.filter_class = OptimizationIndexSetFilter\n\n    def add(self, run_id: int, name: str) -> IndexSet:\n        indexset = IndexSet(run__id=run_id, name=name, **self.get_creation_info())\n        self.session.add(indexset)\n        return indexset\n\n    @guard(\"view\")\n    def get(self, run_id: int, name: str) -> IndexSet:\n        exc = db.select(IndexSet).where(\n            (IndexSet.name == name) & (IndexSet.run__id == run_id)\n        )\n        try:\n            return self.session.execute(exc).scalar_one()\n        except db.NoResultFound:\n            raise IndexSet.NotFound\n\n    @guard(\"view\")\n    def get_by_id(self, id: int) -> IndexSet:\n        obj = self.session.get(self.model_class, id)\n\n        if obj is None:\n            raise IndexSet.NotFound(id=id)\n\n        return obj\n\n    @guard(\"edit\")\n    def create(self, run_id: int, name: str, **kwargs) -> IndexSet:\n        return super().create(run_id=run_id, name=name, **kwargs)\n\n    @guard(\"view\")\n    def list(self, *args, **kwargs) -> Iterable[IndexSet]:\n        return super().list(*args, **kwargs)\n\n    @guard(\"view\")\n    def tabulate(self, *args, **kwargs) -> pd.DataFrame:\n        return super().tabulate(*args, **kwargs)\n\n    @guard(\"edit\")\n    def add_elements(\n        self,\n        indexset_id: int,\n        elements: int | List[int | str] | str,\n    ) -> None:\n        indexset = self.get_by_id(id=indexset_id)\n        if not isinstance(elements, list):\n            elements = [elements]\n        if indexset.elements is None:\n            indexset.elements = elements\n        else:\n            indexset.elements = indexset.elements + elements\n\n        self.session.add(indexset)\n        
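# the 'elements' list above is rebound rather than mutated in place, presumably\n        # so the ORM detects the change; commit immediately to persist it\n        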
self.session.commit()\n","repo_name":"iiasa/ixmp4","sub_path":"ixmp4/data/db/optimization/indexset/repository.py","file_name":"repository.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"50"} +{"seq_id":"33124408437","text":"\"\"\"Provide operators\"\"\"\nfrom __future__ import annotations\n\nimport operator\nfrom typing import Any, Callable, Type, TYPE_CHECKING\n\nfrom .utils import evaluate_expr\nfrom .expression import Expression, OPERATORS\n\nif TYPE_CHECKING:\n from .context import ContextType\n\n\nclass OperatorCall(Expression):\n \"\"\"The operator call\n\n Args:\n op_func: The function to handle the call\n op_name: The name of the operator\n operands: The operands of the operator\n \"\"\"\n\n def __init__(\n self, op_func: Callable, op_name: str, *operands: Any\n ) -> None:\n self._pipda_op_func = op_func\n self._pipda_op_name = op_name\n self._pipda_operands = operands\n\n def __str__(self):\n \"\"\"String representation of the operator call\"\"\"\n op, right = OPERATORS[self._pipda_op_name]\n if right:\n return f\" {op} \".join(\n reversed([str(operand) for operand in self._pipda_operands])\n )\n if len(self._pipda_operands) == 1:\n return f\"{op}{str(self._pipda_operands[0])}\"\n\n return f\" {op} \".join(str(operand) for operand in self._pipda_operands)\n\n def _pipda_eval(\n self,\n data: Any,\n context: ContextType = None,\n ) -> Any:\n \"\"\"Evaluate the operator call\"\"\"\n operands = (\n evaluate_expr(arg, data, context)\n for arg in self._pipda_operands\n )\n return self._pipda_op_func(*operands)\n\n\nclass Operator:\n \"\"\"Defines the operators\n\n By default, it inherits the operator from the builtin `operator` library\n\n You can define you own operators by subclass this class and decorated it\n using `register_operator`.\n\n Examples:\n >>> @register_operator\n >>> class MyOperator(Operator):\n >>> def add(self, x, y):\n >>> return x * y\n \"\"\"\n\n def __getattr__(self, name: str) -> Callable:\n if not OPERATORS[name][1]:\n # not a right operator (e.g. radd)\n return getattr(operator, name)\n\n name = name[1:]\n return lambda x, y: getattr(operator, name)(y, x)\n\n\ndef register_operator(opclass: Type) -> Type:\n \"\"\"Register a operator class\n\n Can be worked as a decorator\n >>> @register_operator\n >>> class MyOperator(Operator):\n >>> ...\n\n Args:\n opclass: A subclass\n\n Returns:\n The opclass\n \"\"\"\n from .expression import Expression\n\n Expression._pipda_operator = opclass()\n return opclass\n","repo_name":"pwwang/pipda","sub_path":"pipda/operator.py","file_name":"operator.py","file_ext":"py","file_size_in_byte":2499,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"50"} +{"seq_id":"7634630144","text":"#!/usr/bin/env python\n#\n# File = rws-00-fullSubredditRefresh.py\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the MIT license.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# MIT License for more details.\n#\n############################################################################\n\nfrom gimpfu import *\nimport os\n\ndef full_subreddit_refresh(inputImage, inputDrawable, tgtPath):\n    # Cleanup old sidebar and banner files before starting\n    oldSidebarFile = os.path.join(tgtPath, 'sidebar-img.jpg')\n    oldBannerImage = os.path.join(tgtPath, 'banner.jpg')\n    # guard the removals so a missing file on the first run does not abort the refresh\n    if os.path.exists(oldSidebarFile):\n        os.remove(oldSidebarFile)\n    if os.path.exists(oldBannerImage):\n        os.remove(oldBannerImage)\n\n    pdb.python_fu_get_winners(inputImage, inputDrawable, tgtPath)\n\n    pdb.python_fu_gen_sidebar_image(inputImage, inputDrawable, tgtPath)\n\n    pdb.python_fu_gen_banner_images(inputImage, inputDrawable, tgtPath)\n\n    pdb.python_fu_build_banner(inputImage, inputDrawable, tgtPath)\n\n    pdb.python_fu_update_subreddit(inputImage, inputDrawable, tgtPath)\n\n############################################################################\n\nregister (\n    "python_fu_full_subreddit_refresh",     # Name registered in Procedure Browser\n    "r/WetShaving weekly image refresh",     # Widget title\n    "Run through all the steps necessary for weekly updates to r/WetShaving's sidebar and banner images", # Help\n    "BourbonInExile@gmail.com",     # Author\n    "BourbonInExile@gmail.com",     # Copyright Holder\n    "Feb 2019",     # Date\n    "<Image>/Reddit/0 - Full Banner Run",     # Menu Location\n    "",     # Image Type - No image required\n    [ ( PF_DIRNAME, "tgtPath", "Working Directory:", ""), ],     # Params\n    [],     # Results\n    full_subreddit_refresh,     # Matches to name of function being defined\n    )     # End register\n\nmain()\n","repo_name":"waab76/RedditBannerUpdate","sub_path":"rws-00-fullSubredditRefresh.py","file_name":"rws-00-fullSubredditRefresh.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"21529071941","text":"# construct an instance and call its display method\nclass van():\n    def __init__(self, speed=9000, colour='mec'):\n        self.velocity = speed\n        self.paint = colour\n    def display(self, cost):\n        self.amount = cost\n        print(self.velocity)\n        print(self.paint)\n        print(self.amount)\n\ncar = van(8888, 'mec blue')\ncar.display('30757$')\n","repo_name":"leelasabarish/python","sub_path":"OOPS/methods/calling.py","file_name":"calling.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"73001196316","text":"import logging\nfrom aplikasi.rate_operator import blueprint\nfrom aplikasi.rate_operator.utils import req_rate_operator, req_rate_operator_edit, show_rate_operator\nfrom aplikasi.base.controller import *\n\nfrom flask import session, render_template, redirect, url_for, request, jsonify\n\n\n@blueprint.route('/', methods=['GET'])\ndef rate_operator():\n\tdata = { 'menu_active': 'master'}\n\toperator = req_rate_operator()\n\tmessage = operator.to_dict(orient='records')\n\n\treturn open_page('rate_operator.html', data=data, message=message)\n\n@blueprint.route('/req', methods=['POST'])\ndef rate_operator_edit_json():\n    nilai_minimal=request.form.get('nilai_minimal')\n    nilai_maksimal=request.form.get('nilai_maksimal')\n    rate=request.form.get('rate')\n    no_admin=request.form.get('no_admin')\n    kode_provider=request.form.get('kode_provider')\n\n    data=req_rate_operator_edit(nilai_minimal, nilai_maksimal, rate, no_admin, kode_provider)\n\n    return jsonify({'rc':'00','rc_desc':'success','data':data})\n\n@blueprint.route('/show', methods=['POST'])\ndef rate_operator_show_json():\n    
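# read the provider code from the POST form and return its rate rows as JSON\n    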
kode_provider=request.form.get('kode_provider')\n\n    rate = show_rate_operator(kode_provider)\n    data = rate.to_dict(orient='records')\n\n    return jsonify({'data':data})\n","repo_name":"Baguspanji/bagi-pulsa","sub_path":"aplikasi/rate_operator/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"36350546957","text":"'''\nPython Exercise 082:\nWrite a program that reads several numbers and puts them into a list.\nAfter that:\ncreate two extra lists holding only the even and the odd values\nentered, respectively.\nAt the end, show the contents of the three generated lists.\n'''\n\nlista = []\npar = []\nimpar = []\nwhile True:\n    lista.append(int(input("Enter a number: ")))\n    c = str(input("Continue? [Y/N] "))\n    if c in 'Nn':\n        break\nprint(f"The list entered is {lista}")\nfor v in lista:\n    if v % 2 == 0:\n        par.append(v)\n    else:\n        impar.append(v)\nprint(f"The even numbers in the list are {par}")\nprint(f"The odd numbers in the list are {impar}")\n","repo_name":"ricardosaltmann/exerciciospython1","sub_path":"aula17/ex082b.py","file_name":"ex082b.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"9642768944","text":"from flask import Flask, request, render_template\n\napp = Flask(__name__)\n\napp.secret_key = "M4sImoe3nk sa!iki MaiN3 Vap3"\n\ndef write_to_file(stub, data):\n    with open("static/articles/{}.txt".format(stub), "w+") as f:\n        f.write(data)\n\ndef read_from_file(stub):\n    with open("static/articles/{}.txt".format(stub)) as f:\n        return f.readlines()\n\n\n@app.route("/")\ndef index():\n    return render_template("home.html")\n\n@app.route("/article/<stub>")\ndef read_article(stub):\n    description=read_from_file(stub)\n    return render_template(\n        "article.html",\n        title=stub,\n        description=description\n    )\n\n@app.route("/article/<stub>/json")\ndef read_article_json(stub):\n    data=read_from_file(stub)\n    return dict(title=stub, description=data)\n\n@app.route("/article", methods=["GET", "POST"])\ndef post_article():\n    if request.method == "GET":\n        return render_template("post_article.html")\n    elif request.method == "POST":\n        title = request.form["title"]\n        description = request.form["description"]\n        print(title)\n        print(description)\n        write_to_file(title, description)\n        return render_template(\n            "article.html",\n            title=title,\n            description=description)\n\nif __name__ == "__main__":\n    app.run(port=8000, debug=True)","repo_name":"asharimh97/practices","sub_path":"python/flasky/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"3003886523","text":"from lxml import etree, objectify\nfrom qgis.core import *\nimport sys\nimport numpy as np\nimport os\nimport random, math\nimport logging\n\nlogging.basicConfig(level=logging.DEBUG)\n\ndef parseParking(xmlfile):\n    parkingAreas = {}\n    with open(xmlfile) as fobj:\n        xml = fobj.read()\n        xml = bytes(bytearray(xml, encoding='utf-8'))\n    additional = etree.parse(xmlfile).getroot()\n\n    for parkingArea in additional.getchildren():\n\n        #print(parkingArea)\n\n        lane = parkingArea.attrib['lane']\n        edge = lane.split('_')[0]\n        parkingAreas[edge] = parkingArea\n    return parkingAreas\n\ndef 
loadParkingFacility(xmlfile):\n pk_type = os.path.basename(xmlfile).split('_')[0]\n parkingAreas = {}\n total_capacity = 0\n with open(xmlfile) as fobj:\n xml = fobj.read()\n xml = bytes(bytearray(xml, encoding='utf-8'))\n additional = etree.parse(xmlfile).getroot()\n\n for parkingArea in additional.getchildren():\n\n pk = {}\n pk['id'] = parkingArea.attrib['id']\n pk['type'] = pk_type\n pk['capacity'] = int(parkingArea.attrib['roadsideCapacity'])\n pk['occupancy'] = 0\n pk['rate'] = 0\n\n parkingAreas[pk['id']] = pk\n total_capacity += pk['capacity']\n return parkingAreas, total_capacity\n\ndef splitDropoffAndOnParking(total_parking_xml, drop_off_only_percentage):\n root = etree.parse(total_parking_xml).getroot()\n additional = objectify.Element(\"additional\")\n for parking in root.getchildren():\n if np.random.random() < 1 - drop_off_only_percentage:\n additional.append(parking)\n return additional\n\ndef reallocateOffParkingCapacity(base_off_parking, reallocate_percentage):\n if reallocate_percentage == 0:\n return etree.parse(base_off_parking).getroot()\n\n parkingAreas_dict, total_capacity = loadParkingFacility(base_off_parking)\n parkingAreas_obj = parseParking(base_off_parking)\n reallocate_capacity = int(total_capacity * reallocate_percentage)\n reallocate_amount = 0\n original_count = len(parkingAreas_dict)\n removed_count = 0\n while True:\n rand_element = random.choice(list(parkingAreas_dict.keys()))\n rand_capacity = parkingAreas_dict[rand_element]['capacity']\n if reallocate_amount + rand_capacity < 0.95 * reallocate_capacity:\n reallocate_amount += rand_capacity\n del parkingAreas_dict[rand_element]\n removed_count += 1\n elif reallocate_amount + rand_capacity > 1.05 * reallocate_capacity:\n continue\n else:\n reallocate_amount += rand_capacity\n del parkingAreas_dict[rand_element]\n removed_count += 1\n break\n remain_capacity = total_capacity - reallocate_amount\n additional = objectify.Element(\"additional\")\n\n logging.debug('Original capacity is: ({}, {})'.format(original_count, total_capacity))\n logging.debug('Reallocated capacity is: ({}, {})'.format(removed_count, reallocate_amount))\n logging.debug('Left capacity is: ({}, {})'.format(len(parkingAreas_dict), remain_capacity))\n\n total_capacity_new = 0\n for key in parkingAreas_dict.keys():\n edge = key.split('_')[1]\n original_capacity = int(parkingAreas_obj[edge].attrib['roadsideCapacity'])\n expanded_capacity = int(original_capacity/remain_capacity * reallocate_amount) + original_capacity\n parkingAreas_obj[edge].attrib['roadsideCapacity'] = str(expanded_capacity)\n additional.append(parkingAreas_obj[edge])\n total_capacity_new += expanded_capacity\n\n logging.debug('After reallocation, total capacity is: ({}, {})'.format(len(parkingAreas_dict), total_capacity_new))\n\n return additional\n\n\ndef edgeToTAZs(edges, features_geometry, net, xform_reverse):\n TAZ_edge_dict = {}\n offset = net.getLocationOffset()\n for i in range(len(features_geometry)):\n TAZ_edge_dict[str(i)] = []\n for edge in edges:\n index = 0\n edge_id = edge.getID()\n edge_from_node = edge.getFromNode()\n edge_to_node = edge.getToNode()\n from_coord = edge_from_node.getCoord()\n to_coord = edge_to_node.getCoord()\n from_x = from_coord[0] - offset[0]\n from_y = from_coord[1] - offset[1]\n to_x = to_coord[0] - offset[0]\n to_y = to_coord[1] - offset[1]\n mid_coord = ((from_x + to_x) / 2, (from_y + to_y) / 2)\n mid_coord_tr = xform_reverse.transform(mid_coord[0], mid_coord[1])\n midPointGeometry = QgsGeometry.fromPointXY(mid_coord_tr)\n for 
feature_geometry in features_geometry:\n            if feature_geometry.contains(midPointGeometry):\n                TAZ_parking_dict[str(index)].append(parkingAreas[key])\n                # stop after the first containing TAZ, as in edgeToTAZs; without this\n                # break a parking area could be appended again under a wrong index\n                break\n            else:\n                index += 1\n                continue\n        # if key not in parking_not_inside_taz_tey:\n        #     parking_not_inside_taz_tey.append((key, midPointGeometry))\n        if index == len(features_geometry):\n            print('Parkingarea {} is not within boundary.'.format(parking_edge_id))\n    return TAZ_parking_dict\n\ndef closestTazWithParking(on_dict, off_dict, dropoff_dict, geo_features):\n    on_close = np.zeros(len(on_dict)).astype(int)\n    off_close = np.zeros(len(off_dict)).astype(int)\n    dropoff_close = np.zeros(len(dropoff_dict)).astype(int)\n    for key, pa in on_dict.items():\n        if pa != []:\n            on_close[int(key)] = key\n        else:\n            origin_pt = geo_features[int(key)].centroid()\n            dist = float("inf")\n            target = NULL\n            for i in range(len(on_dict)):\n                if on_dict[str(i)] == []:\n                    continue\n                dest_pt = geo_features[i].centroid()\n                dist_tmp = origin_pt.distance(dest_pt)\n                if dist_tmp < dist:\n                    dist = dist_tmp\n                    target = i\n            if target == NULL:  # for the 100% drop-off-only case target stays NULL; on_close[int(key)] = target would throw an error\n                target = -1\n            on_close[int(key)] = target\n\n    for key, pa in off_dict.items():\n        if pa != []:\n            off_close[int(key)] = key\n        else:\n            origin_pt = geo_features[int(key)].centroid()\n            dist = float("inf")\n            target = NULL\n            for i in range(len(off_dict)):\n                if off_dict[str(i)] == []:\n                    continue\n                dest_pt = geo_features[i].centroid()\n                dist_tmp = origin_pt.distance(dest_pt)\n                if dist_tmp < dist:\n                    dist = dist_tmp\n                    target = i\n            off_close[int(key)] = target\n\n    for key, pa in dropoff_dict.items():\n        if pa != []:\n            dropoff_close[int(key)] = key\n        else:\n            origin_pt = geo_features[int(key)].centroid()\n            dist = float("inf")\n            target = NULL\n            for i in range(len(dropoff_dict)):\n                if dropoff_dict[str(i)] == []:\n                    continue\n                dest_pt = geo_features[i].centroid()\n                dist_tmp = origin_pt.distance(dest_pt)\n                if dist_tmp < dist:\n                    dist = dist_tmp\n                    target = i\n            dropoff_close[int(key)] = target\n\n    return on_close, off_close, dropoff_close\n\ndef parkingStats(parkingXML):\n    with open(parkingXML) as fobj:\n        xml = fobj.read()\n        xml = bytes(bytearray(xml, encoding='utf-8'))\n    additional = etree.parse(parkingXML).getroot()\n\n    #print(additional.tag)\n    count = 0\n    total_capacity = 0\n    for parkingArea in additional.getchildren():\n        count += 1\n        total_capacity += int(parkingArea.attrib['roadsideCapacity'])\n    
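# returns (number of parkingArea elements, summed roadsideCapacity)\n    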
return count, total_capacity\n\ndef getAllModes(tripXML):\n modes = {}\n count = 0\n with open(tripXML) as fobj:\n xml = fobj.read()\n xml = bytes(bytearray(xml, encoding='utf-8'))\n population = etree.parse(tripXML).getroot()\n for person in population:\n if len(person):\n plan = person[0]\n for child in plan.getchildren():\n if child.tag == 'leg':\n mode = child.attrib['mode']\n if mode not in modes:\n modes[mode] = 1\n print(modes)\n else:\n modes[mode] += 1\n count += 1\n # print(count)\n print(modes)\n\ndef getTripStats(tripXML):\n types = {'on': 0, 'drop_off': 0, 'off': 0}\n direct = {'into': 0, 'out': 0, 'within': 0}\n with open(tripXML) as fobj:\n xml = fobj.read()\n xml = bytes(bytearray(xml, encoding='utf-8'))\n routes = etree.parse(tripXML).getroot()\n for trip in routes:\n trip_id = trip.attrib['id']\n direction = trip.attrib[\"direction\"]\n type = trip_id.split('_')[1]\n if type == 'on':\n types['on'] += 1\n elif type == 'drop-off':\n types['drop_off'] += 1\n else:\n types['off'] += 1\n if direction == 'into':\n direct['into'] += 1\n elif direction == 'out':\n direct['out'] += 1\n else:\n direct['within'] += 1\n print(types, direct)\n\nif __name__ == \"__main__\":\n city = 'san_francisco'\n # parseParking('/home/huajun/Desktop/NCST_parking/cities/fairfield/on-parking.add.xml')\n count, total_capacity = parkingStats('/home/huajun/Desktop/NCST_parking/cities/san_francisco/Scenario_Set_1/off_parking.add.xml')\n print(count, total_capacity)\n # getAllModes('/home/huajun/Desktop/NCST_parking/cities/san_francisco/san_francisco_plans_all_7.xml')\n # getTripStats('/home/huajun/Desktop/NCST_parking/cities/san_francisco/Scenario_Set_1/trip_all_7_with_0.5_drop-off.xml')\n\n # supply path to qgis install location\n QgsApplication.setPrefixPath('/usr', True)\n\n # create a reference to the QgsApplication, setting the\n # second argument to False disables the GUI\n qgs = QgsApplication([], False)\n\n # load providers\n qgs.initQgis()\n layer = QgsVectorLayer(\"../cities/\" + city + \"/shp/\" + city + \".shp\", \"\", \"ogr\")\n if not layer.isValid():\n raise Exception(\"Layer failed to load!\")\n\n # create projection\n crsSrc = QgsCoordinateReferenceSystem(\"EPSG:4326\") # WGS 84\n crsDest = QgsCoordinateReferenceSystem(\"EPSG:32610\") # WGS 84 / UTM zone 10N\n xform = QgsCoordinateTransform(crsSrc, crsDest, QgsProject.instance())\n xform_reverse = QgsCoordinateTransform(crsDest, crsSrc, QgsProject.instance())\n\n features = layer.getFeatures()\n features_geometry = []\n for feature in features:\n # retrieve every feature with its geometry and attributes\n features_geometry.append(feature.geometry())\n\n import os, sys\n if 'SUMO_HOME' in os.environ:\n tools = os.path.join(os.environ['SUMO_HOME'], 'tools')\n sys.path.append(tools)\n else:\n sys.exit(\"please declare environment variable 'SUMO_HOME'\")\n\n import sumolib\n # read sumo network\n net = sumolib.net.readNet('../cities/' + city + '/' + city + '.net.xml')\n edges = net.getEdges()\n\n import time\n\n start = time.time()\n TAZ_edge_dict = edgeToTAZs(edges, features_geometry, net, xform_reverse)\n print('Single thread takes {} seconds.'.format(time.time() - start))\n\n","repo_name":"hjchai/NCST_parking","sub_path":"Scripts/parking.py","file_name":"parking.py","file_ext":"py","file_size_in_byte":12829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"74485771035","text":"import io\nimport time\nimport avro.schema\nimport avro.io\nfrom kafka import KafkaProducer\nimport 
logging\n\nKAFKA_BROKERS = \"kafkaBrokers\"\nSCHEMA_PATH = \"schemaPath\"\nKAFKA = \"kafka\"\n\n\nclass KafkaConnector:\n def __init__(self, config):\n self.conf = config[KAFKA]\n schema = self.conf.get(SCHEMA_PATH)\n self.name = self.__class__.__name__\n with open(schema, \"r\") as f:\n self.schema = avro.schema.Parse(f.read())\n kafka_servers = self.conf.get(KAFKA_BROKERS).split(\",\")\n self.producer = None\n self.consumer = None\n self.producer = KafkaProducer(bootstrap_servers=kafka_servers)\n logging.info(\"KafkaConnector initiated\")\n\n def publish(self, metadata_dict, topic):\n writer = avro.io.DatumWriter(self.schema)\n bytes_writer = io.BytesIO()\n encoder = avro.io.BinaryEncoder(bytes_writer)\n writer.write(metadata_dict, encoder)\n raw_bytes = bytes_writer.getvalue()\n self.producer.send(topic, raw_bytes)\n","repo_name":"VilianLee/Scripts","sub_path":"collector/grpc/KafkaConnector.py","file_name":"KafkaConnector.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"46967129608","text":"import os\nimport logging\n\nfrom pandas import concat\nfrom sklearn.metrics import f1_score\n\nfrom scripts.helpers.utils import validate_keras, add_to_environ\nfrom scripts.helpers.logging_utils import config_logger, create_argparser\nfrom scripts.helpers.yaml_utils import load_yaml, dict_to_yaml_str\nfrom scripts.models import nn_model_factory\n\nfrom run_utils import fit_keras, get_data_pipeline, save_history, save_model_keras, score_keras, read_data\nfrom run_utils import create_folder_structure, save_model_keras, save_vars, save_cv_results\nfrom run_utils import check_config\n\nPROC_NAME = os.path.basename(__file__).split('.')[0]\n\nif __name__ == '__main__':\n\n arguments = create_argparser().parse_args()\n structure = create_folder_structure(arguments.folder)\n add_to_environ(arguments.conf)\n\n logger = logging.getLogger(__name__)\n config_logger(logger, PROC_NAME, arguments.folder)\n \n vars_name = f'vars_{PROC_NAME}.yaml'\n vars_path = os.path.join('vars', vars_name) if arguments.vars is None else arguments.vars\n config_global = load_yaml(vars_path)\n check_config(config_global, nn_model_factory)\n\n df_train, df_test, df_val, categories = read_data(**config_global[\"data\"], val=True)\n\n for model_name, config in config_global['models'].items():\n if arguments.model is not None and arguments.model != model_name:\n continue\n\n logger.info(f'Model {model_name}, params:')\n logger.info(dict_to_yaml_str(config))\n config['best_params'] = {}\n\n logger.info(f'Data processing for {model_name}')\n data_pipeline = get_data_pipeline(config)\n X_train, y_train, features = data_pipeline.fit_transform(df_train)\n X_val, y_val, features = data_pipeline.transform(df_val)\n X_test, y_test, features = data_pipeline.transform(df_test)\n\n df_train_full = concat([df_train, df_val], ignore_index=True)\n X_train_full, y_train_full, features = data_pipeline.fit_transform(df_train_full)\n X_test_full, y_test_full, features = data_pipeline.transform(df_test)\n\n shape = X_train.shape[1: ]\n config['features'] = list(features)\n logger.info(f'X_train shape {X_train.shape}')\n logger.info(f'X_val shape {X_val.shape}')\n logger.info(f'X_test shape {X_test.shape}')\n logger.info(f'X_train_full shape {X_train_full.shape}')\n\n logger.info(f'Grid search model, {model_name}')\n model_fn = nn_model_factory.get_builder(model_name)\n\n init_params = config['init_params'].copy()\n init_params['input_shape'] = 
shape\n init_params['n_classes'] = len(categories)\n best_params, best_score, results = validate_keras(model_fn, init_params,\n config['param_grids'],\n f1_score, X_train, y_train[:, 0],\n X_val, y_val[:, 0],\n config['callback_params'],\n config['scoring_params'],\n config['fit_params'],\n **config['gv_params'])\n save_cv_results(results, model_name, structure, PROC_NAME)\n\n logger.info(f'Best params: {best_params}')\n logger.info(f'Best score: {best_score}')\n config['best_params'] = best_params\n final_params = init_params.copy()\n final_params.update(best_params)\n\n logger.info(f'Fitting model, {model_name}')\n model, history = fit_keras(model_name, final_params, config['fit_params'],\n config['callback_params'],\n X_train_full, y_train_full, config['seed'])\n\n logger.info(f'Scoring model, {model_name}')\n score_keras(model, model_name, X_test_full, y_test_full, structure, PROC_NAME)\n save_history(history, model_name, structure, PROC_NAME)\n if arguments.save_models:\n save_model_keras(model, model_name, structure, PROC_NAME)\n\n save_vars(config, PROC_NAME, model_name, structure)\n ","repo_name":"oldhroft/kp_research","sub_path":"run_hld_nn.py","file_name":"run_hld_nn.py","file_ext":"py","file_size_in_byte":4232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
+{"seq_id":"25541411415","text":"from rest_framework import serializers\nfrom cookbook.recipes.models import Recipe\nfrom cookbook.ingredients.models import Ingredient\nfrom rest_framework.response import Response\nfrom cookbook.ingredients.serializers import IngredientSerializer\n\n\nclass RecipeSerializer(serializers.ModelSerializer):\n ingredients = serializers.ListField()\n\n class Meta:\n model = Recipe\n fields = ('id', 'title', 'instructions', 'ingredients')\n\n def create(self, validated_data):\n recipe_instance = Recipe(\n title=validated_data['title'], instructions=validated_data['instructions'])\n recipe_instance.save()\n query_set = Ingredient.objects.filter(\n pk__in=validated_data['ingredients'])\n recipe_instance.ingredients.set(query_set)\n return recipe_instance\n\n def update(self, instance, validated_data):\n instance.title = validated_data.get('title', instance.title)\n instance.instructions = validated_data.get(\n 'instructions', instance.instructions)\n instance.ingredients = validated_data.get(\n 'ingredients', instance.ingredients)\n instance.save()\n return instance\n","repo_name":"donghee214/django-graphene","sub_path":"cookbook/recipes/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"}
+{"seq_id":"408647265","text":"from __future__ import print_function\n\nimport csv\nimport sys\n\ncsv.field_size_limit(sys.maxsize)\n\nif len(sys.argv) < 4:\n print(\" Incorrect number of arguments\")\n print(\" 1. result file; 2. output path; 3. 
db file;\")\n sys.exit()\n\n# The tremolo file\nresult_file = sys.argv[1]\noutput_path = sys.argv[2]\ndb_file = sys.argv[3]\n\n# Lazy, we load everything in memory\nprint(\" Treating the SMILES list: \", end='')\nwith open(db_file, 'rt') as f:\n reader = csv.reader(f)\n header = True\n smiles_store = {}\n cn_store = {}\n mf_store = {}\n mw_store = {}\n cas_store = {}\n pm_store = {}\n for row in reader:\n if header:\n unpd_id_pos = row.index('UNPD_ID')\n smiles_pos = row.index('SMILES')\n cn_pos = row.index('cn')\n mf_pos = row.index('mf') \n mw_pos = row.index('mw') \n cas_pos = row.index('cas')\n pm_pos = row.index('PARENTMASS')\n header = False\n else:\n smiles_store[row[unpd_id_pos]] = row[smiles_pos]\n cn_store[row[unpd_id_pos]] = row[cn_pos].replace(\"\\\"\",\"\\'\")\n mf_store[row[unpd_id_pos]] = row[mf_pos]\n mw_store[row[unpd_id_pos]] = row[mw_pos]\n cas_store[row[unpd_id_pos]] = row[cas_pos]\n pm_store[row[unpd_id_pos]] = row[pm_pos]\n\nprint(\"treated {} compounds\".format(len(smiles_store)))\n\nprint(\" Treating the result file {}\".format(result_file))\n\nwith open(result_file, 'rt') as f:\n reader = csv.reader(f, delimiter='\\t')\n header = False\n output = []\n for row in reader:\n if not header:\n #scan_id_pos = row.index('#Scan#')\n unpd_id_pos = row.index('CompoundName')\n #score_id_pos = row.index('MQScore')\n #ppmError_id_pos = row.index('mzErrorPPM')\n #sharPeaks_id_pos = row.index('LibSearchSharedPeaks')\n row[0] = \"msclusterID\"\n header = row + [\"SMILES\"] + [\"chemicalNames\"] + [\"molecularFormula\"] + [\"molecularWeight\"] + [\"CAS\"] + [\"PARENTMASS\"]\n else:\n # store smiles and compoundName in the results\n temp = row \n temp += [smiles_store[row[unpd_id_pos]]]\n temp += [cn_store[row[unpd_id_pos]]]\n temp += [mf_store[row[unpd_id_pos]]]\n temp += [mw_store[row[unpd_id_pos]]]\n temp += [cas_store[row[unpd_id_pos]]]\n temp += [pm_store[row[unpd_id_pos]]]\n output += [temp]\n\nprint(\"\\nConverting tremolo result to CSV\") \nwith open(output_path+\"/tremolo_results.csv\", 'w') as tsvfile:\n writer = csv.writer(tsvfile, delimiter=',',\n quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n writer.writerow(header)\n for row in output:\n writer.writerow(row)\n","repo_name":"mwang87/NP3_MS_Workflow","sub_path":"src/ISDB_tremolo_NP3/Data/dbs/treat.py","file_name":"treat.py","file_ext":"py","file_size_in_byte":2777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"50"} +{"seq_id":"2348661701","text":"import numpy as np\nimport finalDataHandler as dataHandler\nimport fern as fern\nimport cDTLearner as cDTL \nimport matplotlib.pyplot as plt \nfrom os import listdir\n\n# 'wdbc.csv' / 'mushrooms.csv'\ndata_set_name = 'wdbc.csv'\n\n# load data\nX, Y, feature_names, codeing = dataHandler.read_data(f_name = data_set_name)\n\nif data_set_name == 'wdbc.csv':\n\tfeature_types = ['c']*X.shape[1]\nelif data_set_name == 'mushrooms.csv':\n\tfeature_types = ['d']*X.shape[1]\nelse:\n\tprint('wrong dataset')\n\texit()\n\n\n# split into training, testing, and validation sets\nX_train,Y_train,X_test,Y_test,X_vali,Y_vali = dataHandler.split_sets(X, Y, ratio_train=.7, ratio_test=.3)\n\n\n\n\n'''\nplease put our respective learner inside this loop.\nSave the training results.\n\nbefore computing the effect of noise, please find optimal \nparameters for your learner.\n\n'''\n\n\n\nfile_name = 'fern'\nextension = '_noise_'+data_set_name.split('.')[0]\n\nperformances = []\n\nnoise_spacing = np.logspace(0,1,10)-1\nfor l in noise_spacing:\n\tperf = []\n\tprint('noise 
level: {0}'.format(l))\n\tfor iteration in range(50):\n\t\t# PUT YOUR LEARNER HERE \n\t\tX_train_noisy = dataHandler.noise(X_train, level = l, random_seed = iteration)\n\t\t'''\n\t\ttree\n\t\t'''\n\t\t'''\n\t\ttree = cDTL.Learner(max_depth = 6, feature_names = feature_names)\n\t\ttree.learn(X_train_noisy,Y_train, feature_types)\n\t\tY_hat = [tree.predict(x) for x in X_test]\n\t\t'''\n\t\t# ferns\n\t\tif (data_set_name == 'mushrooms.csv'):\n\t\t\tferny = fern.Fern(X_train_noisy, Y_train, 6, continuous=False)\n\t\telse:\n\t\t\tferny = fern.Fern(X_train_noisy, Y_train, 6, continuous=True)\n\t\tY_hat = ferny.pred(X_test, Y_test)\n\n\t\t'''\n\t\tforest \n\t\t'''\n\t\t'''\n\t\tn_trees = 200\n\t\tforest = cDTL.ForestLearner(X,Y, feature_types, \n\t\t\t\tn_trees = n_trees, max_depth = 1, ensemble_tree_depth = 7,\n\t\t\t\tbatch_size = 20,\n\t\t\t\tfeature_names = feature_names)\n\t\tY_hat = [forest.predict(x) for x in X_test]\n\t\t'''\n\t\tperf.append(np.mean(Y_hat==Y_test))\n\t\tprint('\t\titeration: {0} | performance: {1}'.format(iteration+1,np.mean(Y_hat==Y_test)))\n\tperformances.append(np.mean(perf))\n\nperformances = np.array(performances)\n\nresult = np.vstack((noise_spacing,performances))\n\n\nif file_name+extension in [l.split('.')[0] for l in listdir()]:\n\tprint('think of a different file name >:( ')\nelse:\n\tnp.save(file_name+extension, result)\n\n\nplt.plot(performances)\nplt.show()\n\n","repo_name":"NilusvanEdel/EnsembleMethods","sub_path":"final_experiment/main_NOISE.py","file_name":"main_NOISE.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"50"}
+{"seq_id":"72900322075","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Jun 27 01:04:45 2021\r\n\r\n@author: LF\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jun 26 17:13:34 2021\r\n\r\n@author: LF\r\n\"\"\"\r\n\r\nimport glob\r\nimport numpy as np\r\nimport mne\r\nfrom mne.time_frequency import tfr_morlet\r\nfrom mne.stats import permutation_cluster_1samp_test\r\nimport matplotlib.pyplot as plt\r\n#path = r'F:\\data\\457_cue_epoch'\r\n#path = r'F:\\data\\HC_cue_epoch'\r\n#path = r'F:\\data\\457_stimulus_epoch'\r\npath = r'F:\\data\\HC_stimulus_epoch'\r\nfilepath = glob.glob(path+'/*.fif')\r\npower_zeros = []\r\ndef power_(epochs):\r\n epochs = epochs\r\n freqs = np.arange(4, 30, 1) \r\n n_cycles = freqs / 2.\r\n power = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles, use_fft=True,\r\n return_itc=False, decim=5, n_jobs=1,average=False,picks = ['Pz','Cz','Fz'])\r\n return power\r\n# return :'AverageTFR'\r\nfor i in range(0,2,2):\r\n a = filepath[i]\r\n epochs_Congruent = mne.read_epochs(filepath[i], preload=True)\r\n epochs_Congruent.drop_channels('EOG')\r\n power_Congruent = power_(epochs_Congruent)\r\n epochs_InCongruent = mne.read_epochs(filepath[i+1], preload=True)\r\n epochs_InCongruent.drop_channels('EOG')\r\n power_InCongruent = power_(epochs_InCongruent)\r\n epochs_power = power_InCongruent.data\r\nsensor_adjacency, ch_names = mne.channels.find_ch_adjacency(epochs_InCongruent.info,'eeg')\r\nuse_idx = [ch_names.index(ch_name.replace(' ', ''))\r\n for ch_name in power_InCongruent.ch_names]\r\nsensor_adjacency = sensor_adjacency[use_idx][:, use_idx]\r\nassert sensor_adjacency.shape == \\\r\n (len(power_InCongruent.ch_names), len(power_InCongruent.ch_names))\r\nassert epochs_power.data.shape == (len(epochs_power),\r\n len(power_InCongruent.ch_names),\r\n len(power_InCongruent.freqs),\r\n 
len(power_InCongruent.times))\r\nadjacency = mne.stats.combine_adjacency(\r\n sensor_adjacency, len(power_InCongruent.freqs), len(power_InCongruent.times))\r\n\r\n# our adjacency is square with each dim matching the data size\r\nassert adjacency.shape[0] == adjacency.shape[1] == \\\r\n len(power_InCongruent.ch_names) * len(power_InCongruent.freqs) * len(power_InCongruent.times)\r\nn_permutations = 5000 # Warning: 50 is way too small for real-world analysis.\r\nT_obs, clusters, cluster_p_values, H0 = permutation_cluster_1samp_test(\r\n epochs_power, n_permutations=n_permutations,\r\n threshold=14 , tail=0,\r\n adjacency=adjacency,\r\n out_type='mask', verbose=True)\r\nT_obs_plot = np.nan * np.ones_like(T_obs)\r\nfor c, p_val in zip(clusters, cluster_p_values):\r\n if p_val <= 0.05:\r\n T_obs_plot[c] = T_obs[c]\r\n\r\n# Just plot one channel's data\r\nch_idx, f_idx, t_idx = np.unravel_index(\r\n np.nanargmax(np.abs(T_obs_plot)), epochs_power.shape[1:])\r\n# ch_idx = tfr_epochs.ch_names.index('MEG 1332') # to show a specific one\r\n\r\nvmax = np.max(np.abs(T_obs))\r\nvmin = -vmax\r\nplt.subplot(2, 1, 1)\r\nplt.imshow(T_obs[ch_idx], cmap=plt.cm.gray,\r\n aspect='auto', origin='lower', vmin=vmin, vmax=vmax)\r\nplt.imshow(T_obs_plot[ch_idx], cmap=plt.cm.RdBu_r,\r\n aspect='auto', origin='lower', vmin=vmin, vmax=vmax)\r\nplt.colorbar()\r\nplt.xlabel('Time (ms)')\r\nplt.ylabel('Frequency (Hz)')\r\nplt.title(f'Induced power')","repo_name":"LiFeng-SECUC/Sub-concussion-Analysis","sub_path":"组间备份.py","file_name":"组间备份.py","file_ext":"py","file_size_in_byte":3304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"38775840196","text":"\"\"\"\n\n Created on 6/27/2016 by Ben\n\n benuklove@gmail.com\n \n Starting from the top left and going to the bottom right,\n only moving right or down, how many routes are there through\n a 20 X 20 grid?\n\n I struggled for days to figure this out programmatically.\n I did notice Pascal's triangle and had to look up the combinatorics.\n\n\"\"\"\n\nfrom math import factorial\n\n# Dimension of grid\nn = 20\n\n# Number of paths are the central binomial coefficients\npaths = int((factorial(2 * n)) / ((factorial(n)) ** 2))\nprint(paths)\n","repo_name":"benuklove/ProjectEuler","sub_path":"Practice2/problem15.py","file_name":"problem15.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"50"} +{"seq_id":"34913694215","text":"class Solution:\n def reverse(self, n: int) -> int:\n if n < 0: re = -n\n if n >= 0: re = n\n new = int(str(re)[::-1])\n if new > 2147483647: return 0\n if n < 0: return -new\n return new\n\ndef main():\n print(reverse(-32))\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Sarthak2143/leetcode","sub_path":"src/Reverse_integer/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"5074293444","text":"# fun quiz\n# -------------\n# def start_game():\n# list_user_guess = []\n# score_correct_guess = 0\n# question_num = 1\n#\n# for key in questions:\n# print(key) # print out the questions\n# for i in options[1 - question_num]: # value at i position in each list\n# print(i)\n# user_guess = input(\"choice: \").upper() # make user input to upper case\n# list_user_guess.append(user_guess)\n# score_correct_guess += check_answer(questions.get(key), user_guess) # get pair value in 
dictionary\n# question_num += 1 # move to next index of options in option\n#\n# display_score(score_correct_guess, list_user_guess)\n#\n#\n# # -------------\n# def check_answer(correct_answer, user_guess):\n# if correct_answer == user_guess:\n# print(\"Correct!\")\n# return 1\n# else:\n# print(\"Incorrect!\")\n# return 0\n#\n#\n# # -------------\n# def display_score(correct_guess, list_user_guess):\n# print(\"\\n---------------\")\n# print(\"Result\")\n# print(\"---------------\")\n# print(\"Your answer: \", end=\" \") # list the answer to the right / no new line\n# for i in list_user_guess:\n# print(i, end=\" \")\n# print(\"\\nCorrect answer: \", end=\" \")\n# for key in questions:\n# print(questions.get(key), end=\" \")\n#\n# score = int((correct_guess / len(questions)) * 100)\n# print(\"\"\n# \"\"\n# \"\\nScore: \" + str(score) + \"%\")\n#\n#\n# # -------------\n# def play_again():\n# response = input(\"\\nWould you like to play again? (yes/no)\").upper()\n# if response == \"Yes\":\n# return True\n# elif response == \"NO\":\n# return False\n# else:\n# print(\"[ Invalid response ]\")\n# return False\n\ndef start_game():\n list_user_answer = []\n question_num = 1\n score_answer = 0\n for key in questions:\n print(key)\n for i in options[question_num - 1]:\n print(i)\n user_answer = input(\"choice: \").upper() # make user input upper case\n score_answer += check_answer(questions.get(key), user_answer)\n list_user_answer.append(user_answer)\n question_num += 1\n\n display_score(score_answer, list_user_answer)\n\n\ndef check_answer(correct_answer, user_answer):\n if correct_answer == user_answer:\n print(\"[ Correct ]\")\n return 1\n else:\n print(\"[ Incorrect ]\")\n return 0\n\n\ndef display_score(score_answer, list_user_answer):\n print(\"\\n=======\")\n print(\"Result\")\n print(\"=======\")\n print(\"Your answer: \", end=\" \") # end=\" \" means you display on one line / no new line\n for i in list_user_answer:\n print(i, end=\" \")\n print(\"\\nCorrect answer:\", end=\" \")\n for i in questions:\n print(questions.get(i), end=\" \")\n\n score = int((score_answer / len(questions)) * 100)\n\n print(\"\\nScore: \" + str(score) + \"%\")\n\n\ndef play_again():\n response = input(\"\\nWould you like to play again: (yes/no) \").upper()\n if response == \"YES\":\n return True\n elif response == \"NO\":\n return False\n else:\n print(\"\\nInvalid input\")\n return False # treat invalid input as \"no\" instead of returning None\n\n\nquestions = {\"\\n1. How many colors are there in rainbow? \": \"A\",\n \"\\n2. What does a thermometer measure? \": \"C\",\n \"\\n3. What fruits do raisins come from? \": \"B\",\n \"\\n4. A portrait is a picture of what? \": \"C\",\n \"\\n5. How many cents are in a quarter? \": \"A\"\n }\n\noptions = [[\"A. 7\", \"B. 6\", \"C. 5\"],\n [\"A. Width\", \"B. Length\", \"C. Temperature\"],\n [\"A. Apple\", \"B. Grapes\", \"C. Pineapples\"],\n [\"A. A nation\", \"B. An island\", \"C. A person\"],\n [\"A. 25 cents\", \"B. 20 cents\", \"C. 
15 cents\"]\n ]\n\n\nstart_game()\n#\nwhile play_again():\n start_game()\n\nprint(\"\\n== Good Game ==\")\n","repo_name":"Fransssss/pythonFunQuiz","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"3007200681","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def widthOfBinaryTree(self, root: TreeNode) -> int:\n if not root:\n return 0\n\n from queue import Queue\n\n node_queue = Queue()\n node_queue.put((root, 0, 0)) # (node, depth, width)\n res = 0\n pre_depth = 0\n left_index = 0\n pre_index = 0\n while not node_queue.empty():\n node, depth, index = node_queue.get()\n if depth > pre_depth:\n res = max(res, pre_index - left_index + 1)\n left_index = index\n if node.left:\n node_queue.put((node.left, depth + 1, 2 * index))\n if node.right:\n node_queue.put((node.right, depth + 1, 2 * index + 1))\n pre_index = index\n pre_depth = depth\n res = max(res, index - left_index + 1)\n\n return res\n","repo_name":"Terry-Ma/Leetcode","sub_path":"662-二叉树最大宽度.py","file_name":"662-二叉树最大宽度.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"42018874610","text":"#!/usr/bin/env python3\n\nfrom test_framework.test_framework import BitcoinTestFramework\nfrom test_framework.qtum import activate_mpos\nfrom test_framework.qtumconfig import COINBASE_MATURITY\nfrom test_framework.address import byte_to_base58\nfrom test_framework.messages import hash256\nimport time\nimport struct\n\nclass QtumCallContractTimestampTest(BitcoinTestFramework):\n def set_test_params(self):\n self.num_nodes = 1\n self.setup_clean_chain = True\n self.extra_args = [['-lastmposheight=999999']]\n\n def skip_test_if_missing_module(self):\n self.skip_if_no_wallet()\n\n def run_test(self):\n self.node = self.nodes[0]\n privkey = byte_to_base58(hash256(struct.pack('<I', 0)), 239)\n self.node.importprivkey(privkey)\n\n self.node.generatetoaddress(100 + COINBASE_MATURITY, \"qSrM9K6FMhZ29Vkp8Rdk8Jp66bbfpjFETq\")\n\n \"\"\"\n pragma solidity ^0.4.11;\n contract Example {\n function timestamp() external returns(uint) {\n return now;\n }\n }\n \"\"\"\n bytecode = \"60606040523415600b57fe5b5b60928061001a6000396000f30060606040526000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff168063b80777ea14603a575bfe5b3415604157fe5b6047605d565b6040518082815260200191505060405180910390f35b60004290505b905600a165627a7a7230582022b5728b8ca07de23857473e303660ad554d6344c64658ab692d741fa8753b380029\"\n self.contract_address = self.node.createcontract(bytecode)['address']\n self.node.generate(1)\n now = int(time.time())\n expected_now = int(self.node.callcontract(self.contract_address, \"b80777ea\")['executionResult']['output'], 16)\n print(now, expected_now)\n assert(expected_now == now or expected_now == now+1)\n activate_mpos(self.node)\n self.node.setmocktime(0)\n now = int(time.time())\n expected_now = int(self.node.callcontract(self.contract_address, \"b80777ea\")['executionResult']['output'], 16)\n print(now, expected_now)\n assert(expected_now == now or expected_now == now+1)\n\nif __name__ == '__main__':\n 
QtumCallContractTimestampTest().main()\n","repo_name":"qtumproject/qtum","sub_path":"test/functional/qtum_callcontract_timestamp.py","file_name":"qtum_callcontract_timestamp.py","file_ext":"py","file_size_in_byte":2160,"program_lang":"python","lang":"en","doc_type":"code","stars":1186,"dataset":"github-code","pt":"50"} +{"seq_id":"28382782979","text":"friends=input(\"please enter the 3 friends name with spaces:-\")\r\nfriends= friends.split(\" \")\r\nmyfile_read=open(\"data.txt\",\"r\")\r\ncontent=myfile_read.read()\r\nmyfile_read.close()\r\ncontent=content.split(\"\\n\")\r\nnearbyfriends=[]\r\nfor friend in friends:\r\n for new in content:\r\n if new==friend:\r\n nearbyfriends.append(friend)\r\nnearbyfriends=\"\\n\".join(nearbyfriends)\r\nmyfile_write=open(\"friendsnearby.txt\",\"w\")\r\nncontent=myfile_write.write(nearbyfriends)\r\nmyfile_write.close()\r\nmyfile_write=open(\"friendsnearby.txt\",\"r\")\r\nnscontent=myfile_write.read()\r\nmyfile_write.close()\r\nprint(nscontent)","repo_name":"GoddatiSaiRamCharan/python_codes_learn","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"3055717443","text":"\"\"\"registro_incidentes URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom incidentes.views import agregar_accion, agregar_incidente, agregar_persona, borrar_accion, borrar_incidente, detalle_incidente, editar_accion, editar_incidente\nfrom django.contrib import admin\nfrom django.urls import path\nfrom web.views import bienvenida\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', bienvenida, name=\"index\"),\n path(\"detalle_incidente_<int:id>/\", detalle_incidente, name=\"detalle\"),\n path(\"editar_incidente_<int:id>/\", editar_incidente),\n path(\"borrar_incidente_<int:id>/\", borrar_incidente),\n path(\"agregar_incidente/\", agregar_incidente),\n path(\"agregar_persona/\", agregar_persona),\n path(\"detalle_incidente_<int:id>/agregar_acciones/\", agregar_accion),\n path(\"detalle_incidente_<int:id>/editar_accion_<int:id_con>/\", editar_accion),\n path(\"detalle_incidente_<int:id>/borrar_accion_<int:id_con>/\", borrar_accion),\n]\n","repo_name":"ArmandoBerlanga/proyecto_hacsys","sub_path":"registro_incidentes/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
+{"seq_id":"18023141141","text":"from questions.public.ListNode import ListNode\n\n\nclass Solution:\n def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:\n head = tail = ListNode()\n carry = val = 0\n\n while carry or l1 or l2:\n val = carry\n if l1:\n val += l1.val\n l1 = l1.next\n if l2:\n val += l2.val\n l2 = l2.next\n carry, val = divmod(val, 10)\n tail.next = ListNode(val)\n tail = tail.next\n return head.next\n\n\nclass Solution2:\n def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:\n \"\"\"\n Second pass: pay attention to how head and tail are initialized\n :param l1:\n :param l2:\n :return:\n \"\"\"\n head, tail = ListNode(), ListNode()\n head.next = tail\n carry, val = 0, 0\n\n while carry or l1 or l2:\n val = carry\n if l1:\n val += l1.val\n l1 = l1.next\n if l2:\n val += l2.val\n l2 = l2.next\n carry, val = divmod(val, 10)\n tail.next = ListNode(val)\n tail = tail.next\n return head.next\n\n\nif __name__ == '__main__':\n a, b = divmod(10, 3)\n print(a)\n print(b)\n","repo_name":"sun10081/leetcode_practice_xiaorui","sub_path":"questions/1_100/1_10/2_add_two_numbers.py","file_name":"2_add_two_numbers.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
+{"seq_id":"26390006845","text":"#def cc(s):\n# for i in s:\n# if i==i.lower():\n## lst = i.upper()\n# else:\n# i = i.lower()\n# print(lst)\n# return \"\".join(lst)\n\n\n#cc(\"KaJaPf\")\n\ndef c(s):\n lst = [i.upper() if i == i.lower() else i.lower() for i in s]\n return ''.join(lst)\ns = \"AaBbCc\"\nss = c(s)\nprint(ss)","repo_name":"690737692/lianxi01.py","sub_path":"lianxi01.py","file_name":"lianxi01.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
+{"seq_id":"3624257125","text":"import pygame \nfrom constantes import *\n\n\nclass Item(pygame.sprite.Sprite):\n def __init__(self, x, y, width, height,type_item=None):\n super().__init__()\n self.width = width\n self.height = height\n self.type_item = type_item\n if self.type_item == 1:\n self.image = pygame.image.load(\"images/item/38.png\")\n elif self.type_item == 2:\n self.image = pygame.image.load(\"images/item/39.png\")\n elif self.type_item == 3:\n self.image = pygame.image.load(\"images/item/51.png\")\n elif 
self.type_item == 4:\n self.image = pygame.image.load(\"images/item/40.png\")\n self.image = pygame.transform.scale(self.image,(self.width,self.height))\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n self.collition_rect = pygame.Rect(self.rect)\n self.ground_collition_rect = pygame.Rect(self.rect)\n\n \n\n\n def draw(self, screen):\n \n \n if(DEBUG):\n pygame.draw.rect(screen,color=(255,0 ,0),rect=self.collition_rect)\n \n screen.blit(self.image,self.rect)\n ","repo_name":"Nicolas-Avila/Pygame","sub_path":"botin.py","file_name":"botin.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"18346546391","text":"from django.contrib.auth import login\nfrom django.db import connection\nfrom django.shortcuts import redirect, render\nfrom django.template import context\nfrom login.models import DB\nfrom datetime import datetime\nimport requests\n# Create your views here.\n\ndef homeView(request):\n if(request.session.has_key('userId')):\n context = {\n \"firstName\":\"\",\n \"btcAmount\":\"\",\n \"accountBalance\":\"\",\n \"investmentAmount\":\"\",\n \"netgainloss\":\"\",\n \"t1\":0,\n \"t2\":0,\n \"ans\":\"\",\n \"userType\":\"\"}\n\n # if(request.session.get('loggedIn') == False):\n # return redirect('login')\n id = request.session.get('userId')\n context[\"userType\"] = request.session.get('userType')\n db = DB()\n context[\"id\"] = id\n selectUsername = \"select firstName from client where id=(%s)\"\n errorMsg = \"could not select required values\"\n param = (id,)\n clientRowUsername =db.selectPrepared(selectUsername, param, errorMsg)\n if clientRowUsername:\n context[\"firstName\"]=clientRowUsername[0][0]\n\n selectInvestment = \"select investmentAmount from portfolio where id=(%s)\"\n errorMsg = \"could not select required values\"\n param = (id,)\n clientRowInv=db.selectPrepared(selectInvestment,param, errorMsg)\n if clientRowInv:\n context[\"investmentAmount\"]=clientRowInv[0][0]\n\n selectTypeQuery = \"select btcAmount, accountBalance from wallet where id=(%s)\"\n errorMsg = \"could not select required values\"\n param = (id,)\n clientRow = db.selectPrepared(selectTypeQuery, param, errorMsg)\n if clientRow:\n context[\"btcAmount\"] = clientRow[0][0]\n context[\"accountBalance\"] = clientRow[0][1]\n \n\n resQuery=\"select totalBtc from portfolio where id=(%s)\"\n param = (id,)\n errorMsg = \"could not fetch total bitcoins homeview\"\n clientRow1 = db.selectPrepared(resQuery, param, errorMsg)\n if clientRow1:\n context[\"t1\"] = clientRow1[0][0]\n \n selectInvestment=\"select investmentAmount from portfolio where id=(%s)\"\n param = (id,)\n clientRow2 = db.selectPrepared(selectInvestment, param, errorMsg)\n if clientRow2:\n context[\"t2\"] = clientRow2[0][0]\n response = requests.get('https://api.coindesk.com/v1/bpi/currentprice.json')\n btcRateJson = response.json()\n currentBtcRate = btcRateJson['bpi']['USD']['rate_float']\n\n currentRate=currentBtcRate\n #assuming the currentRate value for the bitcoin as 10.\n ans=currentRate*(context[\"t1\"])-(context[\"t2\"])\n if(ans>0):\n context[\"ans\"]=\"Profit \"+str(ans)\n else:\n context[\"ans\"]=\"Loss \"+str(ans)\n\n return render(request, 'homePage.html', context)\n else:\n return redirect(\"/\")\n\n\n\n# update password for the user\ndef updateProfile(userType, firstName, lastName, phoneNumber, newPassword, id):\n \n db = DB()\n if userType == 'client':\n updateQuery = \"update client set firstName=(%s), 
lastName=(%s), phoneNumber=(%s) where id=(%s)\"\n else:\n updateQuery = \"update trader set firstName=(%s), lastName=(%s), phoneNumber=(%s) where id=(%s)\"\n \n params = (firstName, lastName, phoneNumber, id)\n errorMsg = \"could not edit profile details\"\n\n row1 = db.insertPrepared(updateQuery, params, errorMsg)\n\n updateQuery = \"update login set password=(%s) where id=(%s)\"\n errorMsg = \"could not update password\"\n params = (newPassword, id)\n\n row2 = db.insertPrepared(updateQuery, params, errorMsg)\n if row1 and row2:\n return True\n return False\n\n#function to verify old password for editing\ndef verifyPassword(oldPassword, id):\n selectPassword = \"select password from login where id=(%s)\"\n errorMsg = \"could not find old password\"\n param = (id,)\n\n db = DB()\n row = db.selectPrepared(selectPassword, param, errorMsg)\n\n if row[0][0] == oldPassword:\n return True\n return False\n\n#function to verify Username while buying\ndef verifyUsername(newusername,id):\n selectUsername = \"select username from users where id=(%s)\"\n errorMsg = \"could not find Username\"\n\n params = (id,)\n db = DB()\n row = db.selectPrepared(selectUsername,params,errorMsg)\n\n if row[0][0] == newusername:\n return True\n return False\n\n#function to check if user has sufficient balance\ndef verifyBalance(enteredfiat,id):\n selectBalance = \"select accountBalance from wallet where id=(%s)\"\n errorMsg = \"could not find Username\"\n\n params = (id,)\n db = DB()\n row = db.selectPrepared(selectBalance,params,errorMsg)\n\n if row[0][0]>=enteredfiat:\n return True\n return False\n\n#function to update wallet after buy operation\ndef updatewallet(finalbitcoins,enteredfiat,newusername, updateWalletUserId):\n selectId = \"select id from users where username=(%s)\"\n errorMsg = \"could not find Id\"\n params = (newusername,)\n db = DB()\n row = db.selectPrepared(selectId,params,errorMsg)\n userId = row[0][0]\n\n if str(userId) != updateWalletUserId:\n updateClientBtc = \"update wallet set btcAmount=btcAmount+(%s) where id=(%s)\"\n updateQuery = \"update wallet set accountBalance=accountBalance-(%s) where id=(%s)\"\n errorMsg = \"cannot update clients wallet for trader transaction\"\n params1 = (finalbitcoins,userId,)\n params2 = (enteredfiat,updateWalletUserId,)\n row = db.insertPrepared(updateClientBtc,params1,errorMsg)\n else:\n updateQuery = \"update wallet set btcAmount=btcAmount+(%s),accountBalance=accountBalance-(%s) where id=(%s)\"\n params2 = (finalbitcoins,enteredfiat,updateWalletUserId,)\n \n errorMsg = \"could not update wallet\"\n row = db.insertPrepared(updateQuery,params2,errorMsg)\n if row:\n return userId\n return False\n\n#function for adding transactions in db table\ndef addtransaction(context,id,commtype,enteredfiat,commamount,buttontype,finalbitcoins,btcrate, userId):\n\n db = DB()\n\n selectWallet = \"select id from wallet where id=(%s)\"\n errorMsg = \"could not find wallet id\"\n params = (id,)\n row = db.selectPrepared(selectWallet,params,errorMsg)\n walletId = row[0][0]\n status=\"Success\"\n now = datetime.now()\n temp = int(id)\n insertQuery = \"update portfolio set investmentAmount = investmentAmount+(%s), totalBtc = totalBtc+(%s) where id=(%s)\"\n tuple = (enteredfiat, finalbitcoins,temp)\n errorMsg = \"not working\"\n row = db.insertPrepared(insertQuery, tuple, errorMsg)\n if context[\"userType\"]== \"client\":\n\n insertQuery = \"Insert into transaction(clientId, traderId, commissionType, totalAmount, commissionAmount, orderType, status, date, btcAmount, btcRate, 
walletId) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n errorMsg = \"could not add transaction\"\n tradertype = None\n params = (id,tradertype,commtype,enteredfiat,commamount,buttontype,status,now,finalbitcoins,btcrate,walletId)\n row = db.insertPrepared(insertQuery,params, errorMsg) \n if row:\n return True\n return False\n\n elif context[\"userType\"]== \"trader\":\n\n insertQuery = \"Insert into transaction(clientId, traderId, commissionType, totalAmount, commissionAmount, orderType, status, date, btcAmount, btcRate, walletId) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n errorMsg = \"could not add transaction\"\n\n params = (userId,id,commtype,enteredfiat,commamount,buttontype,status,now,finalbitcoins,btcrate,walletId)\n row = db.insertPrepared(insertQuery,params, errorMsg) \n if row:\n return True\n return False\n\ndef editProfileView(request):\n if(request.session.has_key('userId')):\n db = DB()\n context = {\n \"firstName\" : \"\",\n \"lastName\" : \"\",\n \"phoneNumber\" : \"\",\n \"email\" : \"\",\n \"id\" : -1,\n \"click\" : False,\n \"changed\" : False,\n \"type\" : \"client\",\n }\n\n id = request.session.get('userId')\n userType = request.session.get('userType')\n context['id'] = id\n context[\"type\"] = userType\n\n if request.POST.get(\"epSubmit\"):\n context[\"click\"] = True\n newPassword = str(request.POST.get(\"newPassword\"))\n confirmPassword = str(request.POST.get(\"confirmPassword\"))\n oldPassword = str(request.POST.get(\"oldPassword\"))\n firstName = str(request.POST.get(\"firstName\"))\n lastName = str(request.POST.get(\"lastName\"))\n phoneNumber = str(request.POST.get(\"phoneNumber\"))\n if verifyPassword(oldPassword, id):\n if(newPassword == confirmPassword):\n if updateProfile(userType, firstName, lastName, phoneNumber, newPassword, id):\n context[\"changed\"] = True\n if context[\"type\"] == \"trader\":\n return render(request, 'traderTransactionHistory.html', context)\n\n\n if userType == 'client':\n selectQuery = \"select firstName, lastName, phoneNumber from client where id = (%s)\"\n else:\n selectQuery = \"select firstName, lastName, phoneNumber from trader where id = (%s)\"\n errorMsg = \"Could not find the particular user in edit profile\"\n params = (id,)\n clientRow = db.selectPrepared(selectQuery, params, errorMsg)\n\n if clientRow:\n context[\"firstName\"] = clientRow[0][0]\n context[\"lastName\"] = clientRow[0][1]\n context[\"phoneNumber\"] = clientRow[0][2]\n\n \n selectEmail = \"select username from users where id = (%s)\"\n param = (id,)\n emailRow = db.selectPrepared(selectEmail, param, errorMsg)\n \n if emailRow:\n context[\"email\"] = emailRow[0][0]\n \n return render(request, 'editProfile.html', context)\n else:\n return redirect(\"/\")\n \n\n\n#view for transaction history\n\ndef transactionHistoryView(request):\n if(request.session.has_key('userId')):\n return render(request, 'transactionHistory.html')\n else:\n return redirect(\"/\")\n\n\n#view for transaction history\ndef transactionHistoryByTraderView(request):\n return render(request, 'transactionHistory.html')\n\n\n#view for buy tab\ndef buyView(request):\n if(request.session.has_key('userId') == None):\n return redirect('/')\n db = DB()\n context = {\n \"accountbalance\" : \"\",\n \"click\" : False,\n \"nameverified\" : False,\n \"balanceverified\" : False,\n \"btcrate\" : -1,\n \"commtype\" : \"\",\n \"commrate\" : \"\",\n \"updatedwallet\" : False,\n \"commissionverified\" : False,\n \"userType\" : \"client\",\n \"transactionadded\" : False,\n }\n\n id = 
request.session.get('userId')\n context[\"id\"] = id\n\n userType = request.session.get('userType')\n context[\"userType\"]= userType\n response = requests.get('https://api.coindesk.com/v1/bpi/currentprice.json')\n btcRateJson = response.json()\n currentBtcRate = btcRateJson['bpi']['USD']['rate_float']\n context['btcrate'] = currentBtcRate\n\n\n selectAccountBalance = \"select accountBalance from wallet where id= (%s)\"\n errorMsg = \"Could not find accountBalance\"\n\n params = (id,)\n accountBlance = db.selectPrepared(selectAccountBalance,params, errorMsg)\n if accountBlance:\n context[\"accountbalance\"] = accountBlance[0][0]\n\n if request.POST.get(\"buysubmit\"):\n context[\"click\"] = True\n\n newusername = str(request.POST.get(\"username\"))\n enteredfiat = float(request.POST.get(\"fiatamt\"))\n commtype = str(request.POST.get(\"btcfiat\"))\n buttontype = str(request.POST.get(\"buysubmit\"))\n \n #find clientID for whom to buy bitcoins\n selectId = \"select id from users where username=(%s)\"\n errorMsg = \"could not find Id\"\n\n db = DB()\n params = (newusername,)\n row = db.selectPrepared(selectId,params,errorMsg)\n if row:\n userId = row[0][0]\n else:\n context[\"nameverified\"] = False\n return render(request, 'buy.html', context)\n\n \n #find commission rate for the user of trader\n selectUserCommType = \"select type from client where id=(%s)\"\n errorMsg = \"Could not find users commission type\"\n\n params = (userId,)\n commType = db.selectPrepared(selectUserCommType,params, errorMsg)\n if commType:\n context[\"commissionverified\"] = True\n context[\"commtype\"] = commType[0][0]\n\n if context[\"commtype\"] == \"silver\":\n selectUserCommRate = \"select commissionSilver from metadata;\"\n errorMsg = \"Could not find users commission rate\"\n\n commRate = db.select(selectUserCommRate, errorMsg)\n if commRate:\n context[\"commrate\"] = commRate[0][0]\n\n elif context[\"commtype\"] == \"gold\":\n selectUserCommRate = \"select commissionGold from metadata;\"\n errorMsg = \"Could not find users commission rate\"\n\n commRate = db.select(selectUserCommRate, errorMsg)\n if commRate:\n context[\"commrate\"] = commRate[0][0]\n\n commrate = float(context[\"commrate\"])\n btcrate = currentBtcRate\n finalbitcoins = 0.0\n\n if verifyUsername(newusername, userId):\n context[\"nameverified\"] = True\n if verifyBalance(enteredfiat,id):\n context[\"balanceverified\"] = True\n\n if commtype == \"fiat\":\n finalbitcoins = (enteredfiat*(1-(commrate)/100))/btcrate\n elif commtype == \"bitcoin\":\n finalbitcoins = (enteredfiat*(1-(commrate)/100))/btcrate\n\n commamount = (enteredfiat*commrate)/100\n userId = updatewallet(finalbitcoins,enteredfiat,newusername, id)\n if userId:\n context[\"updatedwallet\"] = True\n selectAccountBalance = \"select accountBalance from wallet where id=(%s)\"\n errorMsg = \"Could not find accountBalance\"\n params = (id,)\n accountBlance = db.selectPrepared(selectAccountBalance,params,errorMsg)\n\n if accountBlance:\n context[\"accountbalance\"] = accountBlance[0][0]\n if addtransaction(context,id,commtype,enteredfiat,commamount,buttontype,finalbitcoins,btcrate, userId):\n context[\"transactionadded\"] = True\n\n updateQuery = \"update metadata set totalBtc=totalBtc-(%s), totalCurrency=totalCurrency+(%s)\"\n errorMsg = \"could not update metadata\"\n db = DB()\n params = (finalbitcoins,enteredfiat,)\n db.insertPrepared(updateQuery,params, errorMsg)\n else:\n return render(request, 'buy.html', context)\n \n return render(request, 'buy.html', context)\n\n#view for sell 
tab\ndef sellView(request):\n if(request.session.has_key('userId')==None):\n return redirect(\"/\")\n context = {\n \"id\" : -1,\n \"verification\" : True,\n \"btcCap\" : False,\n \"userType\" : \"\",\n \"btcRate\" : -1,\n \"btcAmount\" : 0,\n \"click\" : False\n }\n id = request.session.get('userId')\n context[\"id\"] = id\n response = requests.get('https://api.coindesk.com/v1/bpi/currentprice.json')\n btcRateJson = response.json()\n currentBtcRate = btcRateJson['bpi']['USD']['rate_float']\n context['btcRate'] = currentBtcRate\n db = DB()\n userType = request.session.get('userType')\n context[\"userType\"]= userType\n\n selectAccountBtc = \"select btcAmount from wallet where id= (%s)\"\n errorMsg = \"Could not find accountBalance\"\n\n params = (id,)\n accountBlance = db.selectPrepared(selectAccountBtc,params, errorMsg)\n if accountBlance:\n context[\"btcAmount\"] = accountBlance[0][0]\n\n if request.POST.get(\"sellSubmit\"):\n context[\"click\"] = True\n username = str(request.POST.get(\"userName\"))\n sellBitcoins = float(request.POST.get(\"bitcoins\"))\n commType = request.POST.get(\"btcfiat\")\n\n #query to get id of client\n selectQuery = \"select id from users where username=(%s)\"\n errorMsg = \"couldnt find user\"\n\n params = (username,)\n row = db.selectPrepared(selectQuery,params, errorMsg)\n if row:\n clientId = row[0][0]\n else:\n context[\"verification\"] = False\n return render(request, 'sell.html', context)\n\n #query to check bitcoins in users wallet\n selectQuery = \"select id, btcAmount from wallet where id= (%s)\"\n errorMsg = \"could not fetch number bitcoins from wallet\"\n params = (clientId,)\n row = db.selectPrepared(selectQuery,params, errorMsg)\n if row:\n walletId = row[0][0]\n totalBitcoins = row[0][1]\n context[\"btcAmount\"] = totalBitcoins\n else:\n context[\"verification\"] = False\n return render(request, 'sell.html', context)\n\n if totalBitcoins < sellBitcoins:\n context[\"btcCap\"] = True\n return render(request, 'sell.html', context)\n\n #calculate remaining btc to update user wallet and also update bank wallet\n updateBtcUser = totalBitcoins - sellBitcoins\n currentRate = 10\n\n #get rate of user depending on type\n selectTypeQuery = \"select type from client where id=(%s)\"\n errorMsg = \"could not find type from client in sellView\"\n\n params = (clientId,)\n row = db.selectPrepared(selectTypeQuery,params, errorMsg)\n if row:\n userCategory = row[0][0]\n\n if userCategory == \"silver\":\n getRateQuery = \"select commissionSilver from metadata;\"\n else:\n getRateQuery = \"select commissionGold from metadata;\"\n\n errorMsg = \"cannot get the rate from metadata\"\n\n row = db.select(getRateQuery, errorMsg)\n if row:\n commissionRate = row[0][0]\n \n #need to update bitcoin rate here from coindesk api\n totalAmount = sellBitcoins * currentBtcRate\n commissionAmount = totalAmount * (commissionRate/100)\n metaCurrency = totalAmount - commissionAmount\n #total amount obtained after selling bitcoin\n\n #update user wallet\n updateBtcWalletQuery = \"update wallet set btcAmount=(%s) where id=(%s)\"\n errorMsg = \"could not update client wallet after sell\"\n\n params = (updateBtcUser,clientId,)\n row = db.insertPrepared(updateBtcWalletQuery,params, errorMsg)\n\n updateWalletFiatQuery = \"update wallet set accountBalance=accountBalance+(%s) where id=(%s)\"\n errorMsg = \"could not update user wallet for amount\"\n\n params = (metaCurrency,clientId,)\n row = db.insertPrepared(updateWalletFiatQuery,params, errorMsg)\n \n #add to transaction\n 
addtransaction(context, id, commType, totalAmount, commissionAmount, \"sell\", sellBitcoins, currentBtcRate, clientId)\n\n #add to metadata\n updateMetaQuery = \"Update metadata set totalBtc=totalBtc +(%s), totalCurrency=totalCurrency-(%s)\" \n errorMsg = \"cannot update metadata\"\n params = (sellBitcoins,metaCurrency,)\n row = db.selectPrepared(updateMetaQuery,params, errorMsg)\n\n selectAccountBtc = \"select btcAmount from wallet where id= (%s)\"\n errorMsg = \"Could not find accountBalance\"\n\n params = (id,)\n accountBlance = db.selectPrepared(selectAccountBtc,params, errorMsg)\n if accountBlance:\n context[\"btcAmount\"] = accountBlance[0][0]\n\n return render(request, 'sell.html', context)\n\n#view for wallet tab\ndef walletView(request):\n if(request.session.has_key('userId')==None):\n return redirect(\"/\")\n db = DB()\n context = {\n \"fiatbalance\" : \"\",\n \"btcbalance\" : \"\",\n \"type\" : \"\",\n \"addedMoney\" : False,\n }\n\n balance = request.POST.get(\"addamt\")\n id = request.session.get('userId')\n context[\"id\"] = id\n userType = request.session.get('userType')\n context[\"type\"] = userType\n\n selectAccountBalance = \"select btcAmount, accountBalance from wallet where id=(%s)\"\n errorMsg = \"Could not find accountBalance\"\n\n params = (id,)\n accountBlance = db.selectPrepared(selectAccountBalance,params, errorMsg)\n if accountBlance:\n context[\"btcbalance\"] = accountBlance[0][0]\n context[\"fiatbalance\"] = accountBlance[0][1]\n\n if request.POST.get(\"addamount\"):\n updateQuery = \"update wallet set accountBalance=accountBalance+(%s) where id=(%s)\"\n errorMsg = \"could not update balance\"\n db = DB()\n db.beginTransaction()\n params =(balance,id,)\n row1 = db.insertPrepared(updateQuery,params, errorMsg)\n if row1:\n context[\"addedMoney\"] = True\n now = datetime.now()\n #adding to wallet transactions\n insertQuery = \"Insert into walletTransactions(walletId, amount, date) values((%s),(%s),(%s))\"\n params = (id, balance, now,)\n errorMsg = \"cannot add into wallet history\"\n row2 = db.insertPrepared(insertQuery, params, errorMsg)\n if row2:\n db.commit()\n if not (row1 and row2):\n db.rollback()\n selectAccountBalance = \"select btcAmount, accountBalance from wallet where id=(%s)\"\n errorMsg = \"Could not find accountBalance\"\n params = (id,)\n accountBlance = db.selectPrepared(selectAccountBalance,params, errorMsg)\n if accountBlance:\n context[\"btcbalance\"] = accountBlance[0][0]\n context[\"fiatbalance\"] = accountBlance[0][1]\n\n return render(request, 'wallet.html', context)\n\ndef searchTraderView(request):\n context = {\n \"dict\":[],\n \"type\" : \"\",\n \"type\" : \"client\",\n \"click\" : False,\n \"id\":\"\",\n \"first\":\"\",\n \"last\":\"\",\n \"phone\":\"\",\n \"cell\":\"\",\n \"state\":\"\",\n \"city\":\"\",\n \"foundrec\": False,\n }\n\n if request.POST.get(\"searchtrader\"):\n context[\"click\"] = True\n name = str(request.POST.get(\"name\"))\n option = str(request.POST.get(\"dropdownoption\"))\n\n\n db=DB()\n if option==\"id\":\n selectName = \"select id,firstName,lastName,state,city,phoneNumber,cellNumber from trader where id=(%s)\"\n errorMsg = \"Could not find trader\"\n params = (name,)\n\n elif option==\"firstName\":\n selectName = \"select id,firstName,lastName,state,city,phoneNumber,cellNumber from trader where firstName=(%s)\"\n errorMsg = \"Could not find trader\"\n params = (name,)\n\n elif option==\"state\":\n selectName = \"select id,firstName,lastName,state,city,phoneNumber,cellNumber from trader where state=(%s)\"\n 
errorMsg = \"Could not find trader\"\n params = (name,)\n\n elif option==\"city\":\n selectName = \"select id,firstName,lastName,state,city,phoneNumber,cellNumber from trader where city=(%s)\"\n errorMsg = \"Could not find trader\"\n params = (name,)\n\n \n traderdetails = db.selectPrepared(selectName,params, errorMsg)\n\n searchDetails = []\n if traderdetails:\n context[\"foundrec\"] = True\n for i in traderdetails:\n temp={}\n temp[\"id\"] = i[0]\n temp[\"first\"] = i[1]\n temp[\"last\"] = i[2]\n temp[\"state\"] = i[3]\n temp[\"city\"] = i[4]\n temp[\"phone\"] = i[5]\n temp[\"cell\"] = i[6]\n searchDetails.append(temp)\n\n context[\"dict\"] = searchDetails\n\n return render(request, 'searchTrader.html', context)\n\n","repo_name":"ved18/BtcTrader","sub_path":"client/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":24142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"10427907232","text":"import numpy as np\nfrom scipy import stats\nimport matplotlib.pyplot as plt\nfrom matplotlib.figure import figaspect\nimport seaborn as sns\nfrom ipywidgets import interactive_output, IntSlider, Play, jslink\nfrom IPython.display import display\n\nnp.random.seed(1234)\nsample = np.random.chisquare(df=3, size=6)\nx = np.linspace(-2, 7, 50).reshape((-1, 1))\n\n\ndef plot(count):\n distributions = stats.norm(loc=sample[:count])\n fig = plt.figure(figsize=figaspect(1))\n ax = fig.gca()\n ax.plot(x, distributions.pdf(x))\n ax.plot(x, distributions.pdf(x).sum(axis=1), color='black', alpha=0.3)\n sns.rugplot(sample, color='black', ax=ax)\n ax.set(xlabel='x', xticks=(), xlim=(x.min(), x.max()), ylim=(0, 1))\n plt.show()\n\n\ndef show():\n count = IntSlider(value=1, min=1, max=sample.size)\n play = Play(interval=1000, value=1, min=1, max=sample.size, step=1)\n jslink((play, 'value'), (count, 'value'))\n output = interactive_output(plot, dict(count=count))\n display(play, output)\n","repo_name":"ScenesK/data-science-lecture","sub_path":"workspace/intermediate/my_functions/kernel_density_estimation/summation.py","file_name":"summation.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"12419494577","text":"import time\nimport datetime\nimport requests\nimport logging.handlers\n\nfrom pymongo.database import Database\nfrom utilities.utils import bearer_oauth, config, load_db\n\n\ndef setup_logging():\n \"\"\"\n Email logging setup\n :return: logger\n \"\"\"\n logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.DEBUG,\n filename='place_crawler.log')\n\n logger = logging.getLogger(__name__)\n smtp_handler = logging.handlers.SMTPHandler(\n mailhost=(config['MAIL_HOST'], config['MAIL_HOST_PORT']),\n fromaddr=config['EMAIL_ADDRESS'],\n toaddrs=[config['SENDTO']],\n subject='Place Crawler',\n credentials=(config['EMAIL_ADDRESS'], config['LOGGING_PASSWORD']),\n secure=()\n )\n\n logger.addHandler(smtp_handler)\n return logger\n\n\ndef update_place(tweet_id, place_id, db: Database):\n \"\"\"\n Resolves place.\n :param tweet_id: id of tweet\n :param place_id: id of place\n :param db: connection to remote database\n :return: whether Twitter API was hit\n \"\"\"\n place_collection = db['places']\n cached = place_collection.find_one({'place_id': place_id})\n\n if cached is not None and cached != []:\n return False\n\n response = 
requests.get(f'https://api.twitter.com/2/tweets?ids={tweet_id}&expansions=geo.place_id&place.fields'\n f'=contained_within,country,country_code,full_name,geo,id,name,place_type',\n auth=bearer_oauth)\n\n if response.status_code == 429:\n raise OverloadException()\n elif response.status_code != 200:\n raise ConnectionError('Cannot get (HTTP {}): {}'.format(response.status_code, response.text))\n\n db['places'].insert_one({'place_id': place_id, 'data': response.json()})\n return True\n\n\ndef crawl_places():\n \"\"\"\n Crawls all stored places for resolving.\n :return:\n \"\"\"\n db = load_db()\n table_size = db['rules_augmented'].count_documents({})\n rng = int(table_size / 10_000)\n\n for i in range(rng):\n raw = list(db['rules_augmented'].find().skip(skip=i * 10_000).limit(limit=10_000))\n\n for x in raw:\n if not x.get('includes', {}).get('places', []):\n continue\n tweet_id = x['data']['id']\n place_id = x['data'].get('geo', {}).get('place_id', None)\n\n hit_api = False\n if place_id is not None:\n hit_api = update_place(tweet_id, place_id, db)\n if hit_api:\n time.sleep(2)\n\n\ndef main(logger):\n \"\"\"\n Main function for crawling places.\n Contains logic for handling crawler errors.\n :param logger:\n :return:\n \"\"\"\n tries = 0\n has_errored = False\n while tries < 10:\n try:\n crawl_places()\n except OverloadException:\n time.sleep(2 ** tries)\n tries += 1\n has_errored = True\n except ConnectionError as e:\n logger.error('error', exc_info=e)\n exit(0)\n if has_errored:\n logger.warning(f'Sleeping for 15 minutes at {datetime.datetime.now()}')\n time.sleep(900)\n else:\n time.sleep(7_200)\n\n\nclass OverloadException(Exception):\n \"\"\"\n Custom exception for handling API overloads.\n \"\"\"\n pass\n\n\nif __name__ == '__main__':\n lg = setup_logging()\n while True:\n main(lg)\n","repo_name":"bcd00/Impact-RTC","sub_path":"utilities/crawlers/place_crawler.py","file_name":"place_crawler.py","file_ext":"py","file_size_in_byte":3377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"37550862499","text":"class Solution:\n def eraseOverlapIntervals(self, intervals: List[List[int]]) -> int:\n intervals.sort()\n prevEnd=intervals[0][1]\n ans=0\n for start, end in intervals[1:]:\n if start>=prevEnd:\n prevEnd=end\n else:\n ans+=1\n prevEnd=min(prevEnd,end)\n return ans ","repo_name":"rohanmandrekar/leetcode","sub_path":"Python/435. Non-overlapping Intervals.py","file_name":"435. 
Non-overlapping Intervals.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
+{"seq_id":"31157797815","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Nov 26 21:57:54 2013\r\n\r\n@author: Maxim\r\n\"\"\"\r\nfrom scalingFunctions import *\r\n\r\ndef get_bounds(x,delta,lb,ub):\r\n bounds = np.zeros([1,2])\r\n bounds[0,0] = max(lb,x-delta)\r\n bounds[0,1] = min(ub,x+delta)\r\n return bounds\r\n\r\ndef run_example_1d():\r\n fhigh = lambda x: (6.0*x-2.0)**2.0*np.sin(12.*x-4.)\r\n flow = lambda x: 0.5*fhigh(x) + 10.*(x-.5)+5\r\n \r\n tol = 1e-6\r\n iterMax = 20\r\n xl = 0.0\r\n xu = 1.0\r\n x0 = 0.5\r\n xdoe = np.array([xl,xu,0.3])\r\n \r\n err = tol+1\r\n nIter = 0\r\n \r\n x = np.linspace(-.1,1.1,50)\r\n yh = np.array([fhigh(_x) for _x in x])\r\n yl = np.array([flow(_x) for _x in x])\r\n \r\n delta = 0.5\r\n fscaled = ScalingFunction(fhigh,flow,'add',3,True)\r\n while err>tol and nIter<iterMax:\r\n fscaled.construct_scaling_model(x0)\r\n bnds = get_bounds(x0,delta,xl,xu)\r\n rslt = minimize(fscaled,x0,'SLSQP',bounds=bnds)\r\n xnew =rslt.x\r\n fnew = rslt.fun\r\n rho = fscaled.get_trust_region_ratio(xnew)\r\n x0 = xnew # continue from the new iterate\r\n nIter += 1 # count iterations so the loop terminates\r\n \r\n\r\n\r\nif __name__==\"__main__\":\r\n run_example_1d()","repo_name":"mishin/maxim-codes","sub_path":"VCM/numerical_example_1d.py","file_name":"numerical_example_1d.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
+{"seq_id":"30589255559","text":"# upload images. adapted from:\n# http://flask.pocoo.org/docs/0.12/patterns/fileuploads/\nimport os\nfrom flask import Flask, request, redirect, url_for, render_template, json, jsonify, make_response\nfrom werkzeug.utils import secure_filename\n# import tensorflow scripts\nimport mnist_tf\n\napp = Flask(__name__)\n\n# base64 used to decode the image data\nimport base64\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n # convert base64 data to image file\n # adapt from: http://blog.csdn.net/lxdcyh/article/details/4021476\n # and https://stackoverflow.com/questions/31410525/base64-uri-to-png-python\n image = request.values.get('imageBase64')\n #print(image)\n imgdata = base64.b64decode(image.split(\",\")[1])\n # here we define the img type: png\n # and the name of img is image.png \n # this means everytime the newest img will replace the old one\n # 'wb' here means write binary\n with open('uploads/image.png', 'wb') as fh:\n fh.write(imgdata)\n # get new 28*28 White/Black image\n reDoImage()\n # get predict result\n num = str(guess())\n print('num = '+num)\n # figure out exception 'No 'Access-Control-Allow-Origin' header'\n # adapt from: http://blog.csdn.net/kevin_qq/article/details/51761654\n response = make_response(num) \n response.headers['Access-Control-Allow-Origin'] = '*' \n response.headers['Access-Control-Allow-Methods'] = 'POST' \n response.headers['Access-Control-Allow-Headers'] = 'x-requested-with,content-type' \n return response\n return render_template(\"index.html\")\n\nimport numpy as np\ndef guess():\n # use Pillow Library get image\n image = Image.open('uploads/image_28.jpg')\n # convert img to numpy array\n inputs = np.asarray(image)\n # reshape array to [1,784]\n inputs = inputs.reshape(1, 784)\n #print(inputs)\n num = mnist_tf.predict(inputs)\n print(num[0])\n return num[0]\n\n# here use Pillow image library\nfrom PIL import Image\nimport PIL.ImageOps\ndef reDoImage():\n # first, read 'image.png' 
from the uploads folder\n    image = Image.open('uploads/image.png')\n    # Convert png to jpeg type using Pillow \n    # (from RGBA to RGB)\n    # adapt from https://stackoverflow.com/questions/43258461/convert-png-to-jpeg-using-pillow-in-python\n    bg = Image.new(\"RGB\", image.size, (255,255,255))\n    # Pillow treats an Image passed in the box slot as the paste mask, so the PNG's alpha channel is used here\n    bg.paste(image,image)\n    bg.save('uploads/image.jpg')\n    resizeImage()\n\ndef resizeImage():\n    bg = Image.open('uploads/image.jpg')\n    # Change the colorful image to black/white image\n    # adapt from: https://stackoverflow.com/questions/18777873/convert-rgb-to-black-or-white\n    gray = bg.convert('L')\n    #bw = gray.point(lambda x: 0 if x>128 else 255, '1')\n    bw = PIL.ImageOps.invert(gray)\n    # Change the image size to 28*28\n    # Here I use NEAREST filter\n    # more details: https://www.daniweb.com/programming/software-development/code/216637/resize-an-image-python\n    new_image = bw.resize((28, 28),Image.NEAREST)\n    # save the 28*28 image as image_28.jpg\n    new_image.save('uploads/image_28.jpg')\n\n# UPLOAD_FOLDER = './uploads'\n# ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])\n\n# app = Flask(__name__)\n# app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n# def allowed_file(filename):\n#     return '.' in filename and \\\n#            filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n# @app.route('/', methods=['GET', 'POST'])\n# def upload_file():\n#     if request.method == 'POST':\n#         # check if the post request has the file part\n#         if 'file' not in request.files:\n#             flash('No file part')\n#             return redirect(request.url)\n#         file = request.files['file']\n#         # if user does not select file, browser also\n#         # submit a empty part without filename\n#         if file.filename == '':\n#             flash('No selected file')\n#             return redirect(request.url)\n#         if file and allowed_file(file.filename):\n#             filename = secure_filename(file.filename)\n#             file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n#             return redirect(url_for('uploaded_file', filename=filename))\n#     return render_template(\"index.html\")\n\n# @app.route('/uploaded_file')\n# def uploaded_file():\n#     return 'Thanks for uploading'","repo_name":"TangqiFeng/TensorFlow_MNIST-ML","sub_path":"webapp.py","file_name":"webapp.py","file_ext":"py","file_size_in_byte":4368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"22349139994","text":"import numpy as np, cv2\n\ndef onMouse(event, x, y, flags, param = None) :\n    global title\n    pt = (x, y)\n    pt2 = (x + 30, y + 30)\n    if event == cv2.EVENT_LBUTTONDOWN :\n        cv2.circle(image, pt, 5, 100, 1)\n    elif event == cv2.EVENT_RBUTTONDOWN :\n        cv2.rectangle(image, pt, pt2, 100, 2) # pt + (30, 30) cannot be used here: pt is a tuple of ints, so pt + (30, 30) gives (x, y, 30, 30) \n    cv2.imshow(title, image) # with the earlier indentation this ran only when the right button was clicked\n\nimage = np.ones((300, 300), np.uint8) * 255\n\ntitle = 'Draw Event'\ncv2.namedWindow(title)\ncv2.imshow(title, image)\ncv2.setMouseCallback(title, onMouse)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n","repo_name":"NetTatsu/computer_vision_study","sub_path":"Task/ex_4-7-2.py","file_name":"ex_4-7-2.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"3845907635","text":"from django.shortcuts import render, get_object_or_404\r\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\r\nfrom django.core import serializers\r\nfrom django.http import JsonResponse\r\nimport threading\r\nfrom apis.models import Api \r\nfrom .models import Status\r\nimport requests\r\nimport 
xml.etree.ElementTree as ET\r\nimport datetime\r\n\r\n# Create your views here.\r\ndef index(request):\r\n apis_list = Api.objects.all()\r\n total_apis = apis_list.count()\r\n paginator = Paginator(apis_list, 100)\r\n page = request.GET.get('page')\r\n try:\r\n apis = paginator.page(page)\r\n except PageNotAnInteger:\r\n apis = paginator.page(1)\r\n except EmptyPage:\r\n apis = paginator.page(paginator.num_pages)\r\n \r\n context = {\r\n 'apis' : apis_list,\r\n 'apis_num_pages' : apis.paginator.num_pages\r\n }\r\n \r\n return render(request, 'statuses/index.html', context)\r\n\r\n #response = requests.get('http://127.0.0.1:8000/articles/detail/')\r\n #apis = Api.objects.all()\r\n\r\n # 20분에 한번씩 check 하고 DB에 저장\r\n # for api in apis:\r\n # response = requests.get(api.api_url)\r\n # status_code = ET.fromstring(response.text).findtext(\".//CODE\")\r\n # status = Status(api=api, status=status_code)\r\n # status.save()\r\n\r\n #response = requests.get('http://openapi.seoul.go.kr:8088/7a414542756b316d3132377954467477/xml/MonthlyAverageAirQuality/1/5/201212')\r\n # print(response.text)\r\n # xml_root = ET.fromstring(response.text)\r\n #print(xml_root)\r\n # print((response.json())['MonthlyAverageAirQuality']['RESULT']['CODE'])\r\n # CODE = (response.json())['MonthlyAverageAirQuality']['RESULT']['CODE']\r\n # print(response.status_code)\r\n #for child in xml_root:\r\n # print(child.tag, child.attrib)\r\n \r\n # print(xml_root.find('RESULT/CODE').text)\r\n #print(xml_root[0].text)\r\n # context = {\r\n # 'status_check': 'success',\r\n # }\r\n\r\n \r\n # return render(request, 'statuses/index.html', context)\r\n\r\n\r\ndef detail(request, pk):\r\n # 추후 누적값을 저장할 코드 구현 --> Status app (DB 저장)\r\n api =get_object_or_404(Api,pk=pk)\r\n #print(api.download_users)\r\n context = {\r\n 'msg' : 'success',\r\n 'api_name' : api.api_name,\r\n 'api_url' : api.api_url,\r\n 'latest_modified_date' : api.latest_modified_date,\r\n 'copyright' : api.copyright,\r\n 'copyright_range' : api.copyright_range,\r\n 'api_file' : api.api_file,\r\n 'download_users' : api.download_users.all().count(),\r\n }\r\n return JsonResponse(context)\r\n\r\n\r\ndef update(request, api_pk, api_status):\r\n api = get_object_or_404(Api, pk=api_pk)\r\n status = Status(api=api, status=api_status)\r\n status.save()\r\n context = {\r\n 'status_check': 'success',\r\n }\r\n return JsonResponse(context)\r\n\r\ndef check(request):\r\n apis_list = Api.objects.all()\r\n total_apis = apis_list.count()\r\n paginator = Paginator(apis_list, 100)\r\n page = request.GET.get('page')\r\n\r\n try:\r\n apis = paginator.page(page).object_list\r\n except PageNotAnInteger:\r\n apis = paginator.page(1).object_list\r\n except EmptyPage:\r\n apis = paginator.page(paginator.num_pages).object_list\r\n \r\n apis_json = serializers.serialize('json',apis)\r\n context = {\r\n 'apis' : apis_json\r\n }\r\n return JsonResponse(context)","repo_name":"sehooh5/TIL","sub_path":"Project/PythonProject/ihub/statuses/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"39267531734","text":"from bftdetector import BFTDetectorOption\nfrom bftdetector import BFTDetectorLauncherAntiAdblocker\n\nif __name__ == '__main__':\n option = BFTDetectorOption()\n option.test_name = 'latimes'\n page = 'https://www.latimes.com/business/story/2019-12-19/boeing-spacex-spacecraft-parachutes'\n bftd = BFTDetectorLauncherAntiAdblocker(option=option, page=page)\n 
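# A minimal usage sketch: assuming the bftdetector API behaves as its names\n    # suggest, perform_analysis() builds the anti-adblock test cases for the\n    # configured page and start_test() then runs them.\n    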
bftd.perform_analysis()\n bftd.start_test()","repo_name":"jspaper22/bftdetector","sub_path":"examples/latimes.py","file_name":"latimes.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"27042369510","text":"from django.http import HttpResponse\nimport datetime\nfrom decouple import config\n\n\naaaa = 1\nuptime = datetime.datetime.now()\n\n\n# Create your views here.\ndef current_datetime(request):\n SECRET_KEY = config(\"TEST_KEY\", default=\"123\")\n now = datetime.datetime.now()\n global aaaa\n aaaa += 1\n html = f\"<html><body>It is now {now}. test key = {SECRET_KEY}, aaaa = {aaaa}, uptime = {now - uptime}</body></html>\"\n return HttpResponse(html)","repo_name":"Cris-White-Fox/bot-rp-finder","sub_path":"myapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"6956978789","text":"#invertMatrix.py\nimport numpy as np\n#create vertical arrays\nx1 = np.array([1,4,6])[:, None]\nx2 = np.array([2,1,8])[:, None]\nx3 = np.array([1,5,6])[:, None]\n\nx1 = np.matrix(x1)\nx2 = np.matrix(x2)\nx3 = np.matrix(x3)\n\n#join vectors into a 3x3 matrix\nX = np.concatenate((x1, x2, x3), axis = 1)\ninvert_X = X.getI()\ninvert_X = np.round(invert_X, 2)\n\nprint(\"Array 1\\n\", x1)\nprint(\"Array 2\\n\", x2)\nprint(\"Array 3\\n\", x3)\nprint(\"Matrix of x1, x2, x3\\n\", X)\nprint(\"Inverted Matrix\\n\", invert_X)","repo_name":"MattKrepp1/Econ-411","sub_path":"Learn-Python-for-Stats-and-Econ-master/In Class Projects/In Class Examples Spring 2019/Section 7/invertMatrix.py","file_name":"invertMatrix.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"29618776110","text":"\"\"\"ecommerce URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nfrom django.contrib import admin\nfrom django.urls import path, include\n\n#from . import \n\nfrom . 
import views\n# from products.views import (\n#     ProductListView, \n#     product_list_view, \n#     ProductDetailView, \n#     product_detail_view,\n#     ProductFeaturedListView,\n#     ProductFeaturedDetailView,\n#     ProductDetailSlugView)\n\n#from carts.views import cart_home\n\nfrom .views import (\n    home_page, \n    about_page, \n    contact_page, \n    login_page, \n    logout_view,\n    register_page)\n\nurlpatterns = [\n    path('', views.home_page, name='home'),\n    path('about/', views.about_page, name='about'),\n    path('contact/', views.contact_page, name='contact'),\n    path('login/', views.login_page, name='login'),\n    path('logout/', views.logout_view, name='logout'),\n    \n    \n    path('register/', views.register_page, name='register'),\n    path('products/', include(('products.urls', 'products'), namespace='products')),\n    path('search/', include(('search.urls', 'search'), namespace='search')),\n    path('cart/', include(('carts.urls', 'cart'), namespace='cart')),\n\n    #path('cart/', cart_home, name='cart'),\n    # path('featured/', ProductFeaturedListView.as_view()),\n    # path('featured/<int:pk>/', ProductFeaturedDetailView.as_view()),\n    # path('products/', ProductListView.as_view()),\n    # path('products-fbv/', product_list_view),\n    \n    # path('products/<slug:slug>/', ProductDetailSlugView.as_view()),\n    \n    # #path('products/<int:pk>/', ProductDetailView.as_view()),\n    # path('products-fbv/<int:pk>/', product_detail_view),\n    \n    path('admin/', admin.site.urls),\n]\n\nif settings.DEBUG:\n    urlpatterns = urlpatterns + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n\n    urlpatterns = urlpatterns + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"Drackus001/ecommerce","sub_path":"src/ecommerce/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"30537158247","text":"import asyncio\r\n\r\nimport bili_statistics\r\nimport utils\r\nfrom reqs.lotteries_raffle_handler import LotteriesRaffleHandlerReq\r\nfrom tasks.utils import UtilsTask\r\nfrom .base_class import Forced, DontWait, Multi\r\n\r\n\r\nclass LotteriesRaffleJoinTask(Forced, DontWait, Multi): # handles pushing\r\n    TASK_NAME = 'join_lotteries_raffle'\r\n\r\n    @staticmethod\r\n    async def check(user, real_roomid, sleep_time=0):\r\n        await asyncio.sleep(sleep_time) # artificial delay\r\n        json_rsp = await user.req_s(LotteriesRaffleHandlerReq.check, user, real_roomid)\r\n\r\n        next_step_settings = []\r\n        for raffle in json_rsp['data']['guard']:\r\n            raffle_id = raffle['id']\r\n            max_wait = raffle['time']\r\n            privilege_type = raffle['privilege_type']\r\n\r\n            if privilege_type != 1 and max_wait >= 25 \\\r\n                    and (not bili_statistics.is_raffleid_duplicate(raffle_id)):\r\n                print('Got guard raffle id:', raffle_id)\r\n                raffle_data = {\r\n                    'raffle_id': raffle_id,\r\n                    'room_id': real_roomid,\r\n                    'raffle_type': 'GUARD',\r\n                    'end_time': max_wait + utils.curr_time(),\r\n                    'other_raffle_data': raffle\r\n                }\r\n                next_step_setting = (-2, (0, 0), raffle_data)\r\n                next_step_settings.append(next_step_setting)\r\n                bili_statistics.add2raffle_ids(raffle_id)\r\n\r\n        for raffle in json_rsp['data']['pk']:\r\n            raffle_id = raffle['id']\r\n            max_wait = raffle['time']\r\n\r\n            if max_wait >= 25 and (not bili_statistics.is_raffleid_duplicate(raffle_id)):\r\n                print('Got PK raffle id:', raffle_id)\r\n                raffle_data = {\r\n                    'raffle_id': raffle_id,\r\n                    'room_id': real_roomid,\r\n                    'raffle_type': 'PK',\r\n                    'end_time': max_wait + utils.curr_time(),\r\n                    'other_raffle_data': raffle\r\n                }\r\n                
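# Same scheduling convention as the guard branch above: the tuple appears to\r\n                # encode (task id, delay window, payload) for the downstream scheduler.\r\n                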
next_step_setting = (-2, (0, 0), raffle_data)\r\n                next_step_settings.append(next_step_setting)\r\n                bili_statistics.add2raffle_ids(raffle_id)\r\n\r\n        for raffle in json_rsp['data']['gift']:\r\n            raffle_id = raffle['raffleId']\r\n            max_wait = raffle['time']\r\n            gift_id = int(raffle['gift_id'])\r\n            if max_wait >= 25 and gift_id in (30405, 30406, 30448) \\\r\n                    and (not bili_statistics.is_raffleid_duplicate(raffle_id)):\r\n                print('Got TV raffle id:', raffle_id)\r\n                raffle_data = {\r\n                    'raffle_id': raffle_id,\r\n                    'room_id': real_roomid,\r\n                    'raffle_type': 'TV',\r\n                    'end_time': max_wait + utils.curr_time(),\r\n                    'other_raffle_data': raffle\r\n                }\r\n                next_step_setting = (-2, (0, 0), raffle_data)\r\n                next_step_settings.append(next_step_setting)\r\n                bili_statistics.add2raffle_ids(raffle_id)\r\n        return next_step_settings\r\n\r\n    @staticmethod\r\n    async def work(user, raffle_data: dict):\r\n        bili_statistics.add2joined_raffles(raffle_data['raffle_type'], user.id)\r\n        await UtilsTask.send2yj_monitor(user, raffle_data)\r\n\r\n\r\nclass LotteriesRaffleLoadTask(Forced, DontWait, Multi): # handles loading, i.e. saving the data; it does not push\r\n    TASK_NAME = 'load_lotteries_raffle'\r\n\r\n    @staticmethod\r\n    async def check(_, __, raffle_id):\r\n        if not bili_statistics.is_raffleid_duplicate(raffle_id):\r\n            print('Recorded raffle id:', raffle_id)\r\n            bili_statistics.add2raffle_ids(raffle_id)\r\n        return None\r\n\r\n    # never executed\r\n    @staticmethod\r\n    async def work():\r\n        pass\r\n","repo_name":"yjqiang/YjMonitor","sub_path":"monitor/tasks/lotteries_raffle_handler.py","file_name":"lotteries_raffle_handler.py","file_ext":"py","file_size_in_byte":3862,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"50"} +{"seq_id":"5048259734","text":"from spi import *\r\nfrom spi.xml import marshall\r\n\r\nstart = datetime.datetime(2014, 4, 25, 6, 0, 0)\r\nend = datetime.datetime(2014, 4, 25, 13, 0, 0)\r\nschedule = Schedule(Scope(start, end), originator='Global Radio')\r\ninfo = ProgrammeInfo()\r\ninfo.schedules.append(schedule)\r\n\r\n# programme\r\nprogramme = Programme('crid://www.capitalfm.com/4772/1190223', 1190223)\r\n\r\nprogramme.names.append(ShortName('B\\'fast'))\r\nprogramme.names.append(MediumName('Breakfast'))\r\nprogramme.names.append(LongName('Capital Breakfast'))\r\n\r\nlocation = Location()\r\nlocation.times.append(Time(datetime.datetime(2014, 4, 25, 6, 0, 0, 0), datetime.timedelta(hours=4), \r\n    actual_time=datetime.datetime(2014, 4, 25, 6, 0, 0, 0), actual_duration=datetime.timedelta(hours=4)))\r\nprogramme.locations.append(location)\r\n\r\nprogramme.descriptions.append(ShortDescription('Forget the coffee, Capital gives you the perfect morning pick-me- up with a blend of the latest hits, travel news and incomparable morning banter.'))\r\n\r\nprogramme.genres.append('urn:tva:metadata:cs:ContentCS:2002:3.6.8')\r\nprogramme.genres.append('urn:tva:metadata:cs:IntentionCS:2002:1.1')\r\n\r\nprogramme.memberships.append(Membership('crid://www.capitalfm.com/4772', 4772))\r\n\r\nprogramme.links.append(Link('mailto:capital.breakfast@capitalfm.com', description='Email the Capital Breakfast team!'))\r\nprogramme.links.append(Link('http://www.capitalfm.com/on-air/breakfast-show/'))\r\n\r\nevent = ProgrammeEvent('crid://thisisglobal.com/4772/1190223/788946', 788946)\r\nevent.names.append(ShortName('Pun'))\r\nevent.names.append(MediumName('No.1 Pun'))\r\nevent.names.append(LongName('London\\'s No. 
1 Pun'))\r\nevent_location = Location(times=[RelativeTime(datetime.timedelta(hours=3, minutes=10), datetime.timedelta(minutes=25))])\r\nevent.locations.append(event_location)\r\nevent.descriptions.append(ShortDescription('Can you come up with London\\'s No.1 Pun for our story of the day?'))\r\nprogramme.events.append(event)\r\n\r\nschedule.programmes.append(programme)\r\nprint(marshall(info, indent='\\t'))\r\n","repo_name":"magicbadger/python-hybridspi","sub_path":"examples/build_xml_programmeinfo.py","file_name":"build_xml_programmeinfo.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"14660428669","text":"\"\"\"\nWebsite: https://the-internet.herokuapp.com/large\nCreated: 2/16/2021\nNotes:\n Connected Page Object Script - /pageObjects/LargeDeepDOMPage.py\n\"\"\"\n\nimport time\nimport pytest\nfrom utilities.BaseClass import BaseClass\nfrom pageObjects.LargeDeepDOMPage import LargeDeepDOMPage\n\nclass TestLargeDeepDOM(BaseClass):\n def test_large_deep_dom(self):\n # Enter the Page\n log = self.getLogger()\n largeDeepDOM_page = LargeDeepDOMPage(self.driver)\n largeDeepDOM_page.largeDeepDOM_LinkText().click()\n\n\n # Verify the Header\n header_text = largeDeepDOM_page.largeDeepDOM_HeaderText().text\n assert (\"Large & Deep DOM\" in header_text)\n log.info(\"Header: \" + header_text)\n\n\n # Verify the URL\n url = self.driver.current_url\n assert url == \"https://the-internet.herokuapp.com/large\"\n log.info(\"URL: \" + url)\n\n\n # Move to down location\n element = largeDeepDOM_page.largeDeepDOM_DownLocation()\n self.driver.execute_script('arguments[0].scrollIntoView({block: \"center\", inline: \"center\"})', element)\n assert element.text == '26.2'\n\n\n # Move to table location\n element2 = largeDeepDOM_page.largeDeepDOM_TableLocation()\n self.driver.execute_script('arguments[0].scrollIntoView({block: \"center\", inline: \"center\"})', element2)\n assert element2.text == '15.25'\n\n\n # Exit the Page\n log.info(header_text + \" - All Tests Passed\")\n time.sleep(2)\n self.driver.back()\n self.driver.refresh()\n\n","repo_name":"Grey4114/Python","sub_path":"Selenium/the_internet_herokuapp_website/tests/test_Page_LargeDeepDOM.py","file_name":"test_Page_LargeDeepDOM.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"25198358288","text":"import sys\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QTextEdit, QLineEdit, QPushButton, QVBoxLayout, QHBoxLayout, QWidget\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\nimport contextlib\n\nclass ChatGPTApp(QMainWindow):\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n self.tokenizer = AutoTokenizer.from_pretrained('microsoft/DialoGPT-medium', padding_side='left')\n self.model = AutoModelForCausalLM.from_pretrained('microsoft/DialoGPT-medium')\n \n self.output_edit = QTextEdit()\n self.output_edit.setReadOnly(True)\n\n self.input_edit = QLineEdit()\n self.submit_button = QPushButton(\"Submit\")\n self.submit_button.clicked.connect(self.generate_response)\n\n input_layout = QHBoxLayout()\n input_layout.addWidget(self.input_edit)\n input_layout.addWidget(self.submit_button)\n\n layout = QVBoxLayout()\n layout.addWidget(self.output_edit)\n layout.addLayout(input_layout)\n\n container = QWidget()\n container.setLayout(layout)\n\n self.setCentralWidget(container)\n\n 
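# Single-turn setup; for multi-turn chat DialoGPT expects the running history\n        # to be concatenated onto each new input, e.g. (sketch, 'history' assumed):\n        #   history = torch.cat([history, new_input_ids], dim=-1)\n        # which this example leaves out for simplicity.\n        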
self.setWindowTitle(\"Chatbot-sample\")\n self.setStyleSheet(\"./chat.qss\")\n\n self.show()\n\n def generate_response(self):\n user_input = self.input_edit.text()\n if user_input.strip().lower() in ['exit', 'quit', 'bye']:\n self.output_edit.append(\"Chatbot: Goodbye!\")\n self.close()\n\n input_ids = self.tokenizer.encode(user_input + self.tokenizer.eos_token, return_tensors='pt')\n \n # Redirect both stdout and stderr to suppress warnings\n with contextlib.redirect_stdout(None), contextlib.redirect_stderr(None):\n chatbot_output = self.model.generate(input_ids, max_length=1000, pad_token_id=self.tokenizer.eos_token_id)\n \n chatbot_response = self.tokenizer.decode(chatbot_output[:, input_ids.shape[-1]:][0], skip_special_tokens=True)\n self.output_edit.append(f\"User: {user_input}\")\n self.output_edit.append(f\"Chatbot: {chatbot_response}\")\n self.input_edit.clear()\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n window = ChatGPTApp()\n sys.exit(app.exec_())\n","repo_name":"LingYuWings/NLP-Project","sub_path":"chatbot-test.py","file_name":"chatbot-test.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"3883266352","text":"#!/usr/bin/python3\n\n\n'''\n0-subs.py - a function that queries reddit\nAPI with total subscribers count as response\n'''\n\nimport requests\n\n\ndef number_of_subscribers(subreddit):\n \"\"\"Queries the Reddit API and returns the number of\n subscribers for the 'programming' subreddit\n \"\"\"\n url = 'https://www.reddit.com/r/{}/about.json'.format(subreddit)\n headers = {\"User-Agent\": \"0-subs/1.0\"}\n response = requests.get(url, headers=headers)\n if response.status_code == 200:\n data = response.json()\n subscribers = data['data']['subscribers']\n return subscribers\n else:\n return 0\n","repo_name":"Chimekinglsey/alx-system_engineering-devops","sub_path":"0x16-api_advanced/0-subs.py","file_name":"0-subs.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"37800120592","text":"#! /usr/bin/env python\nfrom pathlib import Path\n\nimport h5py as h5\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport typer\nfrom matplotlib.colors import LogNorm, Normalize\n\n\ndef get_canvas():\n fig, ax = plt.subplots()\n return fig, ax\n\n\napp = typer.Typer()\n\n\n@app.command()\ndef main(\n file: Path,\n ylim: float = None,\n max_frequency: float = 0.99,\n max_intensity: float = 1,\n min_intensity: float = 1e-4,\n linear: bool = False,\n):\n # open the sqe file\n typer.echo(f\"Read spectral function from {file}\")\n f = h5.File(file, \"r\")\n\n # typer.echo(\".. keys in file:\")\n # typer.echo(f.keys())\n\n # template output file name\n outfile = file.stem\n\n # get axes and intensity\n x = np.array(f.get(\"q_values\"))\n y = np.array(f.get(\"energy_values\"))\n try:\n gz = np.array(f[\"spectral_function\"])\n except KeyError:\n gz = np.array(f[\"intensity\"]) # compatibility with older sqe.hdf5 files\n\n # integrate intensity in energy\n n_bands = np.trapz(gz, x=y, axis=0).mean()\n typer.echo(f\".. no. of bands: {n_bands}\")\n\n if not ylim and max_frequency < 1:\n # find ylim as fraction of full band occupation\n for nn, yy in enumerate(y):\n gz_int = np.trapz(gz[:nn], x=y[:nn], axis=0).mean()\n if gz_int > max_frequency * n_bands:\n ylim = yy\n typer.echo(f\".. 
{max_frequency*100}% intensity at {yy:.3f} THz\")\n                break\n\n    # normalize intensity\n    gz /= gz.max()\n\n    # add a little bit so that the logscale does not go nuts\n    gz = gz + min_intensity\n    # for plotting, turn the axes into 2d arrays\n    gx, gy = np.meshgrid(x, y)\n    # x-ticks\n    xt = np.array(f.get(\"q_ticks\"))\n    # labels for the x-ticks\n    xl = f.attrs.get(\"q_tick_labels\").decode().split()\n    # label for y-axis\n    yl = f\"Energy ({f.attrs.get('energy_unit').decode():s})\"\n\n    # cap intensity\n    if max_intensity < 1:\n        gz[gz > max_intensity] = max_intensity\n\n    fig, ax = get_canvas()\n\n    if linear:\n        norm = Normalize(vmin=gz.min(), vmax=gz.max())\n    else:\n        norm = LogNorm(vmin=gz.min(), vmax=gz.max())\n    kw = {\"cmap\": \"viridis\", \"shading\": \"auto\", \"norm\": norm}\n    ax.pcolormesh(gx, gy, gz, **kw)\n    # set the limits of the plot to the limits of the data\n    ax.set_xlim([x.min(), x.max()])\n    if ylim is None:\n        ylim = y.max()\n\n    ax.set_ylim([y.min(), ylim])\n    ax.set_xticks(xt)\n    ax.set_xticklabels(xl)\n    ax.set_ylabel(yl)\n\n    # talk\n    typer.echo(f\".. ylim is {ylim:.4f} THz\")\n    typer.echo(f\".. max intensity: {max_intensity}\")\n    typer.echo(f\".. use linear scale: {linear}\")\n\n    if max_intensity < 1:\n        outfile += \"_intensity\"\n    if linear:\n        outfile += \"_linear\"\n    if max_frequency < 0.99:\n        outfile += f\"_max_{max_frequency:4.2f}\"\n\n    outfile += \".png\"\n    typer.echo(f\".. save to {outfile}\")\n    fig.savefig(outfile, dpi=300)\n\n\nif __name__ == \"__main__\":\n    app()\n","repo_name":"Guodonglin-cqust/tools.tdep","sub_path":"scripts/tdep_plot_sqe.py","file_name":"tdep_plot_sqe.py","file_ext":"py","file_size_in_byte":2985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"36579441758","text":"import random\r\n\r\nweight = [[99, -8, 8, 6, 6, 8, -8, 99],\r\n          [-8,-24, -4, -3, -3, -4,-24, -8],\r\n          [ 8, -4, 7, 4, 4, 7, -4, 8],\r\n          [ 6, -3, 4, 0, 0, 4, -3, 6],\r\n          [ 6, -3, 4, 0, 0, 4, -3, 6],\r\n          [ 8, -4, 7, 4, 4, 7, -4, 8],\r\n          [-8,-24, -4, -3, -3, -4,-24, -8],\r\n          [99, -8, 8, 6, 6, 8, -8, 99]]\r\n\r\ndef easy(field, possible, color, opp_color):\r\n    \r\n    return(random.choice(possible))\r\n\r\ndef greedy(field, possible, color, opp_color):\r\n    \r\n    cntmax = 0\r\n    delta_x = [-1, -1, 0, 1, 1, 1, 0, -1]\r\n    delta_y = [ 0, 1, 1, 1, 0, -1, -1, -1]\r\n    bestpos = possible[0]\r\n    \r\n    for position in range(len(possible)):\r\n\r\n        counter = 0\r\n        pos_x = possible[position][0]\r\n        pos_y = possible[position][1]\r\n        \r\n        for direction in range(8):\r\n\r\n            dir_counter = 0\r\n            legit = True\r\n            current_x = pos_x + delta_x[ direction ]\r\n            current_y = pos_y + delta_y[ direction ]\r\n            \r\n            while(True):\r\n                if(current_x < 0 or current_x > 7 or current_y < 0 or current_y > 7):\r\n                    legit = False\r\n                    break\r\n                \r\n                if(field[ current_x ][ current_y] == color):\r\n                    break\r\n\r\n                if(field[ current_x ][ current_y] != opp_color):\r\n                    legit = False  # empty square: the chain is broken before reaching our own piece\r\n                    break\r\n                \r\n                dir_counter += 1  # opponent piece that this move would flip\r\n\r\n                current_x += delta_x[direction]\r\n                current_y += delta_y[direction]\r\n\r\n            \r\n            if(legit == True):\r\n                counter += dir_counter\r\n\r\n        if(counter > cntmax):\r\n            cntmax = counter\r\n            bestpos = possible[position]\r\n    \r\n    return(bestpos)\r\n\r\n\r\ndef weighted(field, possible, color, opp_color):\r\n    \r\n    bestpos = (possible[0])\r\n    maxweight = weight[bestpos[0]][bestpos[1]]  # seed with the first candidate so negative weights compare correctly\r\n    \r\n    for i in range(len(possible)):\r\n        pos_x = possible[i][0]\r\n        pos_y = possible[i][1]\r\n        if(weight[pos_x][pos_y] > maxweight):\r\n            maxweight = weight[pos_x][pos_y]\r\n            bestpos = 
possible[i]\r\n\r\n return(bestpos)\r\n \r\n\r\n \r\n","repo_name":"fhalambek/reversi","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"22878036855","text":"\nfrom typing import Optional\nfrom entity_types.minecraft_things import DroppedMinecraftBlock, MinecraftItem, PlacedMinecraftBlock\nfrom time_utils import get_current_time\n\nfrom sqlitedict import SqliteDict\n\nfrom db import get_entity_at_position, get_entity_db, get_free_entity_id\nfrom user_utils import get_player_entity_from_request\nfrom utils import MAP_BOUNDS, TILE_SIZE, coordinates_are_walkable, get_position_is_in_bounds, move_coordinates_at_direction\nfrom models import ACTION_SLUGS, Action, Directions, Entity\n\nimport logging\n\nfrom time_utils import get_current_time\nlogger = logging.getLogger(__name__)\n\n\ndef fix_player_if_out_of_bounds(entity: Entity):\n if entity.x < MAP_BOUNDS[0]:\n entity.x = MAP_BOUNDS[0]\n if entity.x > MAP_BOUNDS[2]:\n entity.x = MAP_BOUNDS[2]\n if entity.y < MAP_BOUNDS[1]:\n entity.y = MAP_BOUNDS[1]\n if entity.y > MAP_BOUNDS[3]:\n entity.y = MAP_BOUNDS[3]\n\n\ndef handle_player_touch(request, direction):\n \"\"\"\n figure out what the input does\n\n return a list of entities that were affected\n \"\"\"\n player_entity = get_player_entity_from_request(request)\n entity_db = get_entity_db()\n changed_entities = []\n\n if not player_entity:\n logger.info(\"Player entity not found\")\n return [], changed_entities\n\n target_x, target_y = move_coordinates_at_direction(player_entity.x, player_entity.y, direction)\n player_entity.direction = direction\n\n blocking_entity = get_entity_at_position(target_x, target_y)\n\n action: Optional[ACTION_SLUGS] = None\n if blocking_entity is None:\n action = \"move\"\n else:\n action = blocking_entity.on_touch\n\n if action == \"move\":\n # Additional check to make sure the player doesn't move out of bounds\n if not get_position_is_in_bounds(target_x, target_y, player_entity):\n logger.info(f\"Player can't move to {target_x}, {target_y}\")\n fix_player_if_out_of_bounds(player_entity) \n action = None \n\n if action == \"move\" and player_entity.holding and 'block' in player_entity.holding.slug:\n action = \"place\"\n\n if player_entity.action is not None:\n if player_entity.action.action == \"action\":\n # Probably clicked again, just return nothing\n return [], changed_entities\n logger.info(\"Player is already doing something: %s\", player_entity.action)\n\n if player_entity.action.action == 'move':\n return [], changed_entities\n\n # Cancel the current action\n logger.info(\"Player cancelled action: %s\", player_entity.action)\n if player_entity.action.target_id:\n target_entity = entity_db[player_entity.action.target_id]\n target_entity.finish_shaking()\n if target_entity.carried_by_entity_id:\n # Cant cancel an action on a carried entity\n return [], changed_entities\n\n entity_db[target_entity.id] = target_entity\n changed_entities.append(target_entity)\n\n player_entity.action = None\n player_entity.update_sprites()\n entity_db[player_entity.id] = player_entity\n return [], changed_entities + [player_entity]\n\n if player_entity.carrying_entity_id:\n if action != 'move':\n return [], changed_entities\n\n if action:\n if action == \"move\":\n logger.info(f\"Player started moving from {player_entity.x} to direction {direction}\")\n\n player_entity.start_moving(target_x, target_y)\n if 
player_entity.carrying_entity_id:\n player_entity.holding = None\n logger.info(f\"Player is carrying {player_entity.carrying_entity_id}\")\n carried_entity = entity_db[player_entity.carrying_entity_id]\n carried_entity.start_moving(target_x, target_y - 30)\n entity_db[carried_entity.id] = carried_entity\n changed_entities.append(carried_entity)\n elif action == \"swing\":\n base_time = 10000\n efficiency = 1\n if player_entity.holding:\n efficiency = (\n player_entity.holding.get_material_swinging_efficiency(blocking_entity.made_of)\n ) or 1\n logger.info(f\"Player started swinging at {blocking_entity.id} with efficiency {efficiency}\")\n final_time = base_time / efficiency\n player_entity.action = Action(\n action='swing',\n time=final_time,\n timeout=get_current_time() + final_time,\n target_id=blocking_entity.id\n )\n blocking_entity.start_shaking()\n entity_db[blocking_entity.id] = blocking_entity\n changed_entities.append(blocking_entity)\n elif action == \"to_inventory\":\n logger.info(f\"Player started picking up {blocking_entity.id}\")\n player_entity.action = Action(\n action='to_inventory',\n time=500,\n timeout=get_current_time() + 500,\n target_id=blocking_entity.id\n )\n elif action == \"place\":\n logger.info(f\"Player started placing\")\n player_entity.action = Action(\n action='place',\n time=500,\n timeout=get_current_time() + 500,\n )\n elif action == \"pick_up\":\n logger.info(f\"Player started pick_up\")\n player_entity.action = Action(\n target_id=blocking_entity.id,\n action='pick_up',\n time=500,\n timeout=get_current_time() + 500,\n )\n player_entity.holding = None\n blocking_entity.start_moving(player_entity.x, player_entity.y - 30)\n blocking_entity.carried_by_entity_id = player_entity.id\n entity_db[blocking_entity.id] = blocking_entity\n changed_entities.append(blocking_entity)\n\n player_entity.update_sprites()\n\n entity_db[player_entity.id] = player_entity\n\n return [], changed_entities + [player_entity]\n\n\ndef handle_action_finished(entity: Entity, action: Action, entity_db: SqliteDict) -> tuple[list[Entity], list[str]]:\n changed_entities = []\n deleted_entity_ids = []\n if not action:\n return changed_entities, deleted_entity_ids\n logger.info(f\"Action finished: {action.action}, {action.target_id}\")\n if action.action == 'move':\n entity.finish_moving()\n entity.update_sprites()\n entity_db[entity.id] = entity\n if entity.carrying_entity_id:\n carried_entity = entity_db[entity.carrying_entity_id]\n carried_entity.finish_moving()\n carried_entity.update_sprites()\n entity_db[carried_entity.id] = carried_entity\n changed_entities.append(carried_entity)\n elif action.action == 'swing':\n target_entity = entity_db[action.target_id]\n changed_entities = target_entity.on_swing_destroy()\n del entity_db[action.target_id]\n deleted_entity_ids.append(action.target_id)\n entity.action = None\n entity.update_sprites()\n entity_db[entity.id] = entity\n elif action.action == 'to_inventory':\n target_entity: DroppedMinecraftBlock = entity_db[action.target_id]\n for item in entity.inventory.items:\n if item.slug == f\"{target_entity.block_type}_block\":\n item.quantity += 1\n break\n else:\n inventory_item = MinecraftItem(\n slug=f\"{target_entity.block_type}_block\",\n quantity=1\n )\n entity.inventory.items.append(inventory_item)\n del entity_db[action.target_id]\n deleted_entity_ids.append(action.target_id)\n entity.action = None\n entity.update_sprites()\n entity_db[entity.id] = entity\n elif action.action == 'place':\n if entity.holding:\n block_type = 
entity.holding.slug\n target_x, target_y = move_coordinates_at_direction(entity.x, entity.y, entity.direction)\n\n new_block = PlacedMinecraftBlock(\n id=get_free_entity_id(),\n block_type=block_type.replace('_block', ''),\n made_of=block_type.replace('_block', ''),\n x=target_x,\n y=target_y,\n )\n new_block.update_sprites()\n entity_db[new_block.id] = new_block\n changed_entities.append(new_block)\n\n entity.holding.quantity -= 1\n index = -1\n for item in entity.inventory.items:\n index += 1\n if item.slug == entity.holding.slug:\n item.quantity = entity.holding.quantity\n if item.quantity <= 0:\n del entity.inventory.items[index]\n entity.holding = None\n break\n\n entity.action = None\n entity.update_sprites()\n entity_db[entity.id] = entity\n elif action.action == \"pick_up\":\n entity.carrying_entity_id = action.target_id\n entity.action = None\n entity.update_sprites()\n entity_db[entity.id] = entity\n logger.info(\"Player finished pick_up, is now carrying entity %s\", action.target_id)\n\n target_entity: Entity = entity_db[action.target_id]\n target_entity.finish_moving()\n target_entity.carried_by_entity_id = entity.id\n target_entity.action = None\n target_entity.update_sprites()\n entity_db[target_entity.id] = target_entity\n changed_entities.append(target_entity)\n else:\n entity.action = None\n entity.update_sprites()\n entity_db[entity.id] = entity\n return changed_entities, deleted_entity_ids\n\n\ndef update_actions() -> tuple[list[Entity], list[str]]:\n \"\"\"\n Handle actions in progress, and remove them if they are done\n \"\"\"\n entity_db = get_entity_db()\n changed_entities = []\n deleted_entity_ids = []\n now = get_current_time()\n for entity_id, entity in entity_db.items():\n if entity.action is None:\n continue\n if entity.action.timeout < now:\n changed_entities.append(entity)\n continue\n\n if len(changed_entities) == 0:\n return ([], [])\n\n for entity in changed_entities:\n handle_result = (\n handle_action_finished(entity, entity.action, entity_db)\n )\n changed_entities.extend(handle_result[0])\n deleted_entity_ids.extend(handle_result[1])\n\n return changed_entities, deleted_entity_ids\n\n\ndef handle_player_action(request_sid):\n player_entity = get_player_entity_from_request(request_sid)\n if not player_entity:\n return [], []\n entity_db = get_entity_db()\n changed_entities = []\n deleted_entity_ids = []\n\n if player_entity.carrying_entity_id:\n carried_entity = entity_db[player_entity.carrying_entity_id]\n target_x, target_y = move_coordinates_at_direction(player_entity.x, player_entity.y, player_entity.direction)\n if coordinates_are_walkable(target_x, target_y):\n carried_entity.start_moving(target_x, target_y)\n carried_entity.carried_by_entity_id = None\n carried_entity.update_sprites()\n entity_db[carried_entity.id] = carried_entity\n changed_entities.append(carried_entity)\n\n player_entity.carrying_entity_id = None\n player_entity.update_sprites()\n entity_db[player_entity.id] = player_entity\n changed_entities.append(player_entity)\n\n return deleted_entity_ids, changed_entities\n","repo_name":"Eerovil/CastCraft","sub_path":"backend/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":11648,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"21486268515","text":"import socket\nfrom _thread import *\nfrom lobby import Lobby\nimport pickle\nimport time\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\nserver = \"localhost\"\nport = 5555\n\nserver_ip = 
socket.gethostbyname(server)\n\nconnections = 0\n\ngames = {}\n\n\ndef main():\n try:\n sock.bind((server, port))\n\n except socket.error as e:\n print(str(e))\n\n sock.listen()\n print(\"[START] Waiting for a connection\")\n\n while True:\n client, addr = sock.accept()\n print(\"[CONNECT] New Connection\")\n\n lobbyId = int(client.recv(1024).decode())\n if lobbyId == -1:\n lobbyId = id_generator()\n games[lobbyId] = Lobby(client, lobbyId)\n client.send(b\"Creating New Lobby \" + str(lobbyId).encode())\n print(\"Created New Lobby\")\n games[lobbyId].start()\n\n elif lobbyId in games.keys():\n if not len(games[lobbyId].clients) == 2:\n games[lobbyId].clients.append(client)\n client.send(b\"Joining Into an Existing Lobby\")\n\n else:\n client.send(b\"[ERROR] 743: lobby is full\")\n\n else:\n client.send(b\"[ERROR] 404: lobby not found\")\n\n\ndef id_generator():\n import random\n lobbyId = random.randint(1000, 9999)\n while lobbyId in games.keys():\n lobbyId = random.randint(1000, 9999)\n\n return lobbyId\n\nmain()\n","repo_name":"AmitWin/Mini-Final-Project","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"9137272254","text":"N, A, B = map(int, input().split())\nH = [int(input()) for _ in range(N)]\n\n\ndef ceil(a, b):\n return -(-a//b)\n\n\ndef check(k):\n cnt = 0\n for i in range(N):\n cnt += ceil(max(0, H[i]-B*k), A-B)\n if cnt <= k:\n return True\n return False\n\n\nr = 1 << 60\nl = -1\nwhile r-l > 1:\n mid = (l+r)//2\n if check(mid):\n r = mid\n else:\n l = mid\nprint(r)\n","repo_name":"Nikkuniku/AtcoderProgramming","sub_path":"ABC/ABC001~ABC099/ABC063/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"13181999291","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix\nimport cv2, os, math, time, sys\nfrom datetime import timedelta\nfrom sklearn.utils import shuffle\nfrom sklearn.preprocessing import normalize\n\n###Configuration\n\"\"\"\nData Configurations/Paths\n\"\"\"\nimg_dir_patch=\"./SD/predicted_patches\"\nimg_dir_orig = \"./SD/original_images\"\nimg_dir=\"../../original_images/SD\"\nmodel50_SD = 'SD/50kSD_Model.ckpt'\nmodel50_left_mask = 'SD/50k_left_mask.ckpt'\nmodel50_right_mask = 'SD/50k_right_mask.ckpt'\n\nimg_type = \"original\"\n# img_type = \"patch\"\n\n##\n# Convolutional Layer 1.\nfilter_size0 = 16 # Convolution filters are 4 x 4 pixels.\nnum_filters0 = 64 # There are 16 of these filters.\n\nfilter_size1 = 8 # Convolution filters are 4 x 4 pixels.\nnum_filters1 = 64 # There are 16 of these filters.\n\n# Convolutional Layer 2.\nfilter_size2 = 8 # Convolution filters are 2 x 2 pixels.\nnum_filters2 = 32 # There are 32 of these filters.\n\nfilter_size3 = 8 # Convolution filters are 2 x 2 pixels.\nnum_filters3 = 32 # There are 32 of these filters.\n\n# Convolutional Layer 3.\nfilter_size4 = 4 # Convolution filters are 2 x 2 pixels.\nnum_filters4 = 32 # There are 64 of these filters.\n\nfilter_size5 = 2 # Convolution filters are 2 x 2 pixels.\nnum_filters5 = 16 # There are 64 of these filters.\n\n\n# Fully-connected layer.\nfc_size = 2000 # Number of neurons in fully-connected layer.\n\n# We know that images are 60 
pixels in each dimension.\n# img_size = 8 * 4\n\n# Images are stored in one-dimensional arrays of this length.\n\n# Number of colour channels for the images: 3 channel for RGB.\nnum_channels = 3\n\n# Tuple with height and width of images used to reshape arrays.\nimg_shape = (7, 7, num_channels)\n\n# Number of classes, one class for same or different image\nnum_classes = img_shape[0]*img_shape[1]\norig_patch_size = (2, 2, 3)\nnpatches = 1\n\n\"\"\"Load Data and other functions\"\"\"\ndef change_label_dimensions(labels):\n label_temp = np.zeros((len(labels), 2))\n \n for idx in range(0, len(labels)):\n if labels[idx] == 1:\n label_temp[idx][1] = 1\n else:\n label_temp[idx][0] = 1\n\n \n return label_temp\n\ndef load_data(img_dir):\n list_of_orig_imgs = []\n list_of_labels = []\n list_same_diff = []\n list_img_keys = []\n for img in os.listdir(img_dir):\n \n img_path = os.path.join(img_dir, img)\n list_same_diff.append(int(os.listdir(img_path)[0]))\n list_img_keys.append(img)\n img_path = img_path + \"/\" + os.listdir(img_path)[0]\n for img_label in os.listdir(img_path):\n img_data = os.path.join(img_path, img_label)\n if img_label == \"img\":\n# print(img_data + \"/img.png\")\n list_of_orig_imgs.append(img_data + \"/img.png\")\n else:\n list_of_labels.append([os.path.join(img_data, label) for label in os.listdir(img_data)])\n\n data_imgs = np.array(list_of_orig_imgs)\n data_labels = np.array(list_of_labels)\n data_same_diff = np.array(list_same_diff)\n data_img_keys = np.array(list_img_keys)\n\n return data_imgs, data_labels, data_same_diff, data_img_keys\n\n \ndef get_batch_images(data_orig, label, same_diff, img_keys, rshp, grey_scale):\n list_of_orig_imgs = []\n list_of_labels = []\n list_of_same_diff = []\n list_of_img_keys = []\n for img_orig, lbl, img_type, img_key in zip(data_orig, label, same_diff, img_keys):\n if (grey_scale):\n orig_img = cv2.imread(img_orig)\n \n orig_lbl = cv2.imread(lbl, cv2.IMREAD_GRAYSCALE)\n if orig_img is None or orig_lbl is None:\n print (\"Unable to read image{} or {}\".format(img_orig, lbl))\n continue\n \n# if (grey_scale):\n# orig_lbl = rgb2grey(orig_lbl)\n\n flattened_orig_img = orig_img.flatten()\n flattened_lbl = orig_lbl.flatten()\n \n# if grey_scale:\n# flattened_lbl = np.reshape(flattened_lbl, [10, 10])\n# print(flattened_lbl)\n# flattened_lbl = normalize(flattened_lbl)\n# print(flattened_lbl)\n \n list_of_orig_imgs.append(np.asarray(flattened_orig_img, dtype=np.float32))\n \n list_of_labels.append(np.asarray(flattened_lbl, dtype=np.float32))\n list_of_same_diff.append(img_type)\n list_of_img_keys.append(img_key)\n\n data_labels = np.array(list_of_labels)\n data_imgs = np.array(list_of_orig_imgs)\n data_img_type = np.array(list_of_same_diff)\n data_img_keys = np.array(list_of_img_keys)\n \n \"\"\"this function call locates top left location of each patch in the mask and return. Comment it if contents are required.\"\"\"\n# print(data_labels.shape)\n# data_labels = get_patch_loc(data_labels)\n \n return data_imgs, data_labels, data_img_type, data_img_keys\n\ndef next_batch(num, data, labels):\n '''\n Return a total of `num` random samples and labels. 
\n    '''
\n '''\n idx = np.arange(0 , len(data))\n np.random.shuffle(idx)\n idx = idx[:num]\n data_orig = data\n data_orig_shuffle = [data[i] for i in idx]\n labels_shuffle = [labels[ i] for i in idx]\n\n return np.asarray(data_orig_shuffle), np.asarray(labels_shuffle)\n\n\"\"\"Helpers functions for the Network\"\"\"\ndef new_weights(shape, layer_name):\n initializer = tf.contrib.layers.xavier_initializer()\n return tf.Variable(initializer(shape), name=layer_name+'_W')\n\ndef new_bias(length, layer_name):\n return tf.Variable(tf.constant(0.05, shape=[length]), name=layer_name+'_b')\n\ndef new_conv_layer(input,\n num_input_channels,\n filter_size,\n num_filters,\n name_scope,\n layer_name='',\n use_pooling=True):\n\n with tf.name_scope(name_scope):\n shape = [filter_size, filter_size, num_input_channels, num_filters]\n weights = new_weights(shape, layer_name)\n biases = new_bias(num_filters, layer_name)\n\n layer = tf.add(tf.nn.conv2d(input=input, filter=weights, strides=[1,1,1,1], padding='SAME'), biases, name=layer_name)\n\n if use_pooling:\n layer = tf.nn.max_pool(value=layer,\n ksize=[1, 3, 3, 1],\n strides=[1, 2, 2, 1],\n padding='SAME', name=layer_name+'_max')\n layer = tf.nn.relu(layer, name=layer_name+'_activation')\n\n return layer, weights\n\ndef flatten_layer(layer):\n layer_shape = layer.get_shape()\n num_features = layer_shape[1:4].num_elements()\n layer_flat = tf.reshape(layer, [-1, num_features])\n \n return layer_flat, num_features\n\ndef new_fc_layer(input,\n num_inputs,\n num_outputs,\n name_scope,\n layer_name='',\n use_relu=True):\n \n with tf.name_scope(name_scope):\n weights = new_weights([num_inputs, num_outputs], layer_name)\n biases = new_bias(num_outputs, layer_name)\n\n layer = tf.add(tf.matmul(input, weights),biases,name=layer_name)\n # layer = tf.matmul(input, weights) + biases\n\n if use_relu:\n layer = tf.nn.relu(layer, layer_name+'_activation')\n \n return layer\n\ndef normalise(tensor):\n return tf.div(\n tf.subtract(\n tensor, \n tf.reduce_min(tensor)\n ), \n tf.subtract(\n tf.reduce_max(tensor), \n tf.reduce_min(tensor)\n )\n)\n\ndef normalized(arr):\n return (arr - np.min(arr))/(np.max(arr) - np.min(arr))\n\ndef optimize(num_epochs, save_model=True,save_name= \"base_model\",restore_model=False,restore_name=None):\n total_iterations = 0\n done_train_imgs = 0\n start_time = time.time()\n start_ = 0\n end_ = train_batch_size \n plot_accuracy=[]\n plot_accuracy_epoch=[]\n plot_training_size=[]\n plot_training_size_epoch=[]\n saver = tf.train.Saver()\n sum_accuracy = 0.0\n n = 1\n \n #to save the model\n for i in range(0, num_epochs): \n start_batch=0\n end_batch = train_batch_size\n \n print(\"Epoch:\", i + 1)\n \n if restore_model==True:\n if restore_name==None:\n print(\"No model file specified\")\n return\n else:\n saver.restore(session,restore_name)\n \n sum_accuracy = 0.0\n n = 1\n while end_batch < total_imgs:\n train_orig = train_orig_data[start_batch:end_batch]\n labels = train_labels_data[start_batch:end_batch]\n# print('Labels:', labels)\n img_type_lbl = img_type[start_:end_]\n img_key = img_keys[start_:end_]\n dims = (len(train_orig), num_classes, num_channels)\n train, labels, img_type_lbl, img_key = get_batch_images(train_orig, labels, img_type_lbl, img_key, dims, True)\n if not len(train) and not len(labels):\n print(\"All images have been processed.\")\n break;\n\n x_orig_batch, y_true_batch = next_batch(len(train), train, labels)\n feed_dict_train = {x: x_orig_batch,\n y_true: y_true_batch}\n \n session.run(optimizer, feed_dict=feed_dict_train)\n \n 
acc,co = session.run([accuracy, cost], feed_dict=feed_dict_train)\n sum_accuracy += acc\n n+=1\n msg = \"Optimization Iteration: {0:>6}, Training Accuracy: {1:>6.1%}, Loss: {2:>.4f}\"\n print(msg.format(end_batch + 1, acc, co))\n if i == num_epochs - 1:\n plot_accuracy.append(acc)\n plot_training_size.append(end_batch + 1)\n\n start_batch += train_batch_size\n end_batch += train_batch_size\n \n if save_model==True:\n if save_name==None:\n print(\"No model specified, model not being saved\")\n return\n else:\n save_path = saver.save(session, save_name)\n restore_model = True\n print(\"Model saved in file: %s\" % save_name)\n plot_accuracy_epoch.append(sum_accuracy/n)\n plot_training_size_epoch.append(i + 1)\n \n end_time = time.time()\n # Difference between start and end-times.\n time_dif = end_time - start_time\n # Print the time-usage.\n print(\"Time usage: \" + str(timedelta(seconds=int(round(time_dif))))) \n print(plot_accuracy)\n print(plot_training_size)\n print(plot_accuracy_epoch)\n print(plot_training_size_epoch)\n\ndef restore_see_layer(orig, model_name=None, var_name=None):\n with tf.Session('', tf.Graph()) as s:\n with s.graph.as_default():\n if ((model_name != None) and var_name != None):\n saver = tf.train.import_meta_graph(model_name+\".meta\")\n saver.restore(s, model_name)\n# print(pred.shape)\n fd = {'x:0': orig}\n# print(fd.shape)\n var_name=var_name+\":0\"\n \n result = 0\n result = s.run(var_name, feed_dict=fd)\n return result\n\ndef see_output_grey(iNp,depth_filter_to_see=0,cmap=\"gray\",figsize=(4,4)):\n img_x = iNp[0,:,:]\n fig = plt.figure(figsize=figsize)\n plt.imshow(img_x, interpolation='none', aspect='auto')\n# plt.colorbar(img_x, orientation='horizontal')\n plt.show()\n\n\ndef see_output(iNp,depth_filter_to_see=0,cmap=\"gray\",figsize=(4,4)):\n img_x = iNp[0,:,:,:]\n fig = plt.figure(figsize=figsize)\n if cmap == \"gray\":\n plt.imshow(img_x, cmap=plt.get_cmap('gray'))\n else:\n plt.imshow(img_x, interpolation='none', aspect='auto')\n# plt.colorbar(img_x, orientation='horizontal')\n plt.show()\n \ndef save_patch_images(img_x1, lbl_x1, index):\n if not os.path.exists('./SD/predicted_patches/' + str(index)):\n os.makedirs('./SD/predicted_patches/' + str(index))\n os.makedirs('./SD/predicted_patches/' + str(index) + \"/\" + str(lbl_x1))\n \n plt.imsave('./SD/predicted_patches/' + str(index) + \"/\" + str(lbl_x1) + '/img.png', np.squeeze(img_x1))\n \n\ndef predict_nd_save(train, labels, img_type_lbl, img_key, start_idx):\n\n for index in range(0, len(train)):\n img_x = train[index:index+1, :]\n lbl_x = labels[index:index+1, :]\n img_type_x = img_type_lbl[index]\n img_key_x = img_key[index]\n prediction = restore_see_layer(ix=img_x,model_name=model2_50000,var_name='Softmax')\n prediction = np.reshape(prediction, (1, 4, 2, 1)) \n save_patch_images(prediction, img_type_x, img_key_x)\n\n\n\nx = tf.placeholder(tf.float32, shape=[None, img_shape[0]*img_shape[1]*num_channels], name='x')\nx_image = tf.reshape(x, [-1, img_shape[0], img_shape[1], num_channels])\ny_true = tf.placeholder(tf.float32, shape=[None, num_classes], name='y_true')\ny_true_cls = tf.placeholder(tf.float32, shape=[None, num_classes], name='y_true_cls')\n\nlayer0_conv0, weights_conv0 = new_conv_layer(input=x_image,\n num_input_channels=num_channels,\n filter_size=filter_size0,\n num_filters=num_filters0,\n name_scope='conv',\n layer_name='conv1',\n use_pooling=True)\n\n\nlayer1_conv1, weights_conv1 = new_conv_layer(input=layer0_conv0,\n num_input_channels=num_filters0,\n filter_size=filter_size1,\n 
num_filters=num_filters1,\n name_scope='conv',\n layer_name='conv2',\n use_pooling=True)\n\n\n\nlayer2_conv2, weights_conv2 = new_conv_layer(input=layer1_conv1,\n num_input_channels=num_filters1,\n filter_size=filter_size2,\n num_filters=num_filters2,\n name_scope='conv',\n layer_name='conv3',\n use_pooling=True)\n\n\n\nlayer3_conv3, weights_conv3 = new_conv_layer(input=layer2_conv2,\n num_input_channels=num_filters2,\n filter_size=filter_size3,\n num_filters=num_filters3,\n name_scope='conv',\n layer_name='conv4',\n use_pooling=True)\n\n\n\nlayer4_conv4, weights_conv4 = new_conv_layer(input=layer3_conv3,\n num_input_channels=num_filters3,\n filter_size=filter_size4,\n num_filters=num_filters4,\n name_scope='conv',\n layer_name='conv5',\n use_pooling=True)\n\n\n\nlayer5_conv5, weights_conv5 = new_conv_layer(input=layer4_conv4,\n num_input_channels=num_filters4,\n filter_size=filter_size5,\n num_filters=num_filters5,\n name_scope='conv',\n layer_name='conv6',\n use_pooling=True)\n\n\n\nlayer_flat, num_features = flatten_layer(layer5_conv5)\n\n\nlayer_fc1 = new_fc_layer(input=layer_flat,\n num_inputs=num_features,\n num_outputs=fc_size,\n name_scope='fc',\n layer_name='fc1',\n use_relu=True)\n\n\n\nlayer_fc2 = new_fc_layer(input=layer_fc1,\n num_inputs=fc_size,\n num_outputs=fc_size,\n name_scope='fc',\n layer_name='fc2',\n use_relu=True)\n\n\n\nlayer_fc3 = new_fc_layer(input=layer_fc2,\n num_inputs=fc_size,\n num_outputs=fc_size,\n name_scope='fc',\n layer_name='fc3',\n use_relu=True)\n\n\n\nlayer_fc4 = new_fc_layer(input=layer_fc3,\n num_inputs=fc_size,\n num_outputs=num_classes,\n name_scope='fc',\n layer_name='fc4',\n use_relu=True)\n\n\ny_pred = layer_fc4\n\ncost = tf.reduce_mean(tf.square(y_true - y_pred))\noptimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost)\n\n# ## some more performance measures\ncorrect_prediction = tf.equal(y_pred, y_true)\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n\ntrain_data, train_labels, img_type, img_keys = load_data(img_dir)\nsession = tf.Session()\ninit = tf.global_variables_initializer()\nsession.run(init)\ntrain_labels_data = train_labels[:][:, 1]#1=mask_patch_2,2=mask_patch_1\n#print(train_labels_data)\n#sys.exit(0)\ntrain_orig_data = train_data[:]\nimg_type = img_type[:]\nimg_keys = img_keys[:]\ntotal_imgs = len(img_type)\ntrain_batch_size = 64 \n\n\n\"\"\"Main\"\"\"\nif __name__ == \"__main__\":\n\n save_model = True\n save_name = model50_right_mask\n restore_model=False\n restore_name=model50_right_mask\n\n optimize(5,\n save_model=True,save_name=model50_right_mask,restore_model=restore_model,restore_name=model50_right_mask)\n","repo_name":"asaeed9/psvrt","sub_path":"base_line_exp/python_scripts/sep_patches_train.py","file_name":"sep_patches_train.py","file_ext":"py","file_size_in_byte":18097,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"16977250164","text":"#!/usr/bin/env python\n\n\"\"\"\nSelect Wilcoxon marker genes\n\nUsage:\n method-wilcoxon.py --out-file=<path> [options] <file>\n\nOptions:\n -h --help Show this screen.\n -o --out-file=<path> Path to output file.\n -n --n-features=<int> Number of features to select per label [default: 200].\n\"\"\"\n\n\ndef select_features_wilcoxon(adata, n_features):\n \"\"\"\n Select features using the Wilcoxon rank sum test to detect marker genes\n\n Parameters\n ----------\n adata\n AnnData object\n n_features\n Number of features to select per label\n\n Returns\n ----------\n DataFrame containing the 
selected features\n    \"\"\"\n\n    from scanpy.preprocessing import normalize_total, log1p\n    from scanpy.tools import rank_genes_groups, filter_rank_genes_groups\n    from scanpy.get import rank_genes_groups_df\n    from pandas import DataFrame\n\n    print(\"Normalising expression...\")\n    normalize_total(adata, target_sum=1e4)\n    log1p(adata)\n\n    print(\"Testing for marker genes...\")\n    # Remove labels with no cells (unseen labels)\n    adata.obs[\"Label\"] = adata.obs[\"Label\"].cat.remove_unused_categories()\n    rank_genes_groups(adata, groupby=\"Label\", method=\"wilcoxon\", tie_correct=True)\n\n    print(\"Filtering marker genes...\")\n    filter_rank_genes_groups(\n        adata, min_in_group_fraction=0.1, max_out_group_fraction=0.8\n    )\n\n    selected_features = []\n    for label in adata.obs[\"Label\"].cat.categories:\n        print(f\"Getting results for label '{label}'...\")\n        filtered_results = rank_genes_groups_df(\n            adata, group=str(label), key=\"rank_genes_groups_filtered\"\n        )\n        filtered_results = filtered_results[filtered_results[\"names\"].notnull()]\n\n        filtered_results = filtered_results[filtered_results[\"pvals_adj\"] <= 0.01]\n\n        filtered_results = filtered_results.sort_values(\n            by=\"logfoldchanges\", ascending=False\n        )\n        filtered_results = filtered_results.head(n=n_features)\n\n        selected_features = selected_features + list(filtered_results[\"names\"])\n\n    selected_features = list(set(selected_features))\n    print(f\"Selected {len(selected_features)} features...\")\n    output = DataFrame(selected_features, columns=[\"Feature\"])\n\n    return output\n\n\ndef main():\n    \"\"\"The main script function\"\"\"\n    from docopt import docopt\n    from scanpy import read_h5ad\n\n    args = docopt(__doc__)\n\n    file = args[\"<file>\"]\n    n_features = int(args[\"--n-features\"])\n    out_file = args[\"--out-file\"]\n\n    print(f\"Reading data from '{file}'...\")\n    input = read_h5ad(file)\n    print(\"Read data:\")\n    print(input)\n    output = select_features_wilcoxon(input, n_features)\n    print(f\"Writing output to '{out_file}'...\")\n    output.to_csv(out_file, sep=\"\\t\", index=False)\n    print(\"Done!\")\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"theislab/atlas-feature-selection-benchmark","sub_path":"bin/method-wilcoxon.py","file_name":"method-wilcoxon.py","file_ext":"py","file_size_in_byte":2829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"27764838900","text":"vmax = float(input('Maximum speed: '))\nvmedida = float(input('Measured speed: '))\n\nif vmedida <= 0 or vmedida >= 300:\n    print('Invalid speed')\nelse:\n    v = vmedida - vmax\n    \n    if v <= 0:\n        print('The driver committed no infraction')\n    elif 0 < v <= 10:\n        print('Fine of R$120.00 and 2 points on the licence')\n    elif 10 < v <= 30:\n        print('Fine of R$250.00 and 5 points on the licence')\n    else:\n        print('Fine of R$600.00 and 7 points on the licence')","repo_name":"S4Muel-Silva/arquivos_bcc701","sub_path":"simulado/simulado1.py","file_name":"simulado1.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"2734041894","text":"from fastapi import FastAPI\nfrom typing import Dict , Union , Optional , List\nimport pandas as pd\nfrom tensorflow.keras.models import load_model\nfrom csgo.ml.preprocessing import preprocess_features\nfrom csgo.scraper.data_scraper import scrape_matches , scrape_match_round_data , scrape_player_ids\nfrom csgo.utils.connectors import fetch_leaderboard\nimport os\n\napp = FastAPI()\nmodel_path = \"../../notebooks/csgo_tf\"\napp.state.model = 
load_model(model_path)\n\n@app.get('/')\ndef home() -> Dict[str,str]:\n return {\"CSGO API\": \"Made by NChan\"}\n\n@app.get('/match')\ndef matches(limit : Optional[int] = None , rank : Optional[int] = None) -> List:\n \"\"\"Returns live matches\"\"\"\n matches = []\n for match in scrape_matches():\n matches.append(match)\n\n if limit and len(matches) >= limit:\n break\n\n return list(filter(lambda match : match.get(\"rankNum\") == rank , matches)) if rank else matches\n\n\n@app.get('/match/{matchId}/players')\ndef players(matchId : int) -> Dict:\n player_ids = scrape_player_ids(matchId)\n team_0 = player_ids[:5]\n team_1 = player_ids[5:]\n\n team_0_players = {}\n for player_info in team_0:\n team_0_players = { **team_0_players , **player_info}\n\n team_1_players = {}\n for player_info in team_1:\n team_1_players = { **team_1_players , **player_info}\n\n return {\"match\" : matchId , \n \"team_0\" : team_0_players , \n \"team_1\" : team_1_players\n }\n\n\n@app.get('/players/{playerId}')\ndef get_player_info(playerId : int) -> Dict:\n return {\"message\" : \"Under Construction!\"}\n\n\n@app.get('/leaderboard')\ndef leaderboard(page : Optional[int] = 1) -> List[Dict]:\n leaderboard_entries = fetch_leaderboard(os.environ.get(\"DB_NAME\") , page = page)\n return leaderboard_entries\n \n\n@app.get('/match/{matchId}/rounds')\ndef match(matchId : int , round_index : Optional[int] = None) -> Dict:\n round_data = scrape_match_round_data(matchId)\n try:\n if round_index:\n round_data = round_data[round_index]\n except IndexError:\n pass\n finally:\n return {\"match\" : matchId , \"rounds\" : round_data }\n\n\n@app.get('/about')\ndef about():\n return {\"Secret Portal\": \"Cookie!\"}\n\n\n@app.post('/predict')\ndef predict(game_stats : Dict[str , Union[float,str,int,bool]]) -> Dict:\n X_pred = pd.DataFrame(game_stats , index = [0])\n X_pred_trans = preprocess_features(X_pred)\n prediction = app.state.model.predict(X_pred_trans)\n return {\"CT win\": float(prediction[0][0]),\n \"T win\": 1 - float(prediction[0][0])}\n \n\n","repo_name":"NicoleChant/csgo-analytics","sub_path":"csgo/app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"12260187214","text":"import logging\n\nclass CustomFormatter(logging.Formatter):\n\n grey = \"\\x1b[38;20m\"\n yellow = \"\\x1b[33;20m\"\n red = \"\\x1b[31;20m\"\n bold_red = \"\\x1b[31;1m\"\n reset = \"\\x1b[0m\"\n green = \"\\x1b[1;32m\"\n format = \"%(levelname)s| %(message)s \"\n\n FORMATS = {\n logging.DEBUG: grey + format + reset,\n logging.INFO: green + format + reset,\n logging.WARNING: yellow + format + reset,\n logging.ERROR: red + format + reset,\n logging.CRITICAL: bold_red + format + reset\n }\n\n def format(self, record):\n log_fmt = self.FORMATS.get(record.levelno)\n formatter = logging.Formatter(log_fmt)\n return formatter.format(record)\n\n# create logger with 'spam_application'\nlogger = logging.getLogger(\"My_app\")\nlogger.setLevel(logging.DEBUG)\n\n# create console handler with a higher log level\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\nch.setFormatter(CustomFormatter())\n\nlogger.addHandler(ch)\n\n\ndebug = lambda _:logger.debug(_)\ninfo = lambda _:logger.info(_)\nwarning = lambda _:logger.warning(_)\nerror = lambda _:logger.error(_)\ncritical = lambda 
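# --- Hedged sketch: calling the /predict endpoint defined above from a
# client. The port assumes a default `uvicorn` launch, and the keys in
# `game_stats` are placeholders -- the real model expects whatever columns
# preprocess_features() was fitted on.
import requests

game_stats = {"round_time": 35.0, "ct_players_alive": 4, "bomb_planted": False}
resp = requests.post("http://localhost:8000/predict", json=game_stats)
print(resp.json())  # e.g. {"CT win": 0.63, "T win": 0.37}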
_:logger.critical(_)","repo_name":"JoyenBenitto/Antenna_generator","sub_path":"ant_gen/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"19332727333","text":"#!/usr/bin/env python3\n\n# Week 6\n# Mailroom Assignment testing - Part 4\nimport os\nimport sys\nimport pytest\nimport mailroom4\n\n\ndef test_print_donor_list():\n \"\"\"Test printing the donor list.\"\"\"\n donor = mailroom4.print_donor_list()\n assert donor[0] == 'Jimmy Nguyen'\n assert donor[1] == 'Steve Smith'\n\n\ndef test_get_donor():\n \"\"\"Testing that the name is pulled correctly.\"\"\"\n name_one = \"Jimmy Nguyen\"\n name_two = \"Elizabeth McBath\"\n\n assert name_one in mailroom4.donors.keys()\n assert name_two in mailroom4.donors.keys()\n\n\ndef test_create_report():\n \"\"\"Test for creating a report of donors with donation amounts.\"\"\"\n\n report_keys = mailroom4.donors.keys()\n report_values = list(mailroom4.donors.values())\n\n assert \"Jimmy Nguyen\" in report_keys\n assert report_values[0][1] == 1350\n\n\ndef test_letter():\n \"\"\"Test the letter function.\"\"\"\n letter = \"\"\"Dear {},\\nThank you for your very kind donation of {:.2f}.\\n\\nIt will be put to very good use.\\n\\n \\t\\tSincerely,\\n\\t\\t\\t-The Team\"\"\"\n donor_list = list(mailroom4.donors.keys())\n donor = donor_list[0]\n result = mailroom4.letter(donor)\n assert result == letter.format(donor, mailroom4.donors[donor][-1])\n\n\ndef test_send_letter_file():\n \"\"\"Test for writing a letter to a donor.\"\"\"\n # pytest.set_trace() # invoke PDB debugger and tracing\n mailroom4.send_letter_file()\n\n assert os.path.isfile(\"Jimmy Nguyen.txt\")\n assert os.path.isfile(\"Elizabeth McBath.txt\")\n","repo_name":"UWPCE-PythonCert-ClassRepos/Wi2018-Online","sub_path":"students/daniel_grubbs/lesson06/test_mailroom4.py","file_name":"test_mailroom4.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"70182272151","text":"#Q.2.\r\nclass student:\r\n def _init_(self):\r\n self.__student_id=None\r\n self.__marks=None\r\n self.__age=None\r\n def set_details(self,sid,sm,sa):\r\n self.__student_id=sid\r\n self.__marks=sm\r\n self.__age=sa\r\n def validate_marks(self):\r\n if 100>=self.__marks>=0:\r\n return True\r\n else:\r\n return False\r\n def validate_age(self):\r\n if self.__age>20:\r\n return True\r\n else:\r\n return False\r\n def check_qualification(self):\r\n if self.validate_marks() and self.validate_age():\r\n if self.__marks>=65:\r\n return True\r\n else:\r\n return True\r\n def choose_course(self):\r\n if self.check_qualification():\r\n self.fees=None\r\n self.course=int(input(\"\\nyou are eligible for admission,\\ninput 1001 to choose course 1001 (default fees 25575.0)\\ninput 1002 to choose course 1002 (default fees 15500.0)\\nyour choice: \"))\r\n if self.course==1001 and self.__marks>85:\r\n self.fees =25575.0- 0.25*25575.0\r\n elif self.course==1001 and self.__marks<=85:\r\n self.fees =25575.0\r\n elif self.course==1002 and self.__marks>85:\r\n self.fees =15500.0- 0.25*15500.0\r\n elif self.course==1002 and self.__marks<=85:\r\n self.fees =15500.0\r\n print(f\"\\nyou have joined course {self.course} and your fees is {self.fees}\")\r\n \r\n\r\ns1=student()\r\ns1.set_details(int(input(\"enter id: \")),int(input(\"enter marks: \")),int(input(\"enter age: \")))\r\ns1.choose_course()\r\n \r\n \r\n 
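# --- Hedged sketch: the per-level ANSI colouring that CustomFormatter above
# implements, reproduced standalone on a fresh logger (yellow shown here).
import logging

handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(
    "\x1b[33;20m%(levelname)s| %(message)s \x1b[0m"))
demo = logging.getLogger("demo")
demo.addHandler(handler)
demo.setLevel(logging.DEBUG)
demo.warning("rendered in yellow on ANSI-capable terminals")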
\r\n\r\n'''\r\n\r\n#OR\r\n\r\nclass Student:\r\n def init(self):\r\n self.__s_id=None\r\n self.__s_age=None\r\n self.__s_marks=None\r\n def validate_marks(self):\r\n if(self.__s_marks>0 and self.__s_marks<100):\r\n return True\r\n else:\r\n return False\r\n def validate_age(self):\r\n if(self.__s_age>20):\r\n return True\r\n else:\r\n return False\r\n def check_qualification(self):\r\n if(self.validate_marks() and self.validate_age() and self.__s_marks>65):\r\n return True\r\n else:\r\n return False\r\n \r\n \r\n def choose(self,c_id):\r\n if self.check_qualification():\r\n if(c_id == 1001):\r\n f = 25575.0\r\n if(self.__s_marks>85):\r\n fees_to_paid = f - f*0.25\r\n print(\"fees to be paid is \",fees_to_paid)\r\n else:\r\n print(\"fees to be paid is \",f)\r\n elif(c_id == 1002):\r\n f = 15500.0\r\n if(self.__s_marks>85):\r\n fees_to_paid = f - f*0.25\r\n print(\"fees to be paid is \",fees_to_paid)\r\n else:\r\n print(\"fees to be paid is \",f)\r\n else:\r\n print(\"Invalid couse id\") \r\n else:\r\n print(\"not eligible\")\r\n def set_id(self,x):\r\n self.__s_id = x\r\n def get_id(self):\r\n return self.__s_id\r\n def set_age(self,x):\r\n self.__s_age = x\r\n def get_age(self):\r\n return self.__s_age\r\n def set_marks(self,x):\r\n self.__s_marks = x\r\n def get_marks(self):\r\n return self.__s_marks\r\n\r\n \r\ns1=Student()\r\ns1.set_id(1010)\r\ns1.set_age(98)\r\ns1.set_marks(66)\r\ns1.choose(1009)\r\n\r\ns2=Student()\r\ns2.set_id(1011)\r\ns2.set_age(36)\r\ns2.set_marks(99)\r\ns2.choose(1001)\r\n\r\n'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n","repo_name":"frazashraf123/Day-5-Python-","sub_path":"OOPs-2_17 March/program2.py","file_name":"program2.py","file_ext":"py","file_size_in_byte":3746,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"21910250361","text":"import ML_model\nimport threading\n################ Meta Data ######################\n#category={0:'Grade 1',1:'Grade 2',2: 'Grade 3', 3: 'Grade 4'}\n#category_size={0:'<<Large>>',1:'<<Small>>'}\n#################################################\n\nfinal_result_s1 = []\nfinal_result_s2 = []\nfinal_result_s3 = []\nfinal_result_s4 = []\n\n \ndef S1_Result(): \n final_result_s1.append(ML_model.spots_Analyzer('s1'))\n print(\"S1 > \" + final_result_s1[0][0])\n \ndef S2_Result(): \n final_result_s2.append(ML_model.spots_Analyzer('s2'))\n print(\"S2 > \" + final_result_s2[0][0])\n \ndef S3_Result(): \n final_result_s3.append(ML_model.spots_Analyzer('s3'))\n print(\"S3 > \" + final_result_s3[0][0])\n \ndef S4_Result(): \n final_result_s4.append(ML_model.spots_Analyzer('s4'))\n print(\"S4 > \" + final_result_s4[0][0])\n\n\n \n \n# creating thread \nt1 = threading.Thread(target=S1_Result) \nt2 = threading.Thread(target=S2_Result)\nt3 = threading.Thread(target=S3_Result)\nt4 = threading.Thread(target=S4_Result)\n \n# starting thread 1 \nt1.start() \n# starting thread 2 \nt2.start()\n# starting thread 3 \nt3.start()\n# starting thread 4 \nt4.start() \n \n# wait until thread 1 is completely executed \nt1.join() \n# wait until thread 2 is completely executed \nt2.join()\n# wait until thread 3 is completely executed \nt3.join()\n# wait until thread 4 is completely executed \nt4.join() \n\nif (final_result_s1[0][1] == 3 or\n final_result_s2[0][1] == 3 or\n 
final_result_s3[0][1] == 3 or\n final_result_s4[0][1] == 3 ):\n print(\"Final Grade => \"+ \"Grade 4\")\nelif(final_result_s1[0][1] == 2 or\n final_result_s2[0][1] == 2 or\n final_result_s3[0][1] == 2 or\n final_result_s4[0][1] == 2 ):\n print(\"Final Grade => \"+ \"Grade 3\")\nelif(final_result_s1[0][1] == 1 or\n final_result_s2[0][1] == 1 or\n final_result_s3[0][1] == 1 or\n final_result_s4[0][1] == 1 ):\n final = ML_model.final_grade(1,'s2') ## Apply Size Analyzer\n print(\"Final Grade => \"+ final)\nelse:\n final = ML_model.final_grade(0,'s2') ## Apply Size Analyzer\n print(\"Final Grade => \"+ final)\n \n# All the threads completely executed \nprint(\"Done!\")\n\n\n# instruction for versions\n'''\nI am trying to use the tensorflow/keras as server with multithreading.\nSame error with:\nkeras==2.3.1\ntensorflow==2.0.0\nSolve the issue with:\ntensorflow==1.15 @caigen\nhttps://github.com/keras-team/keras/issues/13353\n'''\n\n\n\n\n","repo_name":"jpssasadara/Mango-project-release-V1","sub_path":"ML_Part_backend/back-end-V1/ML_model_Testing_multi_threding_Env.py","file_name":"ML_model_Testing_multi_threding_Env.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"13142574463","text":"import os\nimport torch\nfrom PIL import Image, ImageFile\nfrom torchvision import transforms\nimport torchvision.datasets.folder\nfrom torch.utils.data import TensorDataset, Subset\nfrom torchvision.datasets import MNIST, ImageFolder\nfrom torchvision.transforms.functional import rotate\n\nfrom wilds.datasets.camelyon17_dataset import Camelyon17Dataset\nfrom wilds.datasets.fmow_dataset import FMoWDataset\n\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\nDATASETS = [\n # Debug\n \"Debug28\",\n \"Debug224\",\n # Small images\n \"ColoredMNIST\",\n \"RotatedMNIST\",\n # Big images\n \"VLCS\",\n \"PACS\",\n \"OfficeHome\",\n \"TerraIncognita\",\n \"DomainNet\",\n \"SVIRO\",\n # WILDS datasets\n \"WILDSCamelyon\",\n \"WILDSFMoW\"\n]\n\n\ndef get_dataset_class(dataset_name):\n \"\"\"Return the dataset class with the given name.\"\"\"\n if dataset_name not in globals():\n raise NotImplementedError(\"Dataset not found: {}\".format(dataset_name))\n return globals()[dataset_name]\n\n\ndef num_environments(dataset_name):\n return len(get_dataset_class(dataset_name).ENVIRONMENTS)\n\n\nclass MultipleDomainDataset:\n N_STEPS = 5001 # Default, subclasses may override\n CHECKPOINT_FREQ = 100 # Default, subclasses may override\n N_WORKERS = 8 # Default, subclasses may override\n ENVIRONMENTS = None # Subclasses should override\n INPUT_SHAPE = None # Subclasses should override\n\n def __getitem__(self, index):\n return self.datasets[index]\n\n def __len__(self):\n return len(self.datasets)\n\n\nclass Debug(MultipleDomainDataset):\n def __init__(self, root, test_envs, hparams):\n super().__init__()\n self.input_shape = self.INPUT_SHAPE\n self.num_classes = 2\n self.datasets = []\n for _ in [0, 1, 2]:\n self.datasets.append(\n TensorDataset(\n torch.randn(16, *self.INPUT_SHAPE),\n torch.randint(0, self.num_classes, (16,))\n )\n )\n\n\nclass Debug28(Debug):\n INPUT_SHAPE = (3, 28, 28)\n ENVIRONMENTS = ['0', '1', '2']\n\n\nclass Debug224(Debug):\n INPUT_SHAPE = (3, 224, 224)\n ENVIRONMENTS = ['0', '1', '2']\n\n\nclass MultipleEnvironmentMNIST(MultipleDomainDataset):\n def __init__(self, root, environments, dataset_transform, input_shape,\n num_classes):\n super().__init__()\n if root is None:\n raise ValueError('Data directory not 
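# --- Hedged sketch: the same fan-out/fan-in as the four threads above, using
# concurrent.futures so results come back as return values rather than
# through the shared final_result_s* lists. analyze() is a stand-in for
# ML_model.spots_Analyzer.
from concurrent.futures import ThreadPoolExecutor

def analyze(spot):  # placeholder for ML_model.spots_Analyzer(spot)
    return ("Grade 1", 0)

spots = ["s1", "s2", "s3", "s4"]
with ThreadPoolExecutor(max_workers=4) as pool:
    results = dict(zip(spots, pool.map(analyze, spots)))
print(results["s1"][0])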
specified!')\n\n original_dataset_tr = MNIST(root, train=True, download=True)\n original_dataset_te = MNIST(root, train=False, download=True)\n\n original_images = torch.cat((original_dataset_tr.data,\n original_dataset_te.data))\n\n original_labels = torch.cat((original_dataset_tr.targets,\n original_dataset_te.targets))\n\n shuffle = torch.randperm(len(original_images))\n\n original_images = original_images[shuffle]\n original_labels = original_labels[shuffle]\n\n self.datasets = []\n\n for i in range(len(environments)):\n images = original_images[i::len(environments)]\n labels = original_labels[i::len(environments)]\n self.datasets.append(dataset_transform(images, labels, environments[i]))\n\n self.input_shape = input_shape\n self.num_classes = num_classes\n\n\nclass ColoredMNIST(MultipleEnvironmentMNIST):\n ENVIRONMENTS = ['+90%', '+80%', '-90%']\n\n def __init__(self, root, test_envs, hparams):\n super(ColoredMNIST, self).__init__(root, [0.1, 0.2, 0.9],\n self.color_dataset, (2, 28, 28,), 2)\n\n self.input_shape = (2, 28, 28,)\n self.num_classes = 2\n\n def color_dataset(self, images, labels, environment):\n # # Subsample 2x for computational convenience\n # images = images.reshape((-1, 28, 28))[:, ::2, ::2]\n # Assign a binary label based on the digit\n labels = (labels < 5).float()\n # Flip label with probability 0.25\n labels = self.torch_xor_(labels,\n self.torch_bernoulli_(0.25, len(labels)))\n\n # Assign a color based on the label; flip the color with probability e\n colors = self.torch_xor_(labels,\n self.torch_bernoulli_(environment,\n len(labels)))\n images = torch.stack([images, images], dim=1)\n # Apply the color to the image by zeroing out the other color channel\n images[torch.tensor(range(len(images))), (\n 1 - colors).long(), :, :] *= 0\n\n x = images.float().div_(255.0)\n y = labels.view(-1).long()\n\n return TensorDataset(x, y)\n\n def torch_bernoulli_(self, p, size):\n return (torch.rand(size) < p).float()\n\n def torch_xor_(self, a, b):\n return (a - b).abs()\n\n\nclass RotatedMNIST(MultipleEnvironmentMNIST):\n ENVIRONMENTS = ['0', '15', '30', '45', '60', '75']\n\n def __init__(self, root, test_envs, hparams):\n super(RotatedMNIST, self).__init__(root, [0, 15, 30, 45, 60, 75],\n self.rotate_dataset, (1, 28, 28,), 10)\n\n def rotate_dataset(self, images, labels, angle):\n rotation = transforms.Compose([\n transforms.ToPILImage(),\n transforms.Lambda(lambda x: rotate(x, angle, fill=(0,),\n interpolation=torchvision.transforms.InterpolationMode.BILINEAR)),\n transforms.ToTensor()])\n\n x = torch.zeros(len(images), 1, 28, 28)\n for i in range(len(images)):\n x[i] = rotation(images[i])\n\n y = labels.view(-1)\n\n return TensorDataset(x, y)\n\n\nclass MultipleEnvironmentImageFolder(MultipleDomainDataset):\n def __init__(self, root, test_envs, augment, hparams):\n super().__init__()\n environments = [f.name for f in os.scandir(root) if f.is_dir()]\n environments = sorted(environments)\n\n transform = transforms.Compose([\n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])\n\n augment_transform = transforms.Compose([\n # transforms.Resize((224,224)),\n transforms.RandomResizedCrop(224, scale=(0.7, 1.0)),\n transforms.RandomHorizontalFlip(),\n transforms.ColorJitter(0.3, 0.3, 0.3, 0.3),\n transforms.RandomGrayscale(),\n transforms.ToTensor(),\n transforms.Normalize(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n ])\n\n self.datasets = []\n for i, environment in 
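# --- Hedged sketch: the Bernoulli-flip + XOR arithmetic that
# ColoredMNIST.color_dataset above uses, traced on a tiny tensor.
import torch

torch.manual_seed(0)
labels = (torch.arange(10) < 5).float()        # digit < 5 -> class 1
flips = (torch.rand(10) < 0.25).float()        # torch_bernoulli_(0.25, n)
noisy = (labels - flips).abs()                 # torch_xor_: flip where 1
colors = (noisy - (torch.rand(10) < 0.9).float()).abs()  # env e = 0.9
print(labels, noisy, colors, sep="\n")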
enumerate(environments):\n\n if augment and (i not in test_envs):\n env_transform = augment_transform\n else:\n env_transform = transform\n\n path = os.path.join(root, environment)\n env_dataset = ImageFolder(path,\n transform=env_transform)\n\n self.datasets.append(env_dataset)\n\n self.input_shape = (3, 224, 224,)\n self.num_classes = len(self.datasets[-1].classes)\n\n\nclass VLCS(MultipleEnvironmentImageFolder):\n CHECKPOINT_FREQ = 300\n ENVIRONMENTS = [\"C\", \"L\", \"S\", \"V\"]\n\n def __init__(self, root, test_envs, hparams):\n self.dir = os.path.join(root, \"VLCS/\")\n super().__init__(self.dir, test_envs, hparams['data_augmentation'], hparams)\n\n\nclass PACS(MultipleEnvironmentImageFolder):\n CHECKPOINT_FREQ = 300\n ENVIRONMENTS = [\"A\", \"C\", \"P\", \"S\"]\n\n def __init__(self, root, test_envs, hparams):\n self.dir = os.path.join(root, \"PACS/\")\n super().__init__(self.dir, test_envs, hparams['data_augmentation'], hparams)\n\n\nclass DomainNet(MultipleEnvironmentImageFolder):\n CHECKPOINT_FREQ = 1000\n ENVIRONMENTS = [\"clip\", \"info\", \"paint\", \"quick\", \"real\", \"sketch\"]\n\n def __init__(self, root, test_envs, hparams):\n self.dir = os.path.join(root, \"domain_net/\")\n super().__init__(self.dir, test_envs, hparams['data_augmentation'], hparams)\n\n\nclass OfficeHome(MultipleEnvironmentImageFolder):\n CHECKPOINT_FREQ = 300\n ENVIRONMENTS = [\"A\", \"C\", \"P\", \"R\"]\n\n def __init__(self, root, test_envs, hparams):\n self.dir = os.path.join(root, \"office_home/\")\n super().__init__(self.dir, test_envs, hparams['data_augmentation'], hparams)\n\n\nclass TerraIncognita(MultipleEnvironmentImageFolder):\n CHECKPOINT_FREQ = 300\n ENVIRONMENTS = [\"L100\", \"L38\", \"L43\", \"L46\"]\n\n def __init__(self, root, test_envs, hparams):\n self.dir = os.path.join(root, \"terra_incognita/\")\n super().__init__(self.dir, test_envs, hparams['data_augmentation'], hparams)\n\n\nclass SVIRO(MultipleEnvironmentImageFolder):\n CHECKPOINT_FREQ = 300\n ENVIRONMENTS = [\"aclass\", \"escape\", \"hilux\", \"i3\", \"lexus\", \"tesla\", \"tiguan\", \"tucson\", \"x5\", \"zoe\"]\n\n def __init__(self, root, test_envs, hparams):\n self.dir = os.path.join(root, \"sviro/\")\n super().__init__(self.dir, test_envs, hparams['data_augmentation'], hparams)\n\n\nclass WILDSEnvironment:\n def __init__(\n self,\n wilds_dataset,\n metadata_name,\n metadata_value,\n transform=None):\n self.name = metadata_name + \"_\" + str(metadata_value)\n\n metadata_index = wilds_dataset.metadata_fields.index(metadata_name)\n metadata_array = wilds_dataset.metadata_array\n subset_indices = torch.where(\n metadata_array[:, metadata_index] == metadata_value)[0]\n\n self.dataset = wilds_dataset\n self.indices = subset_indices\n self.transform = transform\n\n def __getitem__(self, i):\n x = self.dataset.get_input(self.indices[i])\n if type(x).__name__ != \"Image\":\n x = Image.fromarray(x)\n\n y = self.dataset.y_array[self.indices[i]]\n if self.transform is not None:\n x = self.transform(x)\n return x, y\n\n def __len__(self):\n return len(self.indices)\n\n\nclass WILDSDataset(MultipleDomainDataset):\n INPUT_SHAPE = (3, 224, 224)\n\n def __init__(self, dataset, metadata_name, test_envs, augment, hparams):\n super().__init__()\n\n transform = transforms.Compose([\n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])\n\n augment_transform = transforms.Compose([\n transforms.Resize((224, 224)),\n transforms.RandomResizedCrop(224, 
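# --- Hedged sketch: the metadata filtering WILDSEnvironment performs above,
# reproduced on a toy tensor so the torch.where() subsetting is visible.
import torch

metadata_array = torch.tensor([[0, 3], [1, 3], [0, 7], [1, 7]])
metadata_index = 1                   # column holding e.g. the hospital id
metadata_value = 7
indices = torch.where(metadata_array[:, metadata_index] == metadata_value)[0]
print(indices)  # tensor([2, 3]) -> the rows belonging to hospital 7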
scale=(0.7, 1.0)),\n transforms.RandomHorizontalFlip(),\n transforms.ColorJitter(0.3, 0.3, 0.3, 0.3),\n transforms.RandomGrayscale(),\n transforms.ToTensor(),\n transforms.Normalize(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n ])\n\n self.datasets = []\n\n for i, metadata_value in enumerate(\n self.metadata_values(dataset, metadata_name)):\n if augment and (i not in test_envs):\n env_transform = augment_transform\n else:\n env_transform = transform\n\n env_dataset = WILDSEnvironment(\n dataset, metadata_name, metadata_value, env_transform)\n\n self.datasets.append(env_dataset)\n\n self.input_shape = (3, 224, 224,)\n self.num_classes = dataset.n_classes\n\n def metadata_values(self, wilds_dataset, metadata_name):\n metadata_index = wilds_dataset.metadata_fields.index(metadata_name)\n metadata_vals = wilds_dataset.metadata_array[:, metadata_index]\n return sorted(list(set(metadata_vals.view(-1).tolist())))\n\n\nclass WILDSCamelyon(WILDSDataset):\n ENVIRONMENTS = [\"hospital_0\", \"hospital_1\", \"hospital_2\", \"hospital_3\",\n \"hospital_4\"]\n\n def __init__(self, root, test_envs, hparams):\n dataset = Camelyon17Dataset(root_dir=root)\n super().__init__(\n dataset, \"hospital\", test_envs, hparams['data_augmentation'], hparams)\n\n\nclass WILDSFMoW(WILDSDataset):\n ENVIRONMENTS = [\"region_0\", \"region_1\", \"region_2\", \"region_3\",\n \"region_4\", \"region_5\"]\n\n def __init__(self, root, test_envs, hparams):\n dataset = FMoWDataset(root_dir=root)\n super().__init__(\n dataset, \"region\", test_envs, hparams['data_augmentation'], hparams)\n\n\n\"\"\"\nThings that don't belong anywhere else\n\"\"\"\n\nimport hashlib\nimport json\nimport os\nimport sys\nfrom shutil import copyfile\nfrom collections import OrderedDict, defaultdict\nfrom numbers import Number\nimport operator\n\nimport numpy as np\nimport torch\nimport tqdm\nfrom collections import Counter\n\n\ndef l2_between_dicts(dict_1, dict_2):\n assert len(dict_1) == len(dict_2)\n dict_1_values = [dict_1[key] for key in sorted(dict_1.keys())]\n dict_2_values = [dict_2[key] for key in sorted(dict_1.keys())]\n return (\n torch.cat(tuple([t.view(-1) for t in dict_1_values])) -\n torch.cat(tuple([t.view(-1) for t in dict_2_values]))\n ).pow(2).mean()\n\n\nclass MovingAverage:\n\n def __init__(self, ema, oneminusema_correction=True):\n self.ema = ema\n self.ema_data = {}\n self._updates = 0\n self._oneminusema_correction = oneminusema_correction\n\n def update(self, dict_data):\n ema_dict_data = {}\n for name, data in dict_data.items():\n data = data.view(1, -1)\n if self._updates == 0:\n previous_data = torch.zeros_like(data)\n else:\n previous_data = self.ema_data[name]\n\n ema_data = self.ema * previous_data + (1 - self.ema) * data\n if self._oneminusema_correction:\n # correction by 1/(1 - self.ema)\n # so that the gradients amplitude backpropagated in data is independent of self.ema\n ema_dict_data[name] = ema_data / (1 - self.ema)\n else:\n ema_dict_data[name] = ema_data\n self.ema_data[name] = ema_data.clone().detach()\n\n self._updates += 1\n return ema_dict_data\n\n\ndef make_weights_for_balanced_classes(dataset):\n counts = Counter()\n classes = []\n for _, y in dataset:\n y = int(y)\n counts[y] += 1\n classes.append(y)\n\n n_classes = len(counts)\n\n weight_per_class = {}\n for y in counts:\n weight_per_class[y] = 1 / (counts[y] * n_classes)\n\n weights = torch.zeros(len(dataset))\n for i, y in enumerate(classes):\n weights[i] = weight_per_class[int(y)]\n\n return weights\n\n\ndef pdb():\n sys.stdout = 
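# --- Hedged sketch: feeding make_weights_for_balanced_classes-style weights
# (1 / (class_count * n_classes)) into a WeightedRandomSampler so the
# minority class is drawn about as often as the majority one.
import torch
from collections import Counter

ys = [0, 0, 0, 1]                    # imbalanced toy labels
counts, n_classes = Counter(ys), len(set(ys))
weights = torch.tensor([1.0 / (counts[y] * n_classes) for y in ys])
sampler = torch.utils.data.WeightedRandomSampler(
    weights, num_samples=8, replacement=True)
print(list(sampler))                 # indices, roughly class-balanced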
sys.__stdout__\n import pdb\n print(\"Launching PDB, enter 'n' to step to parent function.\")\n pdb.set_trace()\n\n\ndef seed_hash(*args):\n \"\"\"\n Derive an integer hash from all args, for use as a random seed.\n \"\"\"\n args_str = str(args)\n return int(hashlib.md5(args_str.encode(\"utf-8\")).hexdigest(), 16) % (2 ** 31)\n\n\ndef print_separator():\n print(\"=\" * 80)\n\n\ndef print_row(row, colwidth=10, latex=False):\n if latex:\n sep = \" & \"\n end_ = \"\\\\\\\\\"\n else:\n sep = \" \"\n end_ = \"\"\n\n def format_val(x):\n if np.issubdtype(type(x), np.floating):\n x = \"{:.10f}\".format(x)\n return str(x).ljust(colwidth)[:colwidth]\n\n print(sep.join([format_val(x) for x in row]), end_)\n\n\nclass _InfiniteSampler(torch.utils.data.Sampler):\n \"\"\"Wraps another Sampler to yield an infinite stream.\"\"\"\n\n def __init__(self, sampler):\n self.sampler = sampler\n\n def __iter__(self):\n while True:\n for batch in self.sampler:\n yield batch\n\n\nclass InfiniteDataLoader:\n def __init__(self, dataset, weights, batch_size, num_workers):\n super().__init__()\n\n if weights is not None:\n sampler = torch.utils.data.WeightedRandomSampler(weights,\n replacement=True,\n num_samples=batch_size)\n else:\n sampler = torch.utils.data.RandomSampler(dataset,\n replacement=True)\n\n if weights == None:\n weights = torch.ones(len(dataset))\n\n batch_sampler = torch.utils.data.BatchSampler(\n sampler,\n batch_size=batch_size,\n drop_last=True)\n\n self._infinite_iterator = iter(torch.utils.data.DataLoader(\n dataset,\n num_workers=num_workers,\n batch_sampler=_InfiniteSampler(batch_sampler)\n ))\n\n def __iter__(self):\n while True:\n yield next(self._infinite_iterator)\n\n def __len__(self):\n raise ValueError\n\n\nclass FastDataLoader:\n \"\"\"DataLoader wrapper with slightly improved speed by not respawning worker\n processes at every epoch.\"\"\"\n\n def __init__(self, dataset, batch_size, num_workers):\n super().__init__()\n\n batch_sampler = torch.utils.data.BatchSampler(\n torch.utils.data.RandomSampler(dataset, replacement=False),\n batch_size=batch_size,\n drop_last=False\n )\n\n self._infinite_iterator = iter(torch.utils.data.DataLoader(\n dataset,\n num_workers=num_workers,\n batch_sampler=_InfiniteSampler(batch_sampler)\n ))\n\n self._length = len(batch_sampler)\n\n def __iter__(self):\n for _ in range(len(self)):\n yield next(self._infinite_iterator)\n\n def __len__(self):\n return self._length\n\n\nclass _SplitDataset(torch.utils.data.Dataset):\n \"\"\"Used by split_dataset\"\"\"\n\n def __init__(self, underlying_dataset, keys):\n super(_SplitDataset, self).__init__()\n self.underlying_dataset = underlying_dataset\n self.keys = keys\n\n def __getitem__(self, key):\n return self.underlying_dataset[self.keys[key]]\n\n def __len__(self):\n return len(self.keys)\n\n\ndef split_dataset(dataset, n, seed=0):\n \"\"\"\n Return a pair of datasets corresponding to a random split of the given\n dataset, with n datapoints in the first dataset and the rest in the last,\n using the given random seed\n \"\"\"\n assert (n <= len(dataset))\n keys = list(range(len(dataset)))\n np.random.RandomState(seed).shuffle(keys)\n keys_1 = keys[:n]\n keys_2 = keys[n:]\n return _SplitDataset(dataset, keys_1), _SplitDataset(dataset, keys_2)\n\n\ndef random_pairs_of_minibatches(minibatches):\n perm = torch.randperm(len(minibatches)).tolist()\n pairs = []\n\n for i in range(len(minibatches)):\n j = i + 1 if i < (len(minibatches) - 1) else 0\n\n xi, yi = minibatches[perm[i]][0], minibatches[perm[i]][1]\n xj, yj = 
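# --- Hedged sketch: the _InfiniteSampler trick above in miniature --
# wrapping a finite BatchSampler in an endless generator so a training loop
# can draw minibatches forever without epoch bookkeeping.
import itertools
import torch

ds = torch.utils.data.TensorDataset(torch.arange(6))
batches = torch.utils.data.BatchSampler(
    torch.utils.data.RandomSampler(ds, replacement=True),
    batch_size=2, drop_last=True)

def infinite(sampler):
    while True:
        yield from sampler

for batch in itertools.islice(infinite(batches), 4):
    print(batch)                      # four minibatches of indices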
minibatches[perm[j]][0], minibatches[perm[j]][1]\n\n min_n = min(len(xi), len(xj))\n\n pairs.append(((xi[:min_n], yi[:min_n]), (xj[:min_n], yj[:min_n])))\n\n return pairs\n\n\ndef accuracy(network, loader, weights, device):\n correct = 0\n total = 0\n weights_offset = 0\n\n network.eval()\n with torch.no_grad():\n for x, y in loader:\n x = x.to(device)\n y = y.to(device)\n p = network.predict(x)\n if weights is None:\n batch_weights = torch.ones(len(x))\n else:\n batch_weights = weights[weights_offset: weights_offset + len(x)]\n weights_offset += len(x)\n batch_weights = batch_weights.to(device)\n if p.size(1) == 1:\n correct += (p.gt(0).eq(y).float() * batch_weights.view(-1, 1)).sum().item()\n else:\n correct += (p.argmax(1).eq(y).float() * batch_weights).sum().item()\n total += batch_weights.sum().item()\n network.train()\n\n return correct / total\n\n\nclass Tee:\n def __init__(self, fname, mode=\"a\"):\n self.stdout = sys.stdout\n self.file = open(fname, mode)\n\n def write(self, message):\n self.stdout.write(message)\n self.file.write(message)\n self.flush()\n\n def flush(self):\n self.stdout.flush()\n self.file.flush()\n\n\nclass ParamDict(OrderedDict):\n \"\"\"Code adapted from https://github.com/Alok/rl_implementations/tree/master/reptile.\n A dictionary where the values are Tensors, meant to represent weights of\n a model. This subclass lets you perform arithmetic on weights directly.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, *kwargs)\n\n def _prototype(self, other, op):\n if isinstance(other, Number):\n return ParamDict({k: op(v, other) for k, v in self.items()})\n elif isinstance(other, dict):\n return ParamDict({k: op(self[k], other[k]) for k in self})\n else:\n raise NotImplementedError\n\n def __add__(self, other):\n return self._prototype(other, operator.add)\n\n def __rmul__(self, other):\n return self._prototype(other, operator.mul)\n\n __mul__ = __rmul__\n\n def __neg__(self):\n return ParamDict({k: -v for k, v in self.items()})\n\n def __rsub__(self, other):\n # a- b := a + (-b)\n return self.__add__(other.__neg__())\n\n __sub__ = __rsub__\n\n def __truediv__(self, other):\n return self._prototype(other, operator.truediv)\n\n\ndef get_data_loaders(args, env_num, hparams):\n if args.dataset_name in DATASETS:\n dataset = DATASETS[args.dataset_name](args.root_dir,\n args.test_envs, hparams)\n else:\n raise NotImplementedError\n\n # Split each env into an 'in-split' and an 'out-split'. We'll train on\n # each in-split except the test envs, and evaluate on all splits.\n\n # To allow unsupervised domain adaptation experiments, we split each test\n # env into 'in-split', 'uda-split' and 'out-split'. The 'in-split' is used\n # by collect_results.py to compute classification accuracies. The\n # 'out-split' is used by the Oracle model selectino method. The unlabeled\n # samples in 'uda-split' are passed to the algorithm at training time if\n # args.task == \"domain_adaptation\". 
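# --- Hedged sketch: what the ParamDict operator overloads above buy you --
# arithmetic over whole weight dictionaries, e.g. a Reptile-style update
# new = old + lr * (adapted - old). Plain dicts are used here; ParamDict
# does the same via __add__/__mul__/__sub__. One caveat in the class as
# written: __rsub__ returns self - other, so `scalar - ParamDict` silently
# computes ParamDict - scalar.
import torch

old = {"w": torch.zeros(2), "b": torch.zeros(1)}
adapted = {"w": torch.ones(2), "b": torch.ones(1)}
lr = 0.5
new = {k: old[k] + lr * (adapted[k] - old[k]) for k in old}
print(new)  # {'w': tensor([0.5000, 0.5000]), 'b': tensor([0.5000])}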
If we are interested in comparing\n # domain generalization and domain adaptation results, then domain\n # generalization algorithms should create the same 'uda-splits', which will\n # be discared at training.\n\n in_splits = []\n out_splits = []\n uda_splits = []\n for env_i, env in enumerate(dataset):\n uda = []\n\n out, in_ = split_dataset(env,\n int(len(env) * args.holdout_fraction),\n seed_hash(args.trial_seed, env_i))\n\n if env_i in args.test_envs:\n uda, in_ = split_dataset(in_,\n int(len(in_) * args.uda_holdout_fraction),\n seed_hash(args.trial_seed, env_i))\n\n if hparams['class_balanced']:\n in_weights = make_weights_for_balanced_classes(in_)\n out_weights = make_weights_for_balanced_classes(out)\n if uda is not None:\n uda_weights = make_weights_for_balanced_classes(uda)\n else:\n in_weights, out_weights, uda_weights = None, None, None\n in_splits.append((in_, in_weights))\n out_splits.append((out, out_weights))\n if len(uda):\n uda_splits.append((uda, uda_weights))\n\n if args.task == \"domain_adaptation\" and len(uda_splits) == 0:\n raise ValueError(\"Not enough unlabeled samples for domain adaptation.\")\n\n train_loaders = [InfiniteDataLoader(\n dataset=env,\n weights=env_weights,\n batch_size=args.batch_size,\n num_workers=dataset.N_WORKERS)\n for i, (env, env_weights) in enumerate(in_splits)\n if i not in args.test_envs]\n\n uda_loaders = [InfiniteDataLoader(\n dataset=env,\n weights=env_weights,\n batch_size=args.batch_size,\n num_workers=dataset.N_WORKERS)\n for i, (env, env_weights) in enumerate(uda_splits)\n if i in args.test_envs]\n\n eval_loaders = [FastDataLoader(\n dataset=env,\n batch_size=args.batch_size,\n num_workers=dataset.N_WORKERS)\n for env, _ in (in_splits + out_splits + uda_splits)]\n\n return train_loaders[env_num], uda_loaders[env_num], eval_loaders[env_num]\n","repo_name":"alexchandler100/MAT_180","sub_path":"learning-robust-representations-learned-through-distribution-shift/utils_refactor/domainbed_utils.py","file_name":"domainbed_utils.py","file_ext":"py","file_size_in_byte":24408,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"5"} +{"seq_id":"43491184248","text":"from __future__ import print_function\n\nimport os\nimport sys\nimport time\nimport re\nimport csv\nimport textwrap\n\n__all__ = [\"CheckFileExt\", \"CheckTextValue\", \"DoesSMILESFileContainTitleLine\", \"GetExamplesTextFromDocOptText\", \"GetExcelStyleColumnLabel\", \"GetMayaChemToolsLibDataPath\", \"GetTextLinesWords\", \"GetWallClockAndProcessorTime\", \"GetFormattedElapsedTime\", \"IsEmpty\", \"IsFloat\", \"IsInteger\", \"IsNumber\", \"JoinWords\", \"ParseFileName\", \"PrintError\", \"PrintInfo\", \"PrintWarning\", \"ProcessOptionInfileParameters\", \"ProcessOptionOutfileParameters\", \"ReplaceHTMLEntitiesInText\", \"ValidateOptionsDistinctFileNames\", \"ValidateOptionFileExt\", \"ValidateOptionFilePath\", \"ValidateOptionFloatValue\", \"ValidateOptionIntegerValue\", \"ValidateOptionNumberValue\", \"ValidateOptionNumberValues\", \"ValidateOptionsOutputFileOverwrite\", \"ValidateOptionTextValue\", \"TruncateText\", \"WrapText\"]\n\ndef CheckFileExt(FileName, FileExts):\n \"\"\"Check file type based on the specified file extensions delimited by spaces.\n \n Arguments:\n FileName (str): Name of a file.\n FileExts (str): Space delimited string containing valid file extensions.\n\n Returns:\n bool : True, FileName contains a valid file extension; Otherwise, False.\n\n \"\"\"\n \n for FileExt in FileExts.split():\n if re.search(r\"\\.%s$\" % 
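# --- Hedged sketch: the in/out/uda split sizing used by get_data_loaders
# above, as plain arithmetic on a 100-sample environment. The fractions are
# illustrative values, not the script's defaults.
n_env = 100
holdout_fraction, uda_holdout_fraction = 0.2, 0.5
n_out = int(n_env * holdout_fraction)     # 20 -> 'out-split' (model selection)
n_in = n_env - n_out                      # 80 -> 'in-split'
n_uda = int(n_in * uda_holdout_fraction)  # 40 -> 'uda-split' (test envs only)
print(n_in - n_uda, n_uda, n_out)         # 40 40 20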
FileExt, FileName, re.IGNORECASE):\n return True\n \n return False\n\ndef CheckTextValue(Value, ValidValues):\n \"\"\"Check text value based on the specified valid values delimited by spaces.\n\n Arguments:\n Value (str): Text value\n ValidValues (str): Space delimited string containing valid values.\n\n Returns:\n bool : True, Value is valid; Otherwise, False.\n\n \"\"\"\n \n ValidValues = re.sub(' ', '|', ValidValues)\n if re.match(\"^(%s)$\" % ValidValues, Value, re.IGNORECASE):\n return True\n \n return False\n\ndef GetTextLinesWords(TextFilePath, Delimiter, QuoteChar, IgnoreHeaderLine):\n \"\"\"Parse lines in the specified text file into words in a line and return a list containing\n list of parsed line words.\n\n Arguments:\n TextFilePath (str): Text file name including file path.\n Delimiter (str): Delimiter for parsing text lines.\n QuoteChar (str): Quote character for line words.\n IgnoreHeaderLine (bool): A flag indicating whether to ignore first\n valid data line corresponding to header line.\n\n Returns:\n list : A list of lists containing parsed words for lines.\n\n Notes:\n The lines starting with # or // are considered comment lines and are\n ignored during parsing along with any empty lines.\n\n \"\"\"\n TextFile = open(TextFilePath, \"r\")\n if TextFile is None:\n PrintError(\"Couldn't open text file: %s.\\n\" % (TextFilePath))\n\n # Collect text lines...\n TextLines = []\n FirstValidLine = True\n for Line in TextFile:\n Line = Line.strip()\n \n # Ignore empty lines...\n if not len(Line):\n continue\n \n # Ignore comments...\n if re.match(\"^(#|\\/\\/)\", Line, re.I):\n continue\n\n # Ignore header line...\n if FirstValidLine:\n FirstValidLine = False\n if IgnoreHeaderLine:\n continue\n \n TextLines.append(Line)\n \n TextFile.close()\n\n # Parse text lines...\n TextLinesWords = []\n \n TextLinesReader = csv.reader(TextLines, delimiter = Delimiter, quotechar = QuoteChar)\n for LineWords in TextLinesReader:\n TextLinesWords.append(LineWords)\n \n return TextLinesWords\n \ndef DoesSMILESFileContainTitleLine(FileName):\n \"\"\"Determine whether the SMILES file contain a title line based on the presence\n of a string SMILES, Name or ID in the first line.\n\n Arguments:\n FileName (str): Name of a file.\n\n Returns:\n bool : True, File contains title line; Otherwise, False.\n\n \"\"\"\n \n Infile = open(FileName, \"r\")\n if Infile is None:\n return False\n\n Line = Infile.readline()\n Infile.close()\n\n if re.search(\"(SMILES|Name|ID)\", Line, re.I):\n return True\n \n return False\n \ndef GetExamplesTextFromDocOptText(DocOptText):\n \"\"\"Get script usage example lines from a docopt doc string. 
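# --- Hedged sketch: CheckFileExt above, mirrored as a one-off so the
# anchored, case-insensitive extension regex can be exercised directly.
import re

def check_file_ext(name, exts):
    return any(re.search(r"\.%s$" % ext, name, re.IGNORECASE)
               for ext in exts.split())

print(check_file_ext("mols.SMI", "smi csv tsv txt"))  # True
print(check_file_ext("mols.sdf", "smi csv tsv txt"))  # False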
The example text\n line start from a line containing `Examples:` keyword at the beginning of the line.\n \n Arguments:\n DocOptText (str): Doc string containing script usage examples lines starting with\n a line marked by `Examples:` keyword at the beginning of a line.\n\n Returns:\n str : A string containing text lines retrieved from the examples section of\n DocOptText parameter.\n\n \"\"\"\n \n ExamplesStart = re.compile(\"^Examples:\", re.IGNORECASE)\n ExamplesEnd = re.compile(\"^(Author:|See also:|Copyright:)\", re.IGNORECASE)\n \n ExamplesText = 'Examples text is not available';\n ExamplesTextFound = False\n \n for Line in DocOptText.splitlines():\n if ExamplesStart.match(Line):\n ExamplesText = 'Examples:';\n ExamplesTextFound = True\n continue\n \n if ExamplesEnd.match(Line):\n break\n \n if ExamplesTextFound:\n ExamplesText += \"\\n\" + Line;\n \n return ExamplesText\n\ndef GetExcelStyleColumnLabel(ColNum):\n \"\"\"Return Excel style column label for a colum number.\n \n Arguments:\n ColNum (int): Column number\n\n Returns:\n str : Excel style column label.\n\n \"\"\"\n Letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n \n ColLabelList = []\n while ColNum:\n ColNum, SubColNum = divmod(ColNum - 1, 26)\n ColLabelList[:0] = Letters[SubColNum]\n \n return ''.join(ColLabelList)\n \ndef GetWallClockAndProcessorTime():\n \"\"\"Get wallclock and processor times in seconds.\n \n Returns:\n float : Wallclock time.\n float : Processor time.\n\n \"\"\"\n return (time.time(), time.clock())\n\ndef GetMayaChemToolsLibDataPath():\n \"\"\"Get location of MayaChemTools lib data directory.\n \n Returns:\n str : Location of MayaChemTools lib data directory.\n\n Notes:\n The location of MayaChemTools lib data directory is determined relative to\n MayaChemTools python lib directory name available through sys.path.\n\n \"\"\"\n MayaChemToolsDataPath = \"\"\n \n for PathEntry in sys.path:\n if re.search(\"MayaChemTools\", PathEntry, re.I) and re.search(\"Python\", PathEntry, re.I):\n MayaChemToolsDataPath = os.path.join( PathEntry, \"..\", \"data\")\n break\n else:\n PrintInfo(\"PathEntry didn't match\")\n \n if not len(MayaChemToolsDataPath):\n PrintWarning(\"MayaChemTools lib directory location doesn't appear to exist in system search path specified by sys.path...\")\n \n return MayaChemToolsDataPath\n \ndef GetFormattedElapsedTime(StartingWallClockTime, StartingProcessorTime):\n \"\"\"Get elapsed wallclock and processor times as a string in the following\n format: %d wallclock secs ( %.2f process secs).\n \n Arguments:\n StartingWallClockTime (float): Starting wallclock time in seconds.\n StartingProcessorTime (float): Starting processor time in seconds.\n\n Returns:\n str : Elapsed time formatted as: %d wallclock secs ( %.2f process secs)\n\n \"\"\"\n \n ElapsedWallClockTime = time.time() - StartingWallClockTime\n ElapsedProcessorTime = time.clock() - StartingProcessorTime\n \n ElapsedTime = \"%d wallclock secs ( %.2f process secs)\" % (ElapsedWallClockTime, ElapsedProcessorTime)\n \n return ElapsedTime\n\ndef IsEmpty(Value):\n \"\"\"Determine whether the specified value is empty after converting\n it in to a string and removing all leading and trailing white spaces. 
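# --- Hedged sketch: tracing GetExcelStyleColumnLabel's divmod(ColNum - 1, 26)
# loop above; the -1 makes the scheme bijective base-26 (Z -> AA, ZZ -> AAA).
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'

def col_label(n):
    out = []
    while n:
        n, r = divmod(n - 1, 26)
        out[:0] = LETTERS[r]
    return ''.join(out)

print(col_label(1), col_label(26), col_label(27), col_label(703))  # A Z AA AAA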
A value\n of type None is considered empty.\n \n Arguments:\n Value (str, int or float): Text or a value\n\n Returns:\n bool : True, Text string is empty; Otherwsie, False.\n\n \"\"\"\n\n if Value is None:\n return True\n\n TextValue = \"%s\" % Value\n TextValue = TextValue.strip()\n\n return False if len(TextValue) else True\n\ndef IsFloat(Value):\n \"\"\"Determine whether the specified value is a float by converting it\n into a float.\n \n Arguments:\n Value (str, int or float): Text\n\n Returns:\n bool : True, Value is a float; Otherwsie, False.\n\n \"\"\"\n\n return IsNumber(Value)\n\ndef IsInteger(Value):\n \"\"\"Determine whether the specified value is an integer by converting it\n into an int.\n \n Arguments:\n Value (str, int or float): Text\n\n Returns:\n bool : True, Value is an integer; Otherwsie, False.\n\n \"\"\"\n\n Status = True\n \n if Value is None:\n return False\n\n try:\n Value = int(Value)\n Status = True\n except ValueError:\n Status = False\n \n return Status\n\ndef IsNumber(Value):\n \"\"\"Determine whether the specified value is a number by converting it\n into a float.\n \n Arguments:\n Value (str, int or float): Text\n\n Returns:\n bool : True, Value is a number; Otherwsie, False.\n\n \"\"\"\n\n Status = True\n \n if Value is None:\n return Status\n\n try:\n Value = float(Value)\n Status = True\n except ValueError:\n Status = False\n \n return Status\n\ndef JoinWords(Words, Delimiter, Quote = False):\n \"\"\"Join words in a list using specified delimiter with optional quotes around words.\n \n Arguments:\n Words (list): List containing words to join.\n Delimiter (string): Delimiter for joining words.\n Quote (boolean): Put quotes around words.\n\n Returns:\n str : String containing joined words.\n\n \"\"\"\n \n if Quote:\n JoinedWords = Delimiter.join('\"{0}\"'.format(Word) for Word in Words)\n else:\n JoinedWords = Delimiter.join(Words)\n \n return JoinedWords\n \ndef ParseFileName(FilePath):\n \"\"\"Parse specified file path and return file dir, file name, and file extension.\n \n Arguments:\n FilePath (str): Name of a file with complete file path.\n\n Returns:\n str : File directory.\n str : File name without file extension.\n str : File extension.\n\n \"\"\"\n FileDir, FileBaseName = os.path.split(FilePath)\n FileName, FileExt = os.path.splitext(FileBaseName)\n \n if re.match(\"^\\.\", FileExt):\n FileExt = re.sub(\"^\\.\", \"\", FileExt)\n \n return (FileDir, FileName, FileExt)\n \ndef PrintError(Msg, Status=2):\n \"\"\"Print message to stderr along with flushing stderr and exit with a specified\n status. An `Error` prefix is placed before the message.\n \n Arguments:\n Msg (str): Text message.\n Status (int): Exit status.\n\n \"\"\"\n \n PrintInfo(\"Error: %s\" % Msg)\n sys.exit(Status)\n\ndef PrintInfo(Msg=''):\n \"\"\"Print message to stderr along with flushing stderr.\n \n Arguments:\n Msg (str): Text message.\n\n \"\"\"\n \n print(Msg, sep=' ', end='\\n', file=sys.stderr)\n sys.stderr.flush()\n\ndef PrintWarning(msg):\n \"\"\"Print message to stderr along with flushing stderr. 
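# --- Note + hedged fix: as written above, IsNumber(None) returns True (the
# early `return Status` happens while Status is still True), whereas
# IsInteger(None) returns False. A None-safe variant for comparison:
def is_number(value):
    if value is None:
        return False
    try:
        float(value)
        return True
    except (TypeError, ValueError):
        return False

print(is_number(None), is_number("3.14"), is_number("abc"))  # False True False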
An `Warning` prefix\n is placed before the message.\n \n Arguments:\n Msg (str): Text message.\n\n \"\"\"\n \n PrintInfo(\"Warning: %s\" % msg)\n\ndef ValidateOptionFileExt(OptionName, FileName, FileExts):\n \"\"\"Validate file type based on the specified file extensions delimited by spaces.\n \n Arguments:\n OptionName (str): Command line option name.\n FileName (str): Name of a file.\n FileExts (str): Space delimited string containing valid file extensions.\n\n Notes:\n The function exits with an error message for a file name containing\n invalid file extension.\n\n \"\"\"\n \n if not CheckFileExt(FileName, FileExts):\n PrintError(\"The file name specified , %s, for option \\\"%s\\\" is not valid. Supported file formats: %s\\n\" % (FileName, OptionName, FileExts))\n\ndef ValidateOptionFilePath(OptionName, FilePath):\n \"\"\"Validate presence of the file.\n \n Arguments:\n OptionName (str): Command line option name.\n FilePath (str): Name of a file with complete path.\n\n Notes:\n The function exits with an error message for a file path that doesn't exist.\n\n \"\"\"\n \n if not os.path.exists(FilePath):\n PrintError(\"The file specified, %s, for option \\\"%s\\\" doesn't exist.\\n\" % (FilePath, OptionName))\n\ndef ValidateOptionFloatValue(OptionName, OptionValue, CmpOpValueMap):\n \"\"\"Validate option value using comparison operater and value pairs in specified in\n a map.\n \n Arguments:\n OptionName (str): Command line option name.\n OptionValue (float or str): Command line option value.\n CmpOpValueMap (dictionary): Comparison operator key and value pairs to\n validate values specified in OptionValue.\n\n Notes:\n The function exits with an error message for an invalid option values specified\n in OptionValue.\n\n Examples:\n\n ValidateOptionNumberValue(\"-b, --butinaSimilarityCutoff\", \n Options[\"--butinaSimilarityCutoff\"],\n {\">\": 0.0, \"<=\" : 1.0})\n\n \"\"\"\n\n if not IsFloat(OptionValue):\n PrintError(\"The value specified, %s, for option \\\"%s\\\" must be a float.\" % (OptionValue, OptionName))\n \n return ValidateOptionNumberValue(OptionName, float(OptionValue), CmpOpValueMap)\n\ndef ValidateOptionIntegerValue(OptionName, OptionValue, CmpOpValueMap):\n \"\"\"Validate option value using comparison operater and value pairs in specified in\n a map.\n \n Arguments:\n OptionName (str): Command line option name.\n OptionValue (int or str): Command line option value.\n CmpOpValueMap (dictionary): Comparison operator key and value pairs to\n validate values specified in OptionValue.\n\n Notes:\n The function exits with an error message for an invalid option values specified\n in OptionValue.\n\n Examples:\n\n ValidateOptionIntegerValue(\"--maxConfs\", Options[\"--maxConfs\"],\n {\">\": 0})\n\n \"\"\"\n\n if not IsInteger(OptionValue):\n PrintError(\"The value specified, %s, for option \\\"%s\\\" must be an integer.\" % (OptionValue, OptionName))\n \n return ValidateOptionNumberValue(OptionName, int(OptionValue), CmpOpValueMap)\n\ndef ValidateOptionNumberValue(OptionName, OptionValue, CmpOpValueMap):\n \"\"\"Validate option value using comparison operater and value pairs in specified in\n a map.\n \n Arguments:\n OptionName (str): Command line option name.\n OptionValue (int or float): Command line option value.\n CmpOpValueMap (dictionary): Comparison operator key and value pairs to\n validate values specified in OptionValue.\n\n Notes:\n The function exits with an error message for an invalid option values specified\n in OptionValue.\n\n Examples:\n\n 
ValidateOptionNumberValue(\"--maxConfs\", int(Options[\"--maxConfs\"]),\n {\">\": 0})\n ValidateOptionNumberValue(\"-b, --butinaSimilarityCutoff\", \n float(Options[\"--butinaSimilarityCutoff\"]),\n {\">\": 0.0, \"<=\" : 1.0})\n\n \"\"\"\n \n Status = True\n for CmpOp in CmpOpValueMap:\n Value = CmpOpValueMap[CmpOp]\n if re.match(\"^>$\", CmpOp, re.I):\n if OptionValue <= Value:\n Status = False\n break\n elif re.match(\"^>=$\", CmpOp, re.I):\n if OptionValue < Value:\n Status = False\n break\n elif re.match(\"^<$\", CmpOp, re.I):\n if OptionValue >= Value:\n Status = False\n break\n elif re.match(\"^<=$\", CmpOp, re.I):\n if OptionValue > Value:\n Status = False\n break\n else:\n PrintError(\"The specified comparison operator, %s, for function MiscUtil.ValidateOptionNumberValue is not supported\\n\" % (CmpOp))\n \n if not Status:\n FirstValue = True\n SupportedValues = \"\"\n for CmpOp in CmpOpValueMap:\n Value = CmpOpValueMap[CmpOp]\n if FirstValue:\n FirstValue = False\n SupportedValues = \"%s %s\" % (CmpOp, Value)\n else:\n SupportedValues = \"%s and %s %s\" % (SupportedValues, CmpOp, Value)\n \n PrintError(\"The value specified, %s, for option \\\"%s\\\" is not valid. Supported value(s): %s \" % (OptionValue, OptionName, SupportedValues))\n\ndef ValidateOptionNumberValues(OptionName, OptionValueString, OptionValueCount, OptionValueDelimiter, OptionValueType, CmpOpValueMap):\n \"\"\"Validate numerical option values using option value string, delimiter, value type,\n and a specified map containing comparison operator and value pairs.\n \n Arguments:\n OptionName (str): Command line option name.\n OptionValueString (str): Command line option value.\n OptionValueCount (int): Number of values in OptionValueString.\n OptionValueDelimiter (str): Delimiter used for values in OptionValueString.\n OptionValueType (str): Valid number types (integer or float)\n CmpOpValueMap (dictionary): Comparison operator key and value pairs to\n validate values specified in OptionValueString.\n\n Notes:\n The function exits with an error message for invalid option values specified\n in OptionValueString\n\n Examples:\n\n ValidateOptionNumberValues(\"-m, --molImageSize\",\n Options[\"--molImageSize\"], 2, \",\", \"integer\", {\">\": 0})\n\n \"\"\"\n if not CheckTextValue(OptionValueType, \"integer float\"):\n PrintError(\"The option value type specified, %s, for function MiscUtil.ValidateOptionNumberValues is not valid. Supported value: integer float \" % (OptionValueType))\n \n Values = OptionValueString.split(OptionValueDelimiter)\n if len(Values) != OptionValueCount:\n PrintError(\"The value specified, %s, for option \\\"%s\\\" is not valid. 
It must contain %d %s values separated by \\\"%s\\\"\" % (OptionValueString, OptionName, OptionValueCount, OptionValueType, OptionValueDelimiter))\n\n IsIntergerValue = True\n if re.match(\"^float$\", OptionValueType, re.I):\n IsIntergerValue = False\n \n for Value in Values:\n if IsIntergerValue:\n if not IsInteger(Value):\n PrintError(\"The value specified, %s, for option \\\"%s\\\" in string \\\"%s\\\" must be an integer.\" % (Value, OptionName, OptionValueString))\n Value = int(Value)\n else:\n if not IsFloat(Value):\n PrintError(\"The value specified, %s, for option \\\"%s\\\" in string \\\"%s\\\" must be a float.\" % (Value, OptionName, OptionValueString))\n Value = float(Value)\n ValidateOptionNumberValue(OptionName, Value, CmpOpValueMap)\n \ndef ValidateOptionTextValue(OptionName, OptionValue, ValidValues):\n \"\"\"Validate option value based on the valid specified values separated by spaces.\n \n Arguments:\n OptionName (str): Command line option name.\n OptionValue (str): Command line option value.\n ValidValues (str): Space delimited string containing valid values.\n\n Notes:\n The function exits with an error message for an invalid option value.\n\n \"\"\"\n \n if not CheckTextValue(OptionValue, ValidValues):\n PrintError(\"The value specified, %s, for option \\\"%s\\\" is not valid. Supported value(s): %s \" % (OptionValue, OptionName, ValidValues))\n\ndef ValidateOptionsOutputFileOverwrite(OptionName, FilePath, OverwriteOptionName, OverwriteStatus):\n \"\"\"Validate overwriting of output file.\n \n Arguments:\n OptionName (str): Command line option name.\n FilePath (str): Name of a file with complete file path.\n OverwriteOptionName (str): Overwrite command line option name.\n OverwriteStatus (bool): True, overwrite\n\n Notes:\n The function exits with an error message for a file that is present and is not allowed\n to be written as indicated by value of OverwriteStatus.\n\n \"\"\"\n \n if os.path.exists(FilePath):\n if not OverwriteStatus:\n if len(OverwriteOptionName) > 4:\n ShortOverwriteOptionName = OverwriteOptionName[:4]\n else:\n ShortOverwriteOptionName = OverwriteOptionName\n \n PrintError(\"The file specified, %s, for option \\\"%s\\\" already exist. 
Use option \\\"%s\\\" or \\\"%s\\\" and try again.\\n\" % (FilePath, OptionName, ShortOverwriteOptionName, OverwriteOptionName))\n\ndef ValidateOptionsDistinctFileNames(OptionName1, FilePath1, OptionName2, FilePath2):\n \"\"\"Validate two distinct file names.\n\n Arguments:\n OptionName1 (str): Command line option name.\n FilePath1 (str): Name of a file with complete file path.\n OptionName2 (str): Command line option name.\n FilePath2 (str): Name of a file with complete file path.\n\n Notes:\n The function exits with an error message for two non-distinct file names.\n \n \"\"\"\n \n FilePath1Pattern = r\"^\" + re.escape(FilePath1) + r\"$\";\n if re.match(FilePath1Pattern, FilePath2, re.I):\n PrintError(\"The file name specified, %s, for options \\\"%s\\\" and \\\"%s\\\" must be different.\\n\" % (FilePath1, OptionName1, OptionName2))\n\ndef ProcessOptionInfileParameters(ParamsOptionName, ParamsOptionValue, InfileName = None, OutfileName = None):\n \"\"\"Process parameters for reading input files and return a map containing\n processed parameter names and values.\n \n Arguments:\n ParamsOptionName (str): Command line input parameters option name.\n ParamsOptionValue (str): Comma delimited list of parameter name and value pairs.\n InfileName (str): Name of input file.\n OutfileName (str): Name of output file.\n\n Returns:\n dictionary: Processed parameter name and value pairs.\n\n Notes:\n The parameter name and values specified in ParamsOptionValue are validated before\n returning them in a dictionary.\n\n \"\"\"\n \n ParamsInfo = {'RemoveHydrogens': True, 'Sanitize' : True, 'StrictParsing': True, 'SMILESColumn': 1, 'SMILESNameColumn' : 2, 'SMILESDelimiter': ' ', 'SMILESTitleLine': 'auto'}\n _ProcessInfileAndOutfileParameters('Infile', ParamsInfo, ParamsOptionName, ParamsOptionValue, InfileName, OutfileName)\n \n return ParamsInfo\n\ndef ProcessOptionOutfileParameters(ParamsOptionName, ParamsOptionValue, InfileName = None, OutfileName = None):\n \"\"\"Process parameters for writing output files and return a map containing\n processed parameter names and values.\n \n Arguments:\n ParamsOptionName (str): Command line input parameters option name.\n ParamsOptionValue (str): Comma delimited list of parameter name and value pairs.\n InfileName (str): Name of input file.\n OutfileName (str): Name of output file.\n\n Returns:\n dictionary: Processed parameter name and value pairs.\n\n Notes:\n The parameter name and values specified in ParamsOptionValue are validated before\n returning them in a dictionary.\n\n The default value of some parameters may depend on the type of input file. Consequently,\n the input file name is also needed.\n\n \"\"\"\n ParamsInfo = {'Compute2DCoords' : 'auto', 'Kekulize': False, 'SMILESDelimiter': ' ', 'SMILESIsomeric': True, 'SMILESTitleLine': True}\n _ProcessInfileAndOutfileParameters('Outfile', ParamsInfo, ParamsOptionName, ParamsOptionValue, InfileName, OutfileName)\n \n return ParamsInfo\n \ndef _ProcessInfileAndOutfileParameters(Mode, ParamsInfo, ParamsOptionName, ParamsOptionValue, InfileName, OutfileName):\n \"\"\"Process specified infile and outfile parameters.\n \n \"\"\"\n if re.match(\"^auto$\", ParamsOptionValue, re.I):\n # No specific parameters to process except for parameters with possible auto value...\n _ProcessInfileAndOutfileAutoParameters(Mode, ParamsInfo, ParamsOptionName, ParamsOptionValue, InfileName, OutfileName)\n return\n \n ParamsOptionValue = re.sub(\" \", \"\", ParamsOptionValue)\n if not ParamsOptionValue:\n PrintError(\"No valid parameter name and value pairs specified using \\\"%s\\\" option\" % ParamsOptionName)\n \n ParamsOptionValueWords = ParamsOptionValue.split(\",\")\n if len(ParamsOptionValueWords) % 2:\n PrintError(\"The number of comma delimited parameter names and values, %d, specified using \\\"%s\\\" option must be an even number.\" % (len(ParamsOptionValueWords), ParamsOptionName))\n \n # Set up canonical parameter names...\n ValidParamNames = []\n CanonicalParamNamesMap = {}\n for ParamName in sorted(ParamsInfo):\n ValidParamNames.append(ParamName)\n CanonicalParamNamesMap[ParamName.lower()] = ParamName\n \n # Validate parameter name and value pairs...\n for Index in range(0, len(ParamsOptionValueWords), 2):\n Name = ParamsOptionValueWords[Index]\n Value = ParamsOptionValueWords[Index + 1]\n\n CanonicalName = Name.lower()\n if not CanonicalName in CanonicalParamNamesMap:\n PrintError(\"The parameter name, %s, specified using \\\"%s\\\" is not a valid name. Supported parameter names: %s\" % (Name, ParamsOptionName, \" \".join(ValidParamNames)))\n\n ParamName = CanonicalParamNamesMap[CanonicalName]\n ParamValue = Value\n \n if re.match(\"^(Sanitize|StrictParsing|RemoveHydrogens|Kekulize|SMILESIsomeric)$\", ParamName, re.I):\n if not re.match(\"^(Yes|No|True|False)$\", Value, re.I):\n PrintError(\"The parameter value, %s, specified for parameter name, %s, using \\\"%s\\\" option is not a valid value. Supported values: Yes No True False\" % (Value, Name, ParamsOptionName))\n ParamValue = True\n if re.match(\"^(No|False)$\", Value, re.I):\n ParamValue = False\n elif re.match(\"^SMILESTitleLine$\", ParamName, re.I):\n if re.match(\"^Infile$\", Mode, re.I):\n if not re.match(\"^(Yes|No|True|False|Auto)$\", Value, re.I):\n PrintError(\"The parameter value, %s, specified for parameter name, %s, using \\\"%s\\\" option is not a valid value. Supported values: Yes No True False Auto\" % (Value, Name, ParamsOptionName))\n elif re.match(\"^Outfile$\", Mode, re.I):\n if not re.match(\"^(Yes|No|True|False)$\", Value, re.I):\n PrintError(\"The parameter value, %s, specified for parameter name, %s, using \\\"%s\\\" option is not a valid value. Supported values: Yes No True False\" % (Value, Name, ParamsOptionName))\n ParamValue = True\n if re.match(\"^(No|False)$\", Value, re.I):\n ParamValue = False\n elif re.match(\"^SMILESDelimiter$\", ParamName, re.I):\n if not re.match(\"^(space|tab|comma)$\", Value, re.I):\n PrintError(\"The parameter value, %s, specified for parameter name, %s, using \\\"%s\\\" option is not a valid value. Supported values: space tab comma\" % (Value, Name, ParamsOptionName))\n ParamValue = \" \"\n if re.match(\"^tab$\", Value, re.I):\n ParamValue = \"\\t\"\n elif re.match(\"^comma$\", Value, re.I):\n ParamValue = \",\"\n elif re.match(\"^Compute2DCoords$\", ParamName, re.I):\n # No need to set the value. It would be processed later to handle \"auto\" value...\n if not re.match(\"^(Yes|No|True|False|Auto)$\", Value, re.I):\n PrintError(\"The parameter value, %s, specified for parameter name, %s, using \\\"%s\\\" option is not a valid value. Supported values: Yes No True False Auto\" % (Value, Name, ParamsOptionName))\n else:\n ParamValue = int(Value)\n if ParamValue <= 0:\n PrintError(\"The parameter value, %s, specified for parameter name, %s, using \\\"%s\\\" option is not a valid value. Supported values: > 0\" % (Value, Name, ParamsOptionName))\n \n # Set value...\n ParamsInfo[ParamName] = ParamValue\n \n # Handle parameters with possible auto values...\n _ProcessInfileAndOutfileAutoParameters(Mode, ParamsInfo, ParamsOptionName, ParamsOptionValue, InfileName, OutfileName)\n\ndef _ProcessInfileAndOutfileAutoParameters(Mode, ParamsInfo, ParamsOptionName, ParamsOptionValue, InfileName, OutfileName):\n \"\"\"Process parameters with possible auto values.\n \n \"\"\"\n if re.match(\"^Infile$\", Mode, re.I):\n # SMILESTitleLine parameter...\n Value = ParamsInfo[\"SMILESTitleLine\"]\n ParamValue = False\n if re.match(\"^auto$\", Value, re.I):\n if InfileName is not None:\n if CheckFileExt(InfileName, \"smi csv tsv txt\"):\n ParamValue = DoesSMILESFileContainTitleLine(InfileName)\n elif re.match(\"^(Yes|True)$\", Value, re.I):\n ParamValue = True\n ParamsInfo[\"SMILESTitleLine\"] = ParamValue\n elif re.match(\"^Outfile$\", Mode, re.I):\n # Compute2DCoords parameter...\n Value = ParamsInfo[\"Compute2DCoords\"]\n ParamValue = False\n if re.match(\"^auto$\", Value, re.I):\n if InfileName is not None:\n if CheckFileExt(InfileName, \"smi csv tsv txt\"):\n ParamValue = True\n if OutfileName is not None:\n if CheckFileExt(OutfileName, \"smi csv tsv txt\"):\n # No need to compute 2D coords for SMILES file...\n ParamValue = False\n elif re.match(\"^(Yes|True)$\", Value, re.I):\n ParamValue = True\n ParamsInfo[\"Compute2DCoords\"] = ParamValue\n\ndef ReplaceHTMLEntitiesInText(Text):\n \"\"\"Check and replace the following HTML entities to their respective code\n for display in a browser: < (less than), > (greater than), & (ampersand),\n \" (double quote), and ' (single quote).\n\n Arguments:\n Text (str): Text value.\n\n Returns:\n str : Modified text value.\n\n \"\"\"\n\n if re.search(\"\"\"(<|>|&|\"|')\"\"\", Text):\n return Text.replace(\"&\", \"&amp;\").replace(\"<\", \"&lt;\").replace(\">\", \"&gt;\").replace('\"', \"&quot;\").replace(\"'\", \"&#39;\")\n else:\n return Text\n\ndef TruncateText(Text, Width, TrailingChars = \"...\"):\n \"\"\"Truncate text using specified width along with appending any trailing\n characters.\n \n Arguments:\n Text (string): Input text.\n Width (int): Max number of characters before truncating text.\n TrailingChars (string): Trailing characters to append or None.\n\n Returns:\n str : Truncated text\n\n \"\"\"\n \n if len(Text) < Width:\n return Text\n \n TruncatedText = (Text[:Width] + TrailingChars) if not IsEmpty(TrailingChars) else Text[:Width] \n \n return TruncatedText\n\ndef WrapText(Text, Delimiter, Width):\n \"\"\"Wrap text using specified delimiter and width.\n \n Arguments:\n Text (string): Input text\n Delimiter (string): Delimiter for wrapping text\n Width (int): Max number of characters before wrapping text\n\n Returns:\n str : Wrapped text\n\n \"\"\"\n WrappedText = Delimiter.join(textwrap.wrap(Text, width = Width))\n \n return WrappedText\n","repo_name":"sirimullalab/KinasepKipred","sub_path":"mayachemtools/lib/Python/MiscUtil.py","file_name":"MiscUtil.py","file_ext":"py","file_size_in_byte":31015,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"5"}
+{"seq_id":"73513019993","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom oslo_utils import uuidutils\nimport pytest\n\nfrom openstack_dashboard.test.selenium import widgets\n\n\n@pytest.fixture\ndef flavor_name():\n return 'horizon_flavor_%s' % uuidutils.generate_uuid(dashed=False)\n\n\n@pytest.fixture\ndef new_flavor(flavor_name, openstack_admin):\n flavor = openstack_admin.create_flavor(\n name=flavor_name,\n vcpus=1,\n ram=256,\n disk=1\n )\n yield flavor\n openstack_admin.delete_flavor(flavor_name)\n\n\n@pytest.fixture\ndef clear_flavor(flavor_name, openstack_admin):\n yield None\n openstack_admin.delete_flavor(flavor_name)\n\n\ndef test_create_flavor(login, driver, flavor_name, openstack_admin,\n config, clear_flavor):\n flavor_vcpus = 1\n flavor_ram = 256\n flavor_disk = 1\n\n login('admin')\n url = '/'.join((\n config.dashboard.dashboard_url,\n 'admin',\n 'flavors',\n ))\n driver.get(url)\n driver.find_element_by_link_text(\"Create Flavor\").click()\n flavors_form = driver.find_element_by_css_selector(\"form .modal-content\")\n flavors_form.find_element_by_id(\"id_name\").send_keys(flavor_name)\n flavors_form.find_element_by_id(\"id_vcpus\").send_keys(flavor_vcpus)\n flavors_form.find_element_by_id(\"id_memory_mb\").send_keys(flavor_ram)\n flavors_form.find_element_by_id(\"id_disk_gb\").send_keys(flavor_disk)\n flavors_form.find_element_by_css_selector(\n \".btn-primary[value='Create Flavor']\").click()\n messages = widgets.get_and_dismiss_messages(driver)\n assert f'Success: Created new flavor \"{flavor_name}\".' in messages\n flavor_sdk = openstack_admin.compute.find_flavor(flavor_name)\n assert flavor_sdk is not None\n assert (flavor_sdk.vcpus == flavor_vcpus and\n flavor_sdk.ram == flavor_ram and\n flavor_sdk.disk == flavor_disk)\n\n\ndef test_delete_flavor(login, driver, flavor_name, new_flavor, config,\n openstack_admin):\n login('admin')\n url = '/'.join((\n config.dashboard.dashboard_url,\n 'admin',\n 'flavors',\n ))\n driver.get(url)\n rows = driver.find_elements_by_css_selector(\n f\"table#flavors tr[data-display='{flavor_name}']\")\n assert len(rows) == 1\n actions_column = rows[0].find_element_by_css_selector(\"td.actions_column\")\n widgets.select_from_dropdown(actions_column, \"Delete Flavor\")\n widgets.confirm_modal(driver)\n messages = widgets.get_and_dismiss_messages(driver)\n assert f\"Success: Deleted Flavor: {flavor_name}\" in messages\n assert openstack_admin.compute.find_flavor(flavor_name) is None\n","repo_name":"openstack/horizon","sub_path":"openstack_dashboard/test/selenium/integration/test_flavors.py","file_name":"test_flavors.py","file_ext":"py","file_size_in_byte":3141,"program_lang":"python","lang":"en","doc_type":"code","stars":1314,"dataset":"github-code","pt":"5"}
+{"seq_id":"73360330393","text":"import numpy as np\n\nfrom sklearn.base import BaseEstimator, TransformerMixin\n\n\nclass TangentSpace(BaseEstimator, TransformerMixin):\n def __init__(self, rank, ref=None):\n \"\"\"Init.\"\"\"\n self.rank = rank\n self.ref = ref\n\n def fit(self, X, y=None):\n ref = self.ref\n if ref is None:\n # ref = mean_covs(X, rank=self.rank)\n ref = np.mean(X, axis=0)\n Y = to_quotient(ref, self.rank)\n self.reference_ = ref\n self.Y_ref_ = Y\n return self\n\n def transform(self, X, verbose=False):\n n_mat, n, _ = X.shape\n output = np.zeros((n_mat, n * self.rank))\n for j, C in enumerate(X):\n if verbose:\n print('\\r %d / %d' % (j+1, n_mat), end='', flush=True)\n Y = to_quotient(C, self.rank)\n output[j] = logarithm_(Y, self.Y_ref_).ravel()\n return output\n\n\ndef to_quotient(C, rank):\n d, U = np.linalg.eigh(C)\n U = U[:, -rank:]\n d = d[-rank:]\n Y = U * np.sqrt(d)\n return Y\n\n\ndef distance2(S1, S2, rank=None):\n Sq = sqrtm(S1, rank)\n P = sqrtm(np.dot(Sq, np.dot(S2, Sq)), rank)\n return np.trace(S1) + np.trace(S2) - 2 * np.trace(P)\n\n\ndef mean_covs(covmats, rank, tol=10e-4, maxiter=50, init=None,\n sample_weight=None):\n Nt, Ne, Ne = covmats.shape\n if sample_weight is None:\n sample_weight = np.ones(Nt)\n if init is None:\n C = np.mean(covmats, axis=0)\n else:\n C = init\n k = 0\n K = sqrtm(C, rank)\n crit = np.finfo(np.float64).max\n # stop when crit <= tol or the maximum number of iterations is reached\n while (crit > tol) and (k < maxiter):\n k = k + 1\n\n J = np.zeros((Ne, Ne))\n\n for index, Ci in enumerate(covmats):\n tmp = np.dot(np.dot(K, Ci), K)\n J += sample_weight[index] * sqrtm(tmp)\n\n Knew = sqrtm(J, rank)\n crit = np.linalg.norm(Knew - K, ord='fro')\n K = Knew\n if k == maxiter:\n print('Max iter reached')\n C = np.dot(K, K)\n return C\n\n\ndef sqrtm(C, rank=None):\n if rank is None:\n rank = C.shape[0]\n d, U = np.linalg.eigh(C)\n U = U[:, -rank:]\n d = d[-rank:]\n return np.dot(U, np.sqrt(np.abs(d))[:, None] * U.T)\n\n\ndef logarithm_(Y, Y_ref):\n prod = np.dot(Y_ref.T, Y)\n U, D, V = np.linalg.svd(prod, full_matrices=False)\n Q = np.dot(U, V).T\n return np.dot(Y, Q) - Y_ref\n\n\nif __name__ == '__main__':\n rng = np.random.RandomState(0)\n n_mat = 10\n n = 10\n p = 5\n eps = 1e-2\n Y = rng.randn(n, p)\n C_ref = Y.dot(Y.T)\n X = np.zeros((n_mat, n, n))\n for i in range(n_mat):\n Y_ = Y + eps * rng.randn(n, p)\n X[i] = Y_.dot(Y_.T)\n # fit() takes no ref argument; the reference matrix is an __init__ parameter\n ts = TangentSpace(p, ref=C_ref)\n ts.fit(X)\n X_t = ts.transform(X)\n D_m = np.zeros(n_mat)\n D_T = np.zeros(n_mat)\n for i in range(n_mat):\n D_m[i] = X_t[i].dot(X_t[i])\n D_T[i] = distance2(X[i], C_ref, p)\n print(np.mean((D_m - D_T) / D_m.max()))\n","repo_name":"hichamjanati/retreat2019","sub_path":"script_expe/wasserstein_tangent.py","file_name":"wasserstein_tangent.py","file_ext":"py","file_size_in_byte":2939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"37291393202","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nimport subprocess\nfrom moviepy.editor import VideoFileClip, concatenate_videoclips\n\n# Get the path to the input file from the command line argument\nif len(sys.argv) < 2:\n print(\"Please provide the path to the input MP4 file as an argument\")\n exit(1)\n\ninput_file_path = sys.argv[1]\n# Derive the output file path\nfilename = os.path.basename(input_file_path)\noutput_dir = os.path.expanduser(\"~/tmp\")\n\ntmp_file_path_0 = os.path.join(output_dir, f\"tmp.0.{filename}\")\ntmp_file_path_1 = os.path.join(output_dir, f\"tmp.1.{filename}\")\noutput_file_path = os.path.join(output_dir, f\"looped.smoothed.{filename}\")\n\n# Prompt the user for the slowdown multiplier\ndefault_slowdown = 2\nslowdown = input(f\"Enter slowdown multiplier (default {default_slowdown}): \")\nslowdown = float(slowdown) if slowdown else default_slowdown\n\n# Prompt the user for the number of times to loop the video\ndefault_loops = 10\nloops = input(f\"Enter number of loops (default {default_loops}): \")\nloops = int(loops) if loops else default_loops\n\n\n# convert mp4 file to smooth mp4 format with minterpolate filter\ncommand3 = (\n f'ffmpeg -y -i {input_file_path} -filter:v \"setpts=1.2*PTS\" {tmp_file_path_0}'\n)\nsubprocess.run(command3, shell=True, check=True)\n\ncommand4 = (\n f'ffmpeg -y -i {tmp_file_path_0} -crf 10 '\n + f' -vf \"minterpolate=fps=60:mi_mode=mci:mc_mode=aobmc:me_mode=bidir:vsbmc=1\"'\n + f' {tmp_file_path_1}'\n)\nsubprocess.run(command4, shell=True, check=True)\n\n# Apply slowdown and looping\nclip = VideoFileClip(tmp_file_path_1)\n\n# moviepy's speedx speeds playback up by the given factor, so slow down by its inverse\nclip = clip.speedx(1.0 / slowdown)\nclips = [clip] * loops\nfinal_clip = concatenate_videoclips(clips)\n\n# Write the output file\nfinal_clip.write_videofile(output_file_path, fps=clip.fps)\n\nos.remove(tmp_file_path_0)\nos.remove(tmp_file_path_1)\n\nprint(f\"Done! 
{output_file_path}\")\n","repo_name":"sth144/.workflow","sub_path":"src/utils/shared/video/smooth_loop_mp4.py","file_name":"smooth_loop_mp4.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"72108804311","text":"import matplotlib.pyplot as plt\nimport matplotlib\nimport pandas\nmatplotlib.use('TkAgg')\nimport seaborn\n\ndef plot(data, selections=None, **kwargs):\n\n if not selections:\n selections = data.colnames\n selections = [i for i in selections if i not in ['time']]\n else:\n assert isinstance(selections, list)\n\n df = pandas.DataFrame(data, columns=data.colnames)\n df = df[selections]\n\n fig = plt.figure()\n\n if kwargs.get('title'):\n plt.title(kwargs['title'])\n\n for i in selections:\n plt.plot(data['time'], df[i], label=i)\n\n plt.legend(loc='best')\n # plt.legend(loc=(1, 0.1))\n seaborn.despine(fig=fig, top=True, right=True)\n plt.xlabel('Time(s)')\n plt.ylabel('Concentration nmol/ml')\n\n\nFUNCTIONS = \"\"\"\n\nfunction MM(km, Vmax, S)\n Vmax * S / (km + S)\n end\n\n function MMWithKcat(km, kcat, S, E)\n kcat * E * S / (km + S)\n end\n\n function NonCompetitiveInhibition(km, ki, Vmax, n, I, S)\n Vmax * S / ( (km + S) * (1 + (I / ki)^n ) )\n end\n\n function MA1(k, S)\n k * S\n end\n\n function MA2(k, S1, S2)\n k * S1 * S2\n end\n\n function MA1Mod(k, S, M)\n k * S * M\n end\n\n function MA2Mod(k, S1, S2, M)\n k * S1 * S2 * M\n end\n\n function CompetitiveInhibitionWithKcat(km, ki, kcat, E, I, S)\n (kcat * E * S) / (km + S + ((km * I )/ ki) )\n end \n\n function CompetitiveInhibition(Vmax, km, ki, I, S)\n Vmax * S / (km + S + ((km * I )/ ki) )\n end\n\n function Hill(km, beta, n, X)\n beta * X^n / (km*n + X*n)\n end\n\n function HillWithKcat(km, kcat, n, X, E)\n kcat*E* X^n / (km*n + X*n)\n end\n\"\"\"\n\n\ndef list_attrs(x):\n for i in sorted(dir(x)):\n print(i)","repo_name":"CiaranWelsh/TeachingMaterials","sub_path":"ODEModels/tasks/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"22341392614","text":"import json\nimport facebook\nimport re\nfrom collections import OrderedDict\n\ndef returnIds(listofIds):\n idsDict = OrderedDict(listofIds)\n ids = json.dumps(listofIds,indent=4)\n\n #ids = ''.join(i for i in ids if i.isdigit())\n #ids = re.sub(\"(.{64})\", \"\\\\1\\n\", ids, 0, re.DOTALL)\n #ids = re.sub(\"\\D\", \"\", ids)\n\n\n return ids\n\ndef main():\n token = {'EAAKsaVifZCCYBACB1mPP3935nmW5Yps9ZCG2Kl9IIl8sZCeSEf5dT79MmDqXfRCL24OhdjSfTBUwQZCZAcX6A0Y7dTu6WCpW3xrjWI4ZBKuDAZBGT4T58Y3sgNseZBmjLFZBiD9ZBbJoT9ECWdd5tFZBZBPyY4R4rjzFPxzxqDyKHLXwsyhNJ3D6hhDknNBylIaZBJgxuMxyzX5NjZCwZDZD'}\n graph = facebook.GraphAPI(token)\n pagesLiked = graph.get_object('me',fields='likes{id}')\n pageIds = returnIds(pagesLiked)\n\n #print(json.dumps(pagesLiked,indent=4))\n\nif __name__ == \"__main__\":\n main()","repo_name":"OdincoGaming/Text-Posting","sub_path":"oldscripts/TalkBot/FacebookBot/FacebookBot.py","file_name":"FacebookBot.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"14490893924","text":"from sys import exit\nimport pygame as pg\nfrom time import time\nfrom config import FPS\nfrom game import Game\n\npg.init()\nscreen = pg.display.set_mode((0, 0), pg.FULLSCREEN)\npg.display.set_caption('Medieval Apocalypse')\nscreen_width = 
screen.get_width()\nscreen_height = screen.get_height()\n\nclock = pg.time.Clock()\n\ngame = Game(screen, 'menu')\n\nlast_time = time()\n\nwhile True:\n\tmouse_down = False\n\tmouse_pos = pg.mouse.get_pos()\n\n\tfor event in pg.event.get():\n\t\tif event.type == pg.QUIT:\n\t\t\tpg.quit()\n\t\t\texit()\n\t\tif event.type == pg.MOUSEBUTTONUP:\n\t\t\tmouse_down = True\n\n\tkeys = pg.key.get_pressed()\n\n\tdt = time() - last_time\n\tlast_time = time()\n\n\tgame.run(dt, keys, mouse_down, mouse_pos)\n\n\tif not game.running:\n\t\tbreak\n\n\tpg.display.flip()\n\tclock.tick(FPS)\n\npg.quit()\nexit()\n","repo_name":"aronLev2065/midieval-apocalypse-game","sub_path":"data/code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"23635134669","text":"import csv\nimport os\nfrom datetime import datetime\nfrom download_images_from_iicanada import download_images\nfrom download_images_from_iicanada import Player, Team\n\nbase_folder = \"2023\"\n\ninput_file = f'{base_folder}/gold_cup_2023__girls_15_17_player_registration.csv'\n\n# Indexes at which the data begins. The Indexes before this are not useful.\ncolumn_start_index = 9\nrow_start_index = 3\n\n# Create a list to store the processed data\nprocessed_data = []\n\ndata_dir_for_teams_to_validate = []\n\nplayers = list[Player]()\n\n\ndef convert_date_of_birth(date_str):\n # check the input format\n if '/' in date_str:\n # convert from '%m/%d/%Y' format to '%Y/%m/%d' format\n date_obj = datetime.strptime(date_str, '%m/%d/%Y')\n new_date_str = datetime.strftime(date_obj, '%Y/%m/%d')\n elif '-' in date_str:\n # convert from '%Y-%m-%d' format to '%Y/%m/%d' format\n date_obj = datetime.strptime(date_str, '%Y-%m-%d')\n new_date_str = datetime.strftime(date_obj, '%Y/%m/%d')\n else:\n # invalid format\n raise ValueError(f\"Invalid date format for {date_str}. 
Use either '%m/%d/%Y' or '%Y-%m-%d'.\")\n return new_date_str\n\n\n# Read the input CSV file\nwith open(input_file, 'r') as file:\n reader = csv.reader(file)\n\n # Skip useless rows\n for row in range(row_start_index):\n next(reader)\n\n players = list[Player]()\n for row in reader:\n row = row[column_start_index:]\n # Extract the required values from the input row\n player_full_name = row[0]\n category = \"Girls 15-17\"\n player_date_of_birth = row[1]\n player_email_address = row[2]\n player_phone_number = row[3]\n headshot = row[13]\n govt_id = row[15]\n player = Player(\n full_name=player_full_name,\n dob=convert_date_of_birth(player_date_of_birth),\n email_address=player_email_address,\n headshot=headshot,\n govt_id=govt_id\n )\n players.append(player)\n print(vars(player))\n\n team_name = \"Free Agents\"\n output_file = f'{base_folder}/{team_name}-{category}/{team_name}-{category}-output.csv'\n\n if not os.path.exists(f\"{base_folder}/{team_name}-{category}\"):\n os.makedirs(f\"{base_folder}/{team_name}-{category}\")\n\n with open(output_file, 'w', newline='') as file:\n writer = csv.writer(file)\n\n writer.writerow(['Teamname', 'TeamShortName', 'PlayerName', 'email'])\n for player in players:\n player_full_name = player.full_name\n\n writer.writerow([\"Free Agent\", 'FCB', player.full_name, player.email_address])\n\nteam = Team(team_name, category, players)\ndownload_images(teams=[team])\n","repo_name":"adiljiwani/gold-cup-tools","sub_path":"free_agent_reg_output_to_klubsoft_format.py","file_name":"free_agent_reg_output_to_klubsoft_format.py","file_ext":"py","file_size_in_byte":2730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"14643860967","text":"import unittest\nfrom io import StringIO\nimport sys\n\n\nclass StreamCaptureTest(unittest.TestCase):\n def assertWasStreamed(self, s):\n self.sout.seek(0)\n self.assertEqual(self.sout.read(), s)\n\n def setUp(self):\n self._stdout = sys.stdout\n self._stdin = sys.stdin\n self.sout = StringIO()\n self.sin = StringIO()\n sys.stdout = self.sout\n sys.stdin = self.sin\n\n def tearDown(self):\n sys.stdout = self._stdout\n sys.stdin = self._stdin\n","repo_name":"debtsy/rollout","sub_path":"test/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"28842318495","text":"import pytest\nimport asyncio\n\npytest_plugins = [\n 'shared.infrastructure.pytest.fixtures',\n 'shoes.infrastructure.pytest.fixtures',\n]\n\n\n@pytest.fixture(scope='session')\ndef event_loop():\n \"\"\"Create an instance of the default event loop for each test case.\"\"\"\n policy = asyncio.get_event_loop_policy()\n res = policy.new_event_loop()\n asyncio.set_event_loop(res)\n res._close = res.close\n res.close = lambda: None\n\n yield res\n\n res._close()\n","repo_name":"soulcodex/fastapi-ddd-atlantica-workshop","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"5"} +{"seq_id":"7429999011","text":"#Embedded file name: /Users/versonator/Jenkins/live/output/mac_64_static/Release/python-bundle/MIDI Remote Scripts/Axiom_AIR_25_49_61/Axiom_AIR_25_49_61.py\nfrom __future__ import with_statement\nimport Live\nfrom Live import MidiMap\nfrom _Framework.ControlSurface import ControlSurface\nfrom _Framework.InputControlElement import InputControlElement, 
MIDI_CC_TYPE, MIDI_NOTE_TYPE, MIDI_PB_TYPE\nfrom _Framework.ButtonElement import ButtonElement\nfrom _Framework.PhysicalDisplayElement import PhysicalDisplayElement\nfrom _Framework.DisplayDataSource import DisplayDataSource\nfrom _Framework.TransportComponent import TransportComponent\nfrom ConfigurableButtonElement import ConfigurableButtonElement\nfrom IdentifyingEncoderElement import IdentifyingEncoderElement\nfrom NumericalDisplayElement import NumericalDisplayElement\nfrom FaderModeSelector import FaderModeSelector\nfrom FaderButtonModeSelector import FaderButtonModeSelector\nfrom SingleFaderButtonModeSelector import SingleFaderButtonModeSelector\nfrom EncoderModeSelector import EncoderModeSelector\nfrom MainModeSelector import MainModeSelector\nfrom DeviceNavComponent import DeviceNavComponent\nfrom SpecialSessionComponent import SpecialSessionComponent\nfrom SpecialMixerComponent import SpecialMixerComponent\nfrom BestBankDeviceComponent import BestBankDeviceComponent\nfrom TransportViewModeSelector import TransportViewModeSelector\nfrom consts import *\n\ndef create_configurable_button(identifier, name, send_channel_offset = 0, identifier_send_offset = 0, send_msg_type = None):\n button = ConfigurableButtonElement(IS_MOMENTARY, MIDI_CC_TYPE, GLOBAL_CHANNEL, identifier, GLOBAL_SEND_CHANNEL + send_channel_offset, identifier_send_offset, send_msg_type)\n button.name = name\n return button\n\n\ndef create_button(identifier, name):\n button = ButtonElement(IS_MOMENTARY, MIDI_CC_TYPE, GLOBAL_CHANNEL, identifier)\n button.name = name\n return button\n\n\ndef create_encoder(identifier, name):\n encoder = IdentifyingEncoderElement(MIDI_CC_TYPE, GLOBAL_CHANNEL, identifier, MidiMap.MapMode.relative_smooth_two_compliment, 12)\n encoder.name = name\n encoder.set_feedback_delay(-1)\n return encoder\n\n\ndef create_slider(identifier, name):\n slider = IdentifyingEncoderElement(MIDI_CC_TYPE, GLOBAL_CHANNEL, identifier, MidiMap.MapMode.absolute)\n slider.name = name\n slider.set_feedback_delay(-1)\n return slider\n\n\nclass Axiom_AIR_25_49_61(ControlSurface):\n \"\"\" Script for the M-Audio Axiom A.I.R. 
25, 49 and 61 \"\"\"\n\n def __init__(self, c_instance):\n ControlSurface.__init__(self, c_instance)\n self._alt_device_component = None\n with self.component_guard():\n self.set_pad_translations(PAD_TRANSLATIONS)\n self._device_selection_follows_track_selection = True\n self._suggested_input_port = 'HyperControl'\n self._suggested_output_port = 'HyperControl'\n self._single_fader_button_modes = None\n self._has_faders = True\n self._display_reset_delay = -1\n self._hc_byte = HC_BYTE\n self._waiting_for_first_response = True\n self._setup_controls()\n self._setup_displays()\n self._setup_mixer()\n self._setup_session()\n self._setup_transport()\n self._setup_device()\n self._setup_modes()\n self._drum_group_midi_button = None\n self._drum_group_hyper_button = None\n for component in self.components:\n component.set_enabled(False)\n\n def disconnect(self):\n self._scheduled_messages = []\n for encoder in self._encoders:\n encoder.remove_value_listener(self._encoder_value)\n\n for fader in self._faders:\n fader.remove_value_listener(self._fader_value)\n\n for fader_button in self._fader_buttons:\n fader_button.remove_value_listener(self._fader_button_value)\n\n self._master_fader.remove_value_listener(self._fader_value)\n self._master_fader_button.remove_value_listener(self._fader_button_value)\n self._select_button.remove_value_listener(self._select_button_value)\n self._identify_button.remove_value_listener(self._identify_value)\n self._fader_group_midi_button.remove_value_listener(self._midi_button_value)\n self._fader_group_mix_button.remove_value_listener(self._hyper_button_value)\n self._fader_group_fx_button.remove_value_listener(self._hyper_button_value)\n self._encoder_group_midi_button.remove_value_listener(self._midi_button_value)\n self._encoder_group_mix_button.remove_value_listener(self._hyper_button_value)\n self._encoder_group_fx_button.remove_value_listener(self._hyper_button_value)\n if self._drum_group_midi_button != None:\n self._drum_group_midi_button.remove_value_listener(self._midi_button_value)\n if self._drum_group_hyper_button != None:\n self._drum_group_hyper_button.remove_value_listener(self._hyper_button_value)\n self._alt_device_component = None\n self._name_display = None\n self._value_display = None\n self._bank_display = None\n self._pad_display = None\n self._name_display_data_source = None\n self._value_display_data_source = None\n self._bank_display_data_source = None\n self._pad_display_data_source = None\n self._select_button = None\n self._left_button = None\n self._right_button = None\n self._up_button = None\n self._down_button = None\n self._loop_button = None\n self._ffwd_button = None\n self._rwd_button = None\n self._play_button = None\n self._stop_button = None\n self._rec_button = None\n self._master_fader_button = None\n self._fader_buttons = None\n self._faders = None\n self._encoders = None\n self._drum_pads = None\n self._identify_button = None\n self._main_group_hyper_button = None\n self._main_group_track_button = None\n self._main_group_fx_button = None\n self._encoder_group_midi_button = None\n self._encoder_group_mix_button = None\n self._encoder_group_fx_button = None\n self._fader_group_mode_button = None\n self._fader_group_midi_button = None\n self._fader_group_mix_button = None\n self._fader_group_fx_button = None\n self._drum_group_midi_button = None\n self._drum_group_roll_button = None\n self._drum_group_hyper_button = None\n self._mixer_for_encoders = None\n self._mixer_for_faders = None\n self._device_for_encoders = None\n 
self._device_for_faders = None\n self._transport = None\n self._session = None\n ControlSurface.disconnect(self)\n self._send_midi(SYSEX_START + DISABLE_HYPERCONTROL)\n\n def refresh_state(self):\n ControlSurface.refresh_state(self)\n self.schedule_message(5, self._send_midi, IDENTITY_REQUEST)\n\n def handle_sysex(self, midi_bytes):\n if midi_bytes[0:10] == AXIOM_AIR_RESPONSE:\n if midi_bytes[12:15] < AXIOM_REV4_RESPONSE:\n self.schedule_message(1, self._send_midi, SYSEX_START + ENGAGE_HYPERCONTROL)\n self.schedule_message(2, self._send_midi, SYSEX_START + CLEAR_ALL)\n self.schedule_message(3, self._name_display.display_message, 'Firmware')\n self.schedule_message(13, self._name_display.display_message, 'Update')\n self.schedule_message(23, self._name_display.display_message, 'Required')\n self.schedule_message(33, self._send_midi, SYSEX_START + DISABLE_HYPERCONTROL)\n elif midi_bytes[12:15] >= AXIOM_REV4_RESPONSE:\n if self._waiting_for_first_response == True:\n self._waiting_for_first_response = False\n self._has_faders = midi_bytes[10] != 50\n self.schedule_message(1, self._send_midi, SYSEX_START + ENGAGE_HYPERCONTROL)\n self.schedule_message(2, self._send_midi, SYSEX_START + SPECIAL_HYPERCONTROL)\n self.schedule_message(3, self._complete_setup)\n else:\n self._display_reset_delay = 0\n elif midi_bytes[0:8] == REQUEST_HYPERCONTROL:\n self.schedule_message(5, self._send_midi, IDENTITY_REQUEST)\n\n def update_display(self):\n ControlSurface.update_display(self)\n if self._display_reset_delay >= 0:\n self._display_reset_delay -= 1\n if self._display_reset_delay == -1:\n self._set_displays_to_default()\n\n def _on_selected_track_changed(self):\n ControlSurface._on_selected_track_changed(self)\n self._display_reset_delay = 0\n\n def restore_bank(self, bank_index):\n ControlSurface.restore_bank(self, bank_index)\n if self._alt_device_component != None:\n self._alt_device_component.restore_bank(bank_index)\n\n def set_appointed_device(self, device):\n ControlSurface.set_appointed_device(self, device)\n with self.component_guard():\n if self._alt_device_component != None:\n self._alt_device_component.set_device(device)\n\n def set_alt_device_component(self, device_component):\n self._alt_device_component = device_component\n\n def _update_device_selection(self):\n track = self.song().view.selected_track\n device_to_select = track.view.selected_device\n if device_to_select == None and len(track.devices) > 0:\n device_to_select = track.devices[0]\n if device_to_select != None:\n self.song().view.select_device(device_to_select)\n self._device_component.set_device(device_to_select)\n if self._alt_device_component != None:\n self._alt_device_component.set_device(device_to_select)\n\n def _setup_controls(self):\n self._left_button = create_button(99, 'Left_Button')\n self._right_button = create_button(100, 'Right_Button')\n self._up_button = create_button(101, 'Up_Button')\n self._down_button = create_button(102, 'Down_Button')\n self._loop_button = create_button(113, 'Loop_Button')\n self._rwd_button = create_button(114, 'Rwd_Button')\n self._ffwd_button = create_button(115, 'FFwd_Button')\n self._stop_button = create_button(116, 'Stop_Button')\n self._play_button = create_button(117, 'Play_Button')\n self._rec_button = create_button(118, 'Record_Button')\n self._select_button = ConfigurableButtonElement(IS_MOMENTARY, MIDI_CC_TYPE, GLOBAL_CHANNEL, 98)\n self._select_button.name = 'Select_Button'\n self._select_button.add_value_listener(self._select_button_value)\n self._main_group_hyper_button = 
create_configurable_button(104, 'Fader_Group_HyperControl_Button', 2, 14)\n self._main_group_track_button = create_configurable_button(105, 'Main_Group_Track_Button', 2, 11)\n self._main_group_fx_button = create_configurable_button(106, 'Main_Group_Inst_FX_Button', 2, 11)\n self._identify_button = create_configurable_button(97, 'Identify_Button', 2, 16)\n self._identify_button.add_value_listener(self._identify_value)\n self._fader_buttons = []\n for index in range(8):\n self._fader_buttons.append(create_configurable_button(49 + index, 'Fader_Button_%d' % index))\n self._fader_buttons[-1].add_value_listener(self._fader_button_value, identify_sender=True)\n\n self._faders = []\n for index in range(8):\n self._faders.append(create_slider(33 + index, 'Fader_%d' % index))\n self._faders[-1].add_value_listener(self._fader_value, identify_sender=True)\n\n self._master_fader_button = create_configurable_button(57, 'Master_Fader_Button')\n self._master_fader_button.add_value_listener(self._fader_button_value, identify_sender=True)\n self._master_fader = create_slider(41, 'Master_Fader')\n self._master_fader.add_value_listener(self._fader_value, identify_sender=True)\n self._fader_group_mode_button = create_configurable_button(61, 'Fader_Group_Mode_Button')\n self._fader_group_midi_button = create_configurable_button(60, 'Fader_Group_MIDI_Button')\n self._fader_group_midi_button.add_value_listener(self._midi_button_value, identify_sender=True)\n self._fader_group_mix_button = create_configurable_button(58, 'Fader_Group_Mix_Button', 0, 1)\n self._fader_group_mix_button.add_value_listener(self._hyper_button_value, identify_sender=True)\n self._fader_group_fx_button = create_configurable_button(59, 'Fader_Group_Inst_FX_Button', 0, -1)\n self._fader_group_fx_button.add_value_listener(self._hyper_button_value, identify_sender=True)\n self._encoders = []\n for index in range(8):\n self._encoders.append(create_encoder(17 + index, 'Encoder_%d' % index))\n self._encoders[-1].add_value_listener(self._encoder_value, identify_sender=True)\n\n self._encoder_group_midi_button = create_configurable_button(27, 'Encoder_Group_MIDI_Button', 0, 72)\n self._encoder_group_midi_button.add_value_listener(self._midi_button_value, identify_sender=True)\n self._encoder_group_mix_button = create_configurable_button(25, 'Encoder_Group_Mix_Button', 0, 72)\n self._encoder_group_mix_button.add_value_listener(self._hyper_button_value, identify_sender=True)\n self._encoder_group_fx_button = create_configurable_button(26, 'Encoder_Group_Inst_FX_Button', 0, 72)\n self._encoder_group_fx_button.add_value_listener(self._hyper_button_value, identify_sender=True)\n\n def _setup_drum_pads(self):\n self._drum_pads = []\n num_pads = 12 if self._has_faders else 16\n for index in range(8):\n self._drum_pads.append(create_configurable_button(81 + index, 'Pad_%d' % index, 0, 0, MIDI_CC_TYPE))\n\n for index in range(num_pads - 8):\n self._drum_pads.append(ConfigurableButtonElement(IS_MOMENTARY, MIDI_NOTE_TYPE, GLOBAL_CHANNEL - 1, 81 + index, GLOBAL_SEND_CHANNEL, 8, MIDI_CC_TYPE))\n self._drum_pads[-1].name = 'Pad_' + str(index + 8)\n\n self._drum_group_midi_button = create_configurable_button(91, 'Drum_Group_MIDI_Button', 2, -2)\n self._drum_group_midi_button.add_value_listener(self._midi_button_value, identify_sender=True)\n self._drum_group_roll_button = create_configurable_button(90, 'Drum_Group_Roll_Button', -1)\n self._drum_group_hyper_button = create_configurable_button(89, 'Drum_Group_HyperControl_Button', 2, 2)\n 
self._drum_group_hyper_button.add_value_listener(self._hyper_button_value, identify_sender=True)\n\n def _setup_displays(self):\n self._name_display = PhysicalDisplayElement(12, 1)\n self._name_display.name = 'Name_Display'\n self._name_display.set_message_parts(SYSEX_START + (21,), (0, 247))\n self._name_display.set_clear_all_message(CLEAR_NAME)\n self._name_display_data_source = DisplayDataSource()\n self._name_display.segment(0).set_data_source(self._name_display_data_source)\n self._value_display = NumericalDisplayElement(3, 1)\n self._value_display.name = 'Value_Display'\n self._value_display.set_message_parts(SYSEX_START + (20, 48), (0, 247))\n self._value_display.set_clear_all_message(CLEAR_VALUE)\n self._value_display_data_source = DisplayDataSource()\n self._value_display.segment(0).set_data_source(self._value_display_data_source)\n self._bank_display = NumericalDisplayElement(3, 1)\n self._bank_display.name = 'Bank_Display'\n self._bank_display.set_message_parts(SYSEX_START + (19,), (0, 247))\n self._bank_display.set_clear_all_message(CLEAR_BANK)\n self._bank_display_data_source = DisplayDataSource()\n self._bank_display.segment(0).set_data_source(self._bank_display_data_source)\n self._pad_display = NumericalDisplayElement(2, 1)\n self._pad_display.name = 'Pad_Display'\n self._pad_display.set_message_parts(SYSEX_START + (18,), (0, 247))\n self._pad_display.set_clear_all_message(CLEAR_PAD)\n self._pad_display_data_source = DisplayDataSource()\n self._pad_display.segment(0).set_data_source(self._pad_display_data_source)\n\n def _setup_mixer(self):\n self._mixer_for_encoders = SpecialMixerComponent(self._name_display, self._value_display, 8)\n self._mixer_for_encoders.name = 'Mixer_for_encoders'\n self._mixer_for_faders = SpecialMixerComponent(self._name_display, self._value_display, 8)\n self._mixer_for_faders.name = 'Mixer_for_faders'\n\n def _setup_session(self):\n self._session = SpecialSessionComponent(8, 0)\n self._session.name = 'Session_Control'\n self._session.selected_scene().name = 'Selected_Scene'\n self._session.set_mixer(self._mixer_for_encoders)\n self._session.set_alt_mixer(self._mixer_for_faders)\n self._session.add_offset_listener(self._update_bank_value)\n\n def _setup_transport(self):\n self._transport = TransportComponent()\n self._transport.name = 'Transport'\n self._transport.set_stop_button(self._stop_button)\n self._transport.set_play_button(self._play_button)\n self._transport.set_record_button(self._rec_button)\n transport_view_modes = TransportViewModeSelector(self._transport, self._session, self._ffwd_button, self._rwd_button, self._loop_button)\n transport_view_modes.name = 'Transport_View_Modes'\n\n def _setup_device(self):\n self._device_for_encoders = BestBankDeviceComponent()\n self._device_for_encoders.name = 'Device_Component_for_encoders'\n self._device_for_faders = BestBankDeviceComponent()\n self._device_for_faders.name = 'Device_Component_for_faders'\n self.set_device_component(self._device_for_encoders)\n self.set_alt_device_component(self._device_for_faders)\n self._device_nav = DeviceNavComponent()\n self._device_nav.name = 'Device_Nav_Component'\n\n def _setup_modes(self):\n self._fader_button_modes = FaderButtonModeSelector(self._mixer_for_faders, tuple(self._fader_buttons))\n self._fader_button_modes.name = 'Fader_Button_Modes'\n self._fader_button_modes.set_mode_toggle(self._fader_group_mode_button)\n self._fader_modes = FaderModeSelector(self._mixer_for_faders, self._device_for_faders, tuple(self._faders), self._fader_button_modes, 
self._master_fader_button)\n self._fader_modes.name = 'Fader_Modes'\n self._fader_modes.set_mode_buttons((self._fader_group_mix_button, self._fader_group_fx_button))\n self._encoder_modes = EncoderModeSelector(self._mixer_for_encoders, self._device_for_encoders, tuple(self._encoders))\n self._encoder_modes.name = 'Encoder_Modes'\n self._encoder_modes.set_mode_buttons((self._encoder_group_mix_button, self._encoder_group_fx_button))\n main_modes = MainModeSelector(self._device_for_encoders, self._device_for_faders, self._session, self._mixer_for_faders, self._device_nav, self._up_button, self._down_button, self._left_button, self._right_button, self._select_button)\n main_modes.name = 'Main_Modes'\n main_modes.set_mode_buttons((self._main_group_track_button, self._main_group_fx_button))\n\n def _setup_master_fader(self):\n if self._has_faders:\n self._mixer_for_encoders.master_strip().set_volume_control(self._master_fader)\n else:\n self._mixer_for_encoders.selected_strip().set_volume_control(self._master_fader)\n\n def _setup_single_fader_button_modes(self):\n self._single_fader_button_modes = SingleFaderButtonModeSelector(self._mixer_for_encoders, self._fader_group_midi_button)\n self._single_fader_button_modes.name = 'Single_Fader_Button_Modes'\n self._single_fader_button_modes.set_mode_toggle(self._fader_group_mode_button)\n\n def _complete_setup(self):\n self._setup_drum_pads()\n self._set_drum_pads_to_hc()\n self._setup_master_fader()\n if not self._has_faders:\n self._setup_single_fader_button_modes()\n for control in self.controls:\n if isinstance(control, InputControlElement):\n control.clear_send_cache()\n\n for component in self.components:\n component.set_enabled(True)\n\n self._fader_group_midi_button.send_value(LED_OFF, True)\n self._encoder_group_midi_button.send_value(LED_OFF, True)\n self._main_group_hyper_button.send_value(AMB_FULL, True)\n self.request_rebuild_midi_map()\n self._on_selected_track_changed()\n self.schedule_message(1, self._show_startup_message)\n\n def _show_startup_message(self):\n self._send_midi(SYSEX_START + CLEAR_ALL)\n self._name_display.display_message('Ableton Live')\n self._display_reset_delay = INITIAL_DISPLAY_DELAY\n\n def _select_button_value(self, value):\n self._display_reset_delay = STANDARD_DISPLAY_DELAY\n\n def _identify_value(self, value):\n for encoder in self._encoders:\n encoder.set_identify_mode(value > 0)\n\n for fader in self._faders:\n fader.set_identify_mode(value > 0)\n\n self._master_fader.set_identify_mode(value > 0)\n self._display_reset_delay = 0\n self._identify_button.turn_on() if value > 0 else self._identify_button.turn_off()\n\n def _midi_button_value(self, value, sender):\n if value > 0:\n if sender is self._drum_group_midi_button:\n hc_byte = self._hc_byte ^ PADS\n if hc_byte != self._hc_byte:\n self._hc_byte = hc_byte\n self._drum_group_hyper_button.send_value(LED_OFF, True)\n self.schedule_message(1, self._send_midi, SYSEX_START + (32, self._hc_byte, 247))\n elif sender is self._encoder_group_midi_button:\n hc_byte = self._hc_byte ^ ENCODERS\n if hc_byte != self._hc_byte:\n self._hc_byte = hc_byte\n self._encoder_group_mix_button.send_value(LED_OFF, True)\n self._encoder_group_fx_button.send_value(LED_OFF, True)\n if self._encoder_modes.mode_index < 3:\n self._encoder_modes.set_enabled(False)\n self.schedule_message(1, self._send_midi, SYSEX_START + (32, self._hc_byte, 247))\n elif sender is self._fader_group_midi_button:\n if self._has_faders:\n hc_byte = self._hc_byte ^ FADERS\n if hc_byte != self._hc_byte:\n 
self._hc_byte = hc_byte\n self._fader_group_mix_button.send_value(LED_OFF, True)\n self._fader_group_fx_button.send_value(LED_OFF, True)\n self._fader_group_mode_button.send_value(LED_OFF, True)\n if self._fader_modes.mode_index < 2:\n self._fader_modes.set_enabled(False)\n self._fader_button_modes.set_enabled(False)\n self.schedule_message(1, self._send_midi, SYSEX_START + (32, self._hc_byte, 247))\n else:\n self._display_reset_delay = STANDARD_DISPLAY_DELAY\n\n def _hyper_button_value(self, value, sender):\n if value > 0:\n if sender is self._drum_group_hyper_button:\n if self._hc_byte | PADS != self._hc_byte:\n self._hc_byte = self._hc_byte | PADS\n self._send_midi(SYSEX_START + (32, self._hc_byte, 247))\n self.schedule_message(1, self._set_drum_pads_to_hc)\n elif sender is self._encoder_group_fx_button or sender is self._encoder_group_mix_button:\n if self._hc_byte | ENCODERS != self._hc_byte:\n self._hc_byte = self._hc_byte | ENCODERS\n self._send_midi(SYSEX_START + (32, self._hc_byte, 247))\n self._encoder_group_midi_button.turn_off()\n if sender is self._encoder_group_fx_button:\n self._encoder_modes.set_enabled(True)\n self._display_reset_delay = 0\n return\n else:\n self.schedule_message(1, self._encoder_modes.set_enabled, True)\n self.schedule_message(1, self._encoder_modes.update)\n self._display_reset_delay = 2\n return\n elif sender is self._fader_group_fx_button or sender is self._fader_group_mix_button:\n if self._hc_byte | FADERS != self._hc_byte:\n self._hc_byte = self._hc_byte | FADERS\n self._send_midi(SYSEX_START + (32, self._hc_byte, 247))\n self._fader_group_midi_button.turn_off()\n self._fader_button_modes.set_enabled(True)\n if sender is self._fader_group_fx_button:\n self._fader_modes.set_enabled(True)\n self._fader_button_modes.set_enabled(True)\n self._display_reset_delay = 0\n return\n else:\n self.schedule_message(1, self._fader_modes.set_enabled, True)\n self.schedule_message(1, self._fader_modes.update)\n self.schedule_message(1, self._fader_button_modes.set_enabled, True)\n self.schedule_message(1, self._fader_button_modes.update)\n self._display_reset_delay = 2\n return\n self._display_reset_delay = 0\n\n def _set_drum_pads_to_hc(self):\n self._drum_group_midi_button.send_value(LED_OFF, True)\n self._drum_group_hyper_button.send_value(RED_FULL, True)\n for index in range(len(self._drum_pads)):\n self._drum_pads[index].send_value(RED_LOW, True)\n\n def _fader_button_value(self, value, sender):\n self._display_reset_delay = STANDARD_DISPLAY_DELAY\n\n def _fader_value(self, value, sender):\n param = sender.mapped_parameter()\n if param != None:\n param_range = param.max - param.min\n if param.name == 'Track Volume':\n if sender == self._master_fader:\n if self._has_faders:\n name_string = 'Master Vol'\n else:\n name_string = self._mixer_for_faders.selected_strip().track_name_data_source().display_string() + ' Vol'\n else:\n name_string = self._mixer_for_faders.channel_strip(self._faders.index(sender)).track_name_data_source().display_string() + ' Vol'\n else:\n name_string = param.name\n value = int((param.value - param.min) / param_range * 127)\n value_string = str(value)\n else:\n name_string = '<unmapped>'\n value_string = None\n self.schedule_message(1, self._set_value_string)\n self._set_name_string(name_string)\n self._set_value_string(value_string)\n\n def _encoder_value(self, value, sender):\n param = sender.mapped_parameter()\n if param != None:\n param_range = param.max - param.min\n if param.name == 'Track Volume':\n name_string = 
self._mixer_for_encoders.channel_strip(self._encoders.index(sender)).track_name_data_source().display_string() + ' Vol'\n value = int((param.value - param.min) / param_range * 127)\n elif param.name == 'Track Panning':\n name_string = self._mixer_for_encoders.channel_strip(self._encoders.index(sender)).track_name_data_source().display_string() + ' Pan'\n value = int(param.value / param_range * 127)\n if value < 0:\n name_string += ' L'\n elif value > 0:\n name_string += ' R'\n else:\n name_string += ' C'\n else:\n name_string = param.name\n value = int((param.value - param.min) / param_range * 127)\n value_string = str(value)\n else:\n name_string = '<unmapped>'\n value_string = None\n self.schedule_message(1, self._set_value_string)\n self._set_name_string(name_string)\n self._set_value_string(value_string)\n\n def _set_displays_to_default(self):\n self._name_display.segment(0).set_data_source(self._mixer_for_encoders.selected_strip().track_name_data_source())\n self._name_display.update()\n self._update_bank_value()\n self._set_value_string(None)\n self._send_midi(SYSEX_START + LCD_HC_DEFAULT)\n\n def _set_name_string(self, name_string):\n self._name_display.segment(0).set_data_source(self._name_display_data_source)\n self._name_display_data_source.set_display_string(name_string)\n self._display_reset_delay = STANDARD_DISPLAY_DELAY\n\n def _set_value_string(self, value_string = None):\n if value_string != None:\n self._value_display_data_source.set_display_string(value_string)\n else:\n self._value_display.reset()\n\n def _set_bank_string(self, bank_string = None):\n if bank_string != None:\n self._bank_display_data_source.set_display_string(bank_string)\n else:\n self._bank_display.reset()\n\n def _update_bank_value(self):\n bank = (self._session.track_offset() + 1) / self._session.width() + 1\n self._set_bank_string(str(bank))\n\n def _install_mapping(self, midi_map_handle, control, parameter, feedback_delay, feedback_map):\n assert self._in_build_midi_map\n assert midi_map_handle != None\n assert control != None and parameter != None\n assert isinstance(parameter, Live.DeviceParameter.DeviceParameter)\n assert isinstance(control, InputControlElement)\n assert isinstance(feedback_delay, int)\n assert isinstance(feedback_map, tuple)\n success = False\n feedback_rule = None\n if control.message_type() is MIDI_NOTE_TYPE:\n feedback_rule = Live.MidiMap.NoteFeedbackRule()\n feedback_rule.note_no = 0\n feedback_rule.vel_map = (0,)\n elif control.message_type() is MIDI_CC_TYPE:\n feedback_rule = Live.MidiMap.CCFeedbackRule()\n feedback_rule.cc_no = 0\n feedback_rule.cc_value_map = (0,)\n elif control.message_type() is MIDI_PB_TYPE:\n feedback_rule = Live.MidiMap.PitchBendFeedbackRule()\n feedback_rule.value_pair_map = feedback_map\n assert feedback_rule != None\n feedback_rule.channel = control.message_channel()\n feedback_rule.delay_in_ms = feedback_delay\n if control.message_type() is MIDI_NOTE_TYPE:\n success = Live.MidiMap.map_midi_note_with_feedback_map(midi_map_handle, parameter, control.message_channel(), control.message_identifier(), feedback_rule)\n elif control.message_type() is MIDI_CC_TYPE:\n success = Live.MidiMap.map_midi_cc_with_feedback_map(midi_map_handle, parameter, control.message_channel(), control.message_identifier(), control.message_map_mode(), feedback_rule, not control.needs_takeover())\n elif control.message_type() is MIDI_PB_TYPE:\n success = Live.MidiMap.map_midi_pitchbend_with_feedback_map(midi_map_handle, parameter, control.message_channel(), feedback_rule, not control.needs_takeover())\n return success","repo_name":"gluon/AbletonLive9_RemoteScripts","sub_path":"Axiom_AIR_25_49_61/Axiom_AIR_25_49_61.py","file_name":"Axiom_AIR_25_49_61.py","file_ext":"py","file_size_in_byte":31879,"program_lang":"python","lang":"en","doc_type":"code","stars":503,"dataset":"github-code","pt":"5"}
+{"seq_id":"34806306548","text":"# -*- coding: utf-8 -*-\n\"\"\"\nNumerai Predictive Modeling\nTony Silva\n\"\"\"\n\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn import svm\nfrom sklearn.model_selection import train_test_split\n\ntrain = pd.read_csv(\"C:/Users/Anthony Silva/silvat/numerai/numerai_training_data.csv\", index_col=\"id\")\ntest = pd.read_csv(\"C:/Users/Anthony Silva/silvat/numerai/numerai_tournament_data.csv\", index_col=\"id\")\n# Concatenate Datasets to look at data preprocessing\ndf = pd.concat([train, test], axis=0)\n\n# Data Understanding\n# Two data sets given by Numerai\n# Training and Test, test set contains validation, test, and live\n# validation is used for leaderboard scores, test and live are used for scoring and payouts\n# test and live data do not have labels.\n# 154,025 rows of total data\n# 24 columns, 21 features\n# training set contains 108,405 rows\n# test set has 45,620 rows\n# Of the test set, validation has 16,686 rows, and test 27693, live has 1241\nprint(df.shape, \"total dataframe shape\")\nprint(train.shape, \"train dataframe shape\")\nprint(test.shape, \"total test dataframe shape\")\nprint(test.loc[test.data_type == \"live\",:].shape, \"live dataframe shape\")\nprint(test.loc[test.data_type == \"test\",:].shape, \"test dataframe shape\")\nprint(test.loc[test.data_type == \"validation\",:].shape, \"validation dataframe shape\")\n\n# No Missing Values in the features, only in the target\n# Makes sense because we combined training and test sets in one df\nprint(df.isnull().sum())\n\n# Create histograms for every column in the dataframe\n# Commented out due to slow output\ncols = df.columns.values[2:-1]\ndef plotHists(cols):\n for i in cols:\n plt.figure()\n train[i].plot.hist()\n plt.show()\n plt.close()\nplotHists(cols)\n# Target has nearly the same amount for 0 and 1 \nprint(df.groupby([\"target\"]).count())\n# Boxplots of each feature for the target value.
\n# Looking to see if there are any significant differences between target 0 and target 1\n# for each of the different features\ndef plotFeatureTargets():\n for i in cols:\n plt.figure()\n z = \"Boxplot of Target Values on \" + i\n sns.boxplot(x=\"target\", y=i ,data=train[[i, \"target\"]]).set_title(z)\n plt.show()\n plt.close()\nplotFeatureTargets()\n# Averages for most features are almost similar.\nprint(train.groupby([\"target\"])[\"feature1\"].mean())\nprint(train.groupby([\"target\"])[\"feature1\"].std())\n# From R analysis, the means of each target are statistically different\nprint(train.groupby([\"target\"])[\"feature2\"].mean())\nprint(train.groupby([\"target\"])[\"feature2\"].std())\nprint(train.groupby([\"target\"])[\"feature3\"].mean())\nprint(train.groupby([\"target\"])[\"feature3\"].std())\n# Generate Correlation Matrix\nplt.figure()\nsns.heatmap(train.corr())\nplt.show()\nplt.close()\n","repo_name":"tonytrill/numerai","sub_path":"numerai.py","file_name":"numerai.py","file_ext":"py","file_size_in_byte":2788,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"5"} +{"seq_id":"28599266139","text":"import bd\nimport curso\nimport disciplina\nfrom pprint import pprint\n\n\ndef menu_disciplinas():\n running = True\n while running:\n print('1 - Adicionar Disciplina')\n print('2 - Procurar Disciplina')\n print('3 - Ver Todas Disciplinas')\n print('4 - Atualizar Disciplina')\n print('5 - Remover Disciplina')\n print('0 - Retornar')\n opc = input('Opc Selecionada: ')\n\n if opc == '1':\n disciplina.create_disciplina(input('Nome Disciplina: '), input('Nome Professor: '))\n\n elif opc == '2':\n pprint(disciplina.read_disciplina(input('Nome Disciplina: ')))\n\n elif opc == '3':\n for item in disciplina.read_discipina_todos():\n pprint(item)\n\n elif opc == '4':\n print('Atualizar:')\n print('1 - Nome')\n print('2 - Professor')\n prop = input('Opc Selecionada: ')\n\n if prop == '1':\n disciplina.update_disciplina(input('Nome Disciplina: '), 'nomeDisciplina', input('Novo Nome: '))\n elif prop == '2':\n disciplina.update_disciplina(input('Nome Disciplina: '), 'nomeProfessor', input('Novo Professor: '))\n else:\n print('Erro')\n\n elif opc == '5':\n disciplina.delete_disciplina(input('Nome Disciplina'))\n\n elif opc == '0':\n running = False\n else:\n print('Opção não encontrada')\n\n\ndef menu_cursos():\n running = True\n while running:\n print('1 - Adicionar Curso')\n print('2 - Procurar Curso')\n print('3 - Ver todos os Cursos')\n print('4 - Atualizar Curso')\n print('5 - Remover Curso')\n print('6 - Adicionar Disciplina a Curso')\n print('7- Remover Disciplina de Curso')\n print('0 - Retornar')\n opc = input('Opc Selecionada: ')\n\n if opc == '1':\n nome_curso = input('Nome Curso: ')\n lista_disciplinas = [x.strip() for x in input('Disciplinas (separado por virgula): ').split(',')]\n curso.create_curso(nome_curso, lista_disciplinas)\n\n elif opc == '2':\n pprint(curso.read_curso(input('Nome Curso: ')))\n\n elif opc == '3':\n for item in curso.read_curso_todos():\n pprint(item)\n\n elif opc == '4':\n curso.update_curso(input('Nome Curso: '), input('Novo nome: '))\n\n elif opc == '5':\n curso.delete_curso(input('Nome Curso: '))\n\n elif opc == '6':\n if not curso.add_disciplina_curso(input('Nome Curso: '), input('Nome Disciplina: ')):\n print('Disciplina nao encontrada')\n\n elif opc == '7':\n curso.remove_disciplina_curso(input('Nome Curso: '), input('Nome Disciplina: '))\n\n elif opc == '0':\n running = False\n else:\n print('Opção não 
encontrada')\n\n\ndef menu_principal():\n running = True\n while running:\n print('1 - Menu Cursos')\n print('2 - Menu Disciplinas')\n print('0 - Sair')\n opc = input('Opc Selecionada: ')\n\n if opc == '1':\n menu_cursos()\n elif opc == '2':\n menu_disciplinas()\n elif opc == '0':\n running = False\n else:\n print('Opção não encontrada')\n\n\nif __name__ == '__main__':\n bd.clear_bd()\n disciplina.popula_disciplina()\n curso.popula_curso()\n curso.add_disciplina_curso('Ciência da Computação', 'Computaçao 1')\n curso.add_disciplina_curso('Ciência da Computação', 'Circuitos')\n pprint(curso.read_curso('Ciência da Computação'), sort_dicts=False)\n menu_principal()\n","repo_name":"bbrez/BD-T1","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3663,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"4995783035","text":"import sys\nfrom os import path\n\nimport typer\n\nSUCCESS_COLOR = typer.colors.GREEN\nWARNING_COLOR = typer.colors.YELLOW\nERROR_COLOR = typer.colors.BRIGHT_RED\n\n\ndef show_info(msg: str):\n typer.echo(msg)\n\n\ndef show_success(msg: str):\n typer.secho(msg, fg=SUCCESS_COLOR, err=True)\n\n\ndef show_warning(msg: str):\n typer.secho(msg, fg=WARNING_COLOR, err=True)\n\n\ndef show_error(msg: str):\n typer.secho(msg, fg=ERROR_COLOR, err=True)\n\n\ndef get_input_data(input_file: typer.FileText, input_string: str) -> str:\n if input_file is not None:\n if input_string is not None:\n show_warning(\n \"Warning: The input string is ignored since input file is specified.\"\n )\n return input_file.read()\n\n if input_string is not None:\n return input_string\n\n show_error(\"Error: Either a string or a file is required as input.\")\n show_info(\"See help (--help) for more info.\")\n raise typer.Exit(code=1)\n\n\ndef write_output(output_file: typer.FileTextWrite, output_data: str):\n if output_file is None:\n return typer.echo(output_data)\n\n output_file.write(output_data)\n\n if output_file is sys.stdout:\n return\n\n show_success(f\"Output written to: {path.realpath(output_file.name)}\")\n","repo_name":"indic-transliteration/indic_transliteration_py","sub_path":"indic_transliteration/sanscript_cli/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":146,"dataset":"github-code","pt":"5"} +{"seq_id":"9891965020","text":"from segmentation import config\nfrom segmentation.eIMMC import eIMMC\nfrom sklearn.model_selection import GridSearchCV\nimport copy as cp\nimport numpy as np\nimport pickle\n\n\nclass GlobalPatternAnalysis:\n # init LBS by inferring immc model from data\n # 1. FROM DATA: input X:data, Xd:state durations, Xgt:ground truth\n # 2. 
FROM FILE: input X:path\n def __init__(self, X, Xd=None, Xgt=None):\n self.MIX_BASED = 0\n self.TIME_BASED = 1\n self.COUNT_BASED = 2\n self.clf = None\n\n if isinstance(X, str):\n self.mc = pickle.load(open(X, 'rb'))\n self.immc = eIMMC()\n self.immc.load_model(self.mc)\n else:\n Xgt = [itm for seq in Xgt for itm in seq]\n\n # fit model to data\n self.immc = eIMMC()\n if len(Xd) == 0:\n self.immc.cfg.collapsed = True\n self.mc = self.immc.fit(X, durations=Xd, gt_display=Xgt)\n\n # # convert data according to identified super states\n # X_transformed = []\n # for session in X:\n # x_t = [dc.assignments.pop(0) for i in session]\n # X_transformed.append(x_t)\n\n # project data into vector space spanned by super states\n # modus 0: mixed, 1: time-based, 2: count-based\n def project(self, Xe, Xed=[], modus=0):\n if len(Xed) == 0: modus = self.COUNT_BASED\n X_project = []\n if not all(isinstance(el, list) for el in Xe):\n Xe = [Xe]\n Xed = [Xed]\n if not all(isinstance(el, list) for seq in Xe for el in seq):\n Xe = [Xe]\n Xed = [Xed]\n\n for X_entity, Xd_entity in zip(Xe, Xed):\n X_transformed = self.immc.transform(X_entity, Xd_entity)\n X_flat = [ele for seq in X_transformed for ele in seq]\n if modus < self.COUNT_BASED:\n x_ent = np.zeros(self.mc.cfg.L)\n Xd_flat = [ele for seq in Xd_entity for ele in seq]\n x_ent[:np.max(X_flat) + 1] = np.bincount(X_flat, Xd_flat)\n if modus == 1: X_project.append(x_ent)\n if modus % 2 == 0:\n xd_ent = np.zeros(self.mc.cfg.L)\n idx, bincounts = np.unique(X_flat, return_counts=True)\n xd_ent[idx] += bincounts\n if modus == 2: X_project.append(xd_ent)\n if modus == 0: X_project.append(x_ent + xd_ent)\n return X_project\n\n def fit(self, fct, Xp, y, params):\n self.clf = GridSearchCV(fct, params, cv=5, scoring='f1_weighted')\n self.clf.fit(Xp, y)\n\n def predict(self, Xp):\n return self.clf.predict(Xp)\n","repo_name":"WWW2018review/Code","sub_path":"gpa/gpa.py","file_name":"gpa.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"8499486613","text":"Import('RTT_ROOT')\nImport('rtconfig')\nfrom building import *\n\ncwd = GetCurrentDir()\nsrc = Split(\"\"\"\nos/GUI_X.c\n\"\"\")\n\t\n\t\npath = [cwd + '/inc']\n\t\nCPPPATH = path\ngroup = []\n\nif GetDepend(['RT_USING_EMWIN']):\n group = DefineGroup('emWin_library', src, depend = ['RT_USING_EMWIN'], CPPPATH = CPPPATH)\n\nReturn('group')","repo_name":"sundm75/STM32F107Board-rttproject","sub_path":"RTTexamples/Libraries/STemWinLibrary522/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"5"} +{"seq_id":"71007654873","text":"def main():\r\n import sys\r\n\r\n def gcd(a, b): \r\n\r\n if b > a: \r\n a , b = b, a\r\n\r\n while True: \r\n if b == 0: \r\n return int(a)\r\n else: \r\n b, a = a % b, b\r\n\r\n\r\n def linear_cong(a,b,c): # the linear congruence function will make use of the extended euclidean algorithm to solved the congruence\r\n # ax ≅ c (mod b)\r\n \r\n if gcd(a, b) > 1: # if the gcd(a, b) is 1, no need to check\r\n\r\n if c % gcd(a, b) != 0: # standard result from linear diophantine equation that no solution exists if C is not divisible\r\n return \"\\nNO SOLUTIONS POSSIBLE!\" # by gcd(a, b) because this can be re-written as ax + by = c and both sides need to be intgers\r\n\r\n g = gcd(a, b) # reducing a, b by their gcd, also c\r\n a //= g\r\n b //= g\r\n c //= g\r\n\r\n # this is the fun 
part\r\n # I will explain using an example of a = 29, b = 67 and c = 129, thus 29x + 67y = 129\r\n # this is equivalent to solving x = (129 - 67*y)/29\r\n # we reduce this to x = -2*y + (129 - 9*y)/29\r\n # now substitute (129 - 9*y)/29 = u\r\n # solving y in terms of u we get (129 - 29*u)/9\r\n # the key observation is how 67 got reduced to 29 and 29 got reduced to 9\r\n # repeating these steps continuously till 29 is reduced to gcd(67, 29) or gcd(a, b)\r\n # we get a continued fraction in the denominator (recursion was an obvious choice for me here) of the form (c - b*f)/gcd(a, b)\r\n # now we can simply put any integer of our choice in place of 'f' and i chose c//a\r\n # this will very easily return and integer value of 'x' in the end!\r\n\r\n if b == 1: \r\n return c//a \r\n\r\n else:\r\n a, b = b, a % b\r\n u = linear_cong(a,b,c)\r\n\r\n return (c - a*u)//b\r\n\r\n # time complexity of this function is O(logn) as well as it just repeats until we find the gcd(a, b)\r\n # the extended euclidean algorithm\r\n # just like the GCD program, it starts to falter for number with more than 15 digits\r\n \r\n while True:\r\n\r\n message = str(input(\"This is a linear congruence calculator. Press 'Y' to use it and 'N' to exit: \"))\r\n\r\n if message.upper() == 'N':\r\n print(\"\\nThanks for using the Linear congruence calculator!\")\r\n break\r\n\r\n elif message.upper() == 'Y':\r\n\r\n try:\r\n a = int(input(\"\\nENTER A: \"))\r\n b = int(input(\"ENTER B: \"))\r\n c = int(input(\"ENTER C: \"))\r\n \r\n except ValueError:\r\n print(\"\\nInvalid Input! Please try again.\")\r\n continue\r\n \r\n k = linear_cong(a, b, c)\r\n print(k)\r\n \r\n else:\r\n print(\"\\nInvalid Input! Please try again.\")\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"EpsilonNought117/Number-Theory-Calculator","sub_path":"linear_congruence.py","file_name":"linear_congruence.py","file_ext":"py","file_size_in_byte":3027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"38950843349","text":"from __future__ import annotations\n\nimport os\nfrom argparse import ArgumentParser\nfrom collections.abc import Iterable\n\nimport numpy as np\nimport pandas as pd\nfrom numpy.typing import NDArray\nfrom tensorflow import keras\n\nfrom afids_cnn.generator import customImageDataGenerator\n\n\ndef gen_training_array(\n num_channels: int,\n dims: NDArray,\n patches_path: os.PathLike[str] | str,\n) -> NDArray:\n \"\"\"Generate a training array containing patches of raw image and AFID location.\"\"\"\n bps = 4 * num_channels * np.prod(dims)\n file_size = os.path.getsize(patches_path)\n num_samples = np.floor_divide(file_size, bps)\n\n arr_shape_train = (int(num_samples), dims[0], dims[1], dims[2], num_channels)\n\n arr_train = np.memmap(patches_path, \"float32\", \"r\", shape=arr_shape_train)\n return np.swapaxes(arr_train, 1, 3)\n\n\ndef create_generator(\n arr_train: NDArray,\n batch_size: int,\n) -> Iterable[tuple[NDArray, NDArray]]:\n x_train = arr_train[..., 0]\n x_train = x_train.reshape(*x_train.shape[:4], 1)\n y_train = arr_train[..., 1]\n y_train = y_train.reshape(*y_train.shape[:4], 1)\n\n datagen_train = customImageDataGenerator()\n return datagen_train.flow(x_train, y_train, batch_size=batch_size)\n\n\ndef gen_conv3d_layer(\n filters: int,\n kernel_size: tuple[int, int, int] = (3, 3, 3),\n) -> keras.layers.Conv3D:\n return keras.layers.Conv3D(filters, kernel_size, padding=\"same\", activation=\"relu\")\n\n\ndef gen_max_pooling_layer() -> 
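The recursion in `linear_cong` above mirrors the extended Euclidean algorithm. A compact cross-check using the modular inverse (assumes Python 3.8+ for `pow(a, -1, m)`; `solve_congruence` is a hypothetical helper, not part of the script):

from math import gcd

def solve_congruence(a, b, c):
    # Solve a*x ≡ c (mod b); returns None when gcd(a, b) does not divide c.
    g = gcd(a, b)
    if c % g:
        return None
    a, b, c = a // g, b // g, c // g
    return (pow(a, -1, b) * c) % b

print(solve_congruence(29, 67, 129))  # 16, since 29*16 - 129 = 335 = 5*67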
keras.layers.MaxPooling3D:\n return keras.layers.MaxPooling3D((2, 2, 2))\n\n\ndef gen_transpose_layer(filters: int) -> keras.layers.Conv3DTranspose:\n return keras.layers.Conv3DTranspose(\n filters,\n kernel_size=2,\n strides=2,\n padding=\"same\",\n )\n\n\ndef gen_std_block(filters: int, input_):\n x = gen_conv3d_layer(filters)(input_)\n out_layer = gen_conv3d_layer(filters)(x)\n return out_layer, gen_max_pooling_layer()(out_layer)\n\n\ndef gen_opposite_block(filters: int, input_, out_layer):\n x = input_\n for _ in range(3):\n x = gen_conv3d_layer(filters)(x)\n next_filters = filters // 2\n x = gen_transpose_layer(next_filters)(x)\n x = gen_conv3d_layer(next_filters)(x)\n return keras.layers.Concatenate(axis=4)([out_layer, x])\n\n\ndef gen_model() -> keras.Model:\n input_layer = keras.layers.Input((None, None, None, 1))\n x = keras.layers.ZeroPadding3D(padding=((1, 0), (1, 0), (1, 0)))(input_layer)\n\n out_layer_1, x = gen_std_block(16, x) # block 1\n out_layer_2, x = gen_std_block(32, x) # block 2\n out_layer_3, x = gen_std_block(64, x) # block 3\n out_layer_4, x = gen_std_block(128, x) # block 4\n\n # bottleneck\n x = gen_conv3d_layer(256)(x)\n x = gen_conv3d_layer(256)(x)\n x = keras.layers.Conv3DTranspose(filters=128, kernel_size=2, strides=(2, 2, 2))(x)\n x = gen_conv3d_layer(128, (2, 2, 2))(x)\n x = keras.layers.Concatenate(axis=4)([out_layer_4, x])\n\n x = gen_opposite_block(128, x, out_layer_3) # block 5 (opposite 4)\n x = gen_opposite_block(64, x, out_layer_2) # block 6 (opposite 3)\n x = gen_opposite_block(32, x, out_layer_1) # block 7 (opposite 2)\n\n # block 8 (opposite 1)\n for _ in range(3):\n x = gen_conv3d_layer(16)(x)\n\n # output layer\n x = keras.layers.Cropping3D(cropping=((1, 0), (1, 0), (1, 0)), data_format=None)(x)\n x = keras.layers.Conv3D(1, (1, 1, 1), padding=\"same\", activation=None)(x)\n\n return keras.Model(input_layer, x)\n\n\ndef fit_model(\n model: keras.Model,\n new_train: Iterable[tuple[NDArray, NDArray]],\n model_out_path: os.PathLike[str] | str,\n loss_out_path: os.PathLike[str] | str | None,\n epochs: int = 100,\n steps_per_epoch: int = 50,\n loss_fn: keras.losses.Loss | str = \"mse\",\n optimizer: keras.optimizers.Optimizer | str | None = None,\n metrics: list[keras.metrics.Metric | str] | None = None,\n validation_data: Iterable[tuple[NDArray, NDArray]] | None = None,\n validation_steps: int = 50,\n callbacks: Iterable[keras.callbacks.Callback] | None = None,\n):\n if not optimizer:\n optimizer = keras.optimizers.Adam()\n if not metrics:\n metrics = [keras.metrics.RootMeanSquaredError()]\n\n model.compile(\n loss=[loss_fn],\n optimizer=optimizer,\n metrics=metrics,\n )\n history = model.fit(\n new_train,\n epochs=epochs,\n steps_per_epoch=steps_per_epoch,\n validation_data=validation_data,\n validation_steps=validation_steps,\n callbacks=callbacks,\n )\n model.save(model_out_path)\n if loss_out_path:\n pd.DataFrame(history.history).to_csv(loss_out_path)\n return history, model\n\n\ndef gen_parser() -> ArgumentParser:\n parser = ArgumentParser()\n parser.add_argument(\"num_channels\", type=int)\n parser.add_argument(\"radius\", type=int)\n parser.add_argument(\"patches_path\")\n parser.add_argument(\"model_out_path\")\n parser.add_argument(\"--loss_out_path\")\n parser.add_argument(\"--validation_patches_path\")\n parser.add_argument(\"--epochs\", type=int, default=100)\n parser.add_argument(\"--steps_per_epoch\", type=int, default=50)\n parser.add_argument(\"--loss_fn\", default=\"mse\")\n parser.add_argument(\"--optimizer\", default=\"adam\")\n 
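The encoder in gen_model halves each spatial side four times, which constrains the patch size. A quick shape walk-through under the assumption of a radius-23 patch (so dims = 2*23 + 1 = 47):

side = 2 * 23 + 1        # 47, odd by construction
side += 1                # 48 after ZeroPadding3D(((1, 0), (1, 0), (1, 0)))
for _ in range(4):       # blocks 1-4
    side //= 2           # MaxPooling3D((2, 2, 2))
print(side)              # 3 at the bottleneck; the decoder's transposed
                         # convolutions double back to 48, and Cropping3D
                         # restores the original 47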
parser.add_argument(\"--metrics\", nargs=\"*\", default=[\"RootMeanSquaredError\"])\n parser.add_argument(\"--validation_steps\", type=int, default=50)\n parser.add_argument(\"--do_early_stopping\", action=\"store_true\")\n return parser\n\n\ndef main():\n args = gen_parser().parse_args()\n model = gen_model()\n new_train = create_generator(\n gen_training_array(\n args.num_channels,\n np.array([(args.radius * 2) + 1 for _ in range(3)]),\n args.patches_path,\n ),\n batch_size=10,\n )\n validation_data = (\n create_generator(\n gen_training_array(\n args.num_channels,\n np.array([(args.radius * 2) + 1 for _ in range(3)]),\n args.validation_patches_path,\n ),\n batch_size=10,\n )\n if args.validation_patches_path\n else None\n )\n\n callbacks = (\n [keras.callbacks.EarlyStopping(monitor=\"val_loss\", patience=100)]\n if args.do_early_stopping\n else None\n )\n fit_model(\n model,\n new_train,\n args.model_out_path,\n args.loss_out_path,\n epochs=args.epochs,\n steps_per_epoch=args.steps_per_epoch,\n loss_fn=args.loss_fn,\n optimizer=args.optimizer,\n metrics=args.metrics,\n validation_data=validation_data,\n validation_steps=args.validation_steps,\n callbacks=callbacks,\n )\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"afids/afids-CNN","sub_path":"afids_cnn/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6686,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"4517175551","text":"class Solution:\n def findShortestSubArray(self, nums: List[int]) -> int:\n dic, max_degree, res = collections.defaultdict(list), 0, float('inf')\n for i in range(len(nums)):\n dic[nums[i]].append(i + 1)\n max_degree = max(len(dic[nums[i]]), max_degree)\n if max_degree == 1: return 1\n for i in dic:\n if len(dic[i]) == max_degree:\n res = min(res, dic[i][-1] - dic[i][0] + 1)\n return res","repo_name":"ThomasJRooney/practice","sub_path":"leetcode/LC697_subarray.py","file_name":"LC697_subarray.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"6991125698","text":"\"\"\"\n72. 
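With LeetCode's implicit imports supplied explicitly at the top of the file, a quick check of the degree logic in the LC697 record above (positions are stored 1-based, so the span is last - first + 1):

import collections
from typing import List

print(Solution().findShortestSubArray([1, 2, 2, 3, 1, 4, 2]))
# 6 -- the value 2 has degree 3 and its occurrences span indices 1..6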
编辑距离\n给你两个单词 word1 和 word2,请你计算出将 word1 转换成 word2 所使用的最少操作数 。\n你可以对一个单词进行如下三种操作:\n插入一个字符\n删除一个字符\n替换一个字符\n\"\"\"\n\n\"\"\"\n方式1 动态规划\n\"\"\"\n\"\"\"\n方式1\n状态转移方程\nif i=j f(i,j)=f(i-1,j-1)\nif i!=j f(i,j)=min(f(i-1,j),f(i,j-1),f(i-1,j-1))+1 \n\"\"\"\n\n\nclass Solution:\n def minDistance(self, word1: str, word2: str) -> int:\n m = len(word1)\n n = len(word2)\n if not m or not n:\n return max(m, n)\n dp = [[0] * (n+1) for _ in range(m+1)]\n dp[0] = [i for i in range(n+1)]\n for i in range(m+1):\n dp[i][0] = i\n for i in range(1, m+1):\n for j in range(1, n+1):\n if word1[i - 1] == word2[j - 1]:\n dp[i][j] = dp[i - 1][j - 1]\n else:\n dp[i][j] = min(dp[i][j - 1], dp[i - 1][j], dp[i - 1][j - 1]) + 1\n return dp[-1][-1]\n\n\nif __name__ == '__main__':\n a = \"a\"\n b = \"b\"\n s = Solution()\n print(s.minDistance(a, b))\n","repo_name":"jonbenzhang/python-","sub_path":"08leetcode/1_100/72_编辑距离.py","file_name":"72_编辑距离.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"36109070854","text":"#随机森林\nfrom sklearn.ensemble import RandomForestClassifier\nimport xlrd\nimport matplotlib.pyplot as plt\nimport openpyxl\nimport datetime\n#%matplotlib inline\n \n#读取数据\ndata_path1 = './data/14A17normalRF.xlsx'\ndata_path2 = './data/14A17abnormalRF.xlsx'\ndata = [];Y=[]\ndata_xsls = xlrd.open_workbook(data_path1) \nsheet_name = data_xsls.sheets()[0] \ncount_nrows = sheet_name.nrows \nfor i in range(1,count_nrows,4):\n a = []\n for j in range(9):\n a.append(sheet_name.cell_value(i,j+1))\n Y.append(1)\n data.append(a)\ndata_xsls = xlrd.open_workbook(data_path2) \nsheet_name = data_xsls.sheets()[0] \ncount_nrows = sheet_name.nrows \nfor i in range(1,count_nrows):\n a = []\n for j in range(9):\n a.append(sheet_name.cell_value(i,j+1))\n data.append(a)\n Y.append(0)\n\nclf = RandomForestClassifier().fit(data,Y)\n\n\nY = []\ndataabnormal = [];datatime = []\ndata_xsls = xlrd.open_workbook(\"./data/14A18normalRF.xlsx\") \nsheet_name = data_xsls.sheets()[0] \ncount_nrows = sheet_name.nrows \nfor i in range(1,count_nrows):\n a = []\n for j in range(9):\n a.append(sheet_name.cell_value(i,j+1))\n dataabnormal.append(a)\n b = xlrd.xldate_as_tuple(sheet_name.cell_value(i,0),0)\n datatime.append(datetime.datetime(*b))\n Y.append(clf.predict([a]))\n \n\nxls=openpyxl.Workbook()\nsheet = xls.active\npos = 0\nfor i in range(len(Y)):\n if Y[i] == 0:\n for j in range(9):\n sheet.cell(i+2-pos,j+2,dataabnormal[i][j])\n sheet.cell(i+2-pos,1,datatime[i])\n else:\n pos+=1\nxls.save('./data/14A18PredictRF.xlsx')","repo_name":"Eureke-Ch/failure-warning-of-ower-plant-auxiliary-equipment","sub_path":"RF.py","file_name":"RF.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"5"} +{"seq_id":"30348731760","text":"from contextlib import contextmanager, ContextDecorator\n\ndefault_exc_msg = 'Test'\n\n\n@contextmanager\ndef silence_exception(expected_exc_type, expected_exc_msg=default_exc_msg):\n try:\n yield\n except Exception as caught_exc:\n if type(caught_exc) != expected_exc_type or \\\n str(caught_exc) != expected_exc_msg:\n raise type(caught_exc)(str(caught_exc))\n\n\nclass silence_exception_class(ContextDecorator):\n def __init__(self, exc_type, exc_msg=default_exc_msg):\n self.expected_exc_type = exc_type\n self.expected_exc_msg = exc_msg\n\n def __enter__(self):\n return self\n\n def __exit__(self, caught_exc_type, caught_exc_value, traceback):\n 
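Two quick assertions against the edit-distance DP above (the classic "horse" -> "ros" case needs one replace and two deletes; the empty-string guard returns max(m, n) directly):

s = Solution()
assert s.minDistance("horse", "ros") == 3   # replace h->r, delete r, delete e
assert s.minDistance("", "abc") == 3        # guard path: three inserts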
same_type = self.expected_exc_type == caught_exc_type\n same_message = caught_exc_value is not None and \\\n self.expected_exc_msg == str(caught_exc_value)\n\n return same_type and same_message\n","repo_name":"hpmalinova/Hack-Bulgaria","sub_path":"week7/01. Context Managers/silence_exc.py","file_name":"silence_exc.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"7895807477","text":"students=[\n ['홍길동',100,90,80,-1,-1,-1],\n ['김세진',90,90,80,-1,-1,-1],\n ['고길동',80,90,80,-1,-1,-1],\n ['마이콜',70,90,80,-1,-1,-1],\n\n]\n\n_NAME_IX=0\n_KOR_IX=1\n_ENG_IX=2\n_MATH_IX=3\n_TOTAL_IX=4\n_AVG_IX=5\n_RANK_IX=6\n_SUBJECT_COUNT=3 #과목수\n\ndef calc_total(scores):\n for score in scores:\n score[_TOTAL_IX]=score[_KOR_IX]+score[_ENG_IX]+score[_MATH_IX]\n\ndef calc_average(scores):\n for score in scores:\n score[_AVG_IX]=score[_TOTAL_IX]/_SUBJECT_COUNT\n\n\ndef sort_fn(scores):\n return scores[_TOTAL_IX]\n\ndef sort_name(scores):\n return scores[_NAME_IX]\n\ndef calc_rank(scores):\n scores.sort(key=sort_fn,reverse=True)\n rank=0\n oldScore=-1\n for ix,score in enumerate(scores):\n if oldScore!=score[_TOTAL_IX]:\n rank=ix+1\n oldScore=score[_TOTAL_IX]\n\n scores[ix][_RANK_IX]=rank\n\ndef print_menu():\n print(\"성적관리 메뉴\")\n print(\"출력(P) | 검색(S) | 종료(X)\")\n\ndef get_average(scores):\n total=0\n for score in scores:\n total +=score[_AVG_IX]\n return total/len(scores)\n\n\ndef find_by_name(scores,name):\n result=[]\n for score in scores:\n if(name==score[_NAME_IX]):\n result.append(score)\n return result\n\n\ndef search(scores):\n name=input('검색할 이름')\n search_student=find_by_name(scores,name)\n if search_student:\n print_student(search_student)\n else:\n print(\"%s 학생이 없습니다\"%name)\n\n\ndef print_student(scores,sort_fn=sort_name):\n print('------------------------------------------')\n print(' 이름 국어 영어 수학 총점 평균 순위')\n print('------------------------------------------')\n for score in scores:\n print(\"%5s%5d%5d%5d %5d %5d%5d\"%(score[_NAME_IX],score[_KOR_IX],score[_ENG_IX],score[_MATH_IX],score[_TOTAL_IX],score[_AVG_IX],score[_RANK_IX]))\n print('------------------------------------------')\n\n\ndef print_scores(scores,sort_fn=sort_name):\n avg=get_average(scores)\n scores.sort(key=sort_fn)\n print_student(scores)\n print('전체 평균 : %.2f'%avg)\n\n\n\ncalc_total(students)\ncalc_average(students)\ncalc_rank(students)\n\n\n\nwhile True:\n print_menu()\n select=input('메뉴 선택:')\n if select =='P':\n print('-----출력 선택')\n print_score(students)\n elif select =='S':\n print('----검색 선택')\n search(students)\n elif select=='X':\n print('-----종료 선택')\n break\n else:\n print('올바르지 않은 입력입니다.')\n","repo_name":"rjsgmlsms126/raspberrypi","sub_path":"untitled/venv/22.py","file_name":"22.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"3734660112","text":"import setuptools\r\nimport os\r\nos.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__),os.pardir)))#待理解\r\nwith open(\"README.md\",\"r\") as fh:\r\n long_description=fh.read()\r\n\r\nsetuptools.setup(\r\n name=\"WYC's frist package\",\r\n version=\"0.0.1\",\r\n author=\"吴宇晨\",\r\n author_email=\"2296136694@qq.com\",\r\n description=\"用来学习发布包的包\",\r\n long_description=long_description,\r\n long_description_content_type=\"text/markdown\",\r\n url=\"\",\r\n packages=setuptools.find_packages(),\r\n classifiers=[\r\n \"Programming Language :: Python :: 3\",\r\n \"License 
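Both helpers in the silence_exc record suppress an exception only when type and message match exactly; anything else propagates. Illustrative usage (ValueError is used because str(KeyError("Test")) is "'Test'" with quotes and would fail the message check):

with silence_exception(ValueError, "Test"):
    raise ValueError("Test")            # swallowed: type and message match

@silence_exception_class(ValueError, "Test")
def boom():
    raise ValueError("Test")

boom()                                  # swallowed by the decorator form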
:: OSI Approved :: MIT License\",\r\n \"Operating System :: OS Independent\",\r\n ],\r\n )\r\n","repo_name":"Wuyuchen2190/my-first-module-packages-repository","sub_path":"daifabudebao/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"73931131032","text":"from PIL import Image, ImageDraw, ImageFont\n \nimg = Image.open('qr_resized.png','r')\n\nnumberfont = ImageFont.truetype('Roboto-Medium.ttf', 28)\nnamefont = ImageFont.truetype('Roboto-Bold.ttf', 25)\nd = ImageDraw.Draw(img)\n#Name\nd.text((28,321), \"Jaddu\", font=namefont, fill=(255,255,255))\n#AC Number\nd.text((28,242), \"1090026\", font=numberfont, fill=(255,255,255))\n \nimg.save('pil_text.png')","repo_name":"jeganathpv/FaceID_ATM","sub_path":"Workspace/POC-Testing/qr-create/write-image.py","file_name":"write-image.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"16656446242","text":"def Undo_AddEntity():\n from editor_python_test_tools.wait_utils import PrefabWaiter\n\n # One of the undos is for entity selection.\n azlmbr.legacy.general.undo()\n PrefabWaiter.wait_for_propagation()\n azlmbr.legacy.general.undo()\n PrefabWaiter.wait_for_propagation()\n\ndef Redo_AddEntity():\n from editor_python_test_tools.wait_utils import PrefabWaiter\n\n # One of the redos is for entity selection.\n azlmbr.legacy.general.redo()\n PrefabWaiter.wait_for_propagation()\n azlmbr.legacy.general.redo()\n PrefabWaiter.wait_for_propagation()\n \n\ndef AddEntity_UnderUnfocusedInstanceAsOverride():\n \"\"\"\n Test description:\n - Create a car prefab/instance with the following hierarchy, and focus on the car:\n CAR (car instance, focused)\n |- AXLE (axle instance)\n |- LeftWheel (wheel instance)\n |- WheelEntity\n |- RightWheel (wheel instance)\n |- WheelEntity\n\n - Create a new entity 'TireEntity' and add it under 'LeftWheel' as override of car instance.\n - Undo/Redo adding 'TireEntity' under 'LeftWheel'.\n - Check that 'TireEntity' is correctly added without accidentally modifying another wheel instance 'RightWheel' during prefab propagations.\n \"\"\"\n\n from editor_python_test_tools.editor_entity_utils import EditorEntity\n from editor_python_test_tools.prefab_utils import Prefab\n from editor_python_test_tools.wait_utils import PrefabWaiter\n\n import Prefab.tests.PrefabTestUtils as prefab_test_utils\n\n prefab_test_utils.open_base_tests_level()\n\n from pathlib import Path\n WHEEL_PREFAB_FILE_NAME = Path(__file__).stem + '_' + 'wheel_prefab' \n AXLE_PREFAB_FILE_NAME = Path(__file__).stem + '_' + 'axle_prefab'\n CAR_PREFAB_FILE_NAME = Path(__file__).stem + '_' + 'car_prefab'\n\n # Create wheel/axle/car prefabs and instances, and then focus on car instance. 
\n LEFT_WHEEL_INSTANCE_NAME = \"LeftWheel\"\n RIGHT_WHEEL_INSTANCE_NAME = \"RightWheel\"\n WHEEL_ENTITY_NAME = \"WheelEntity\"\n\n wheel_entity = EditorEntity.create_editor_entity(name=WHEEL_ENTITY_NAME)\n assert wheel_entity.id.IsValid(), f\"Couldn't create entity '{WHEEL_ENTITY_NAME}'\"\n\n wheel_prefab, left_wheel_instance = Prefab.create_prefab([wheel_entity], WHEEL_PREFAB_FILE_NAME, prefab_instance_name=LEFT_WHEEL_INSTANCE_NAME)\n right_wheel_instance = wheel_prefab.instantiate(name=RIGHT_WHEEL_INSTANCE_NAME)\n \n AXLE_INSTANCE_NAME = \"AXLE\"\n _, axle_instance = Prefab.create_prefab(\n [left_wheel_instance.container_entity, right_wheel_instance.container_entity], AXLE_PREFAB_FILE_NAME, prefab_instance_name=AXLE_INSTANCE_NAME)\n \n CAR_INSTANCE_NAME = \"CAR\"\n _, car_instance = Prefab.create_prefab([axle_instance.container_entity], CAR_PREFAB_FILE_NAME, prefab_instance_name=CAR_INSTANCE_NAME)\n car_instance.container_entity.focus_on_owning_prefab()\n \n # Find the container entity of 'LeftWheel', creates a new entity 'TireEntity', and adds the new entity under 'LeftWheel'.\n left_wheel_instance_container_entity = EditorEntity.find_editor_entity(entity_name=LEFT_WHEEL_INSTANCE_NAME, must_be_unique=True)\n assert left_wheel_instance_container_entity.id.IsValid(), f\"Couldn't find valid entity '{LEFT_WHEEL_INSTANCE_NAME}'\"\n\n TIRE_ENTITY_NAME = \"TireEntity\"\n tire_entity = EditorEntity.create_editor_entity(parent_id=left_wheel_instance_container_entity.id, name=TIRE_ENTITY_NAME)\n # Wait till prefab propagation finishes before validation.\n PrefabWaiter.wait_for_propagation()\n\n # Check if 'TireEntity' is added under 'LeftWheel' only correctly. \n assert tire_entity.id.IsValid(), f\"Couldn't create entity '{TIRE_ENTITY_NAME}'' under prefab instance '{LEFT_WHEEL_INSTANCE_NAME}'\"\n assert tire_entity.get_name() == TIRE_ENTITY_NAME, f\"Entity '{tire_entity.get_name()}''s name should be {TIRE_ENTITY_NAME}\"\n assert tire_entity.get_parent_id() == left_wheel_instance_container_entity.id, f\"Entity '{LEFT_WHEEL_INSTANCE_NAME}' should be the parent of entity '{TIRE_ENTITY_NAME}'\"\n prefab_test_utils.validate_expected_override_status(tire_entity, True)\n\n child_entity_ids = left_wheel_instance_container_entity.get_children()\n assert len(child_entity_ids) == 2, f\"{len(child_entity_ids)} child entities found under entity '{LEFT_WHEEL_INSTANCE_NAME}'\" \\\n f\" after add-entity operation, when there should have been 2 child entities\"\n\n right_wheel_instance_container_entity = EditorEntity.find_editor_entity(entity_name=RIGHT_WHEEL_INSTANCE_NAME, must_be_unique=True)\n assert right_wheel_instance_container_entity.id.IsValid(), f\"Couldn't find valid entity '{RIGHT_WHEEL_INSTANCE_NAME}'\"\n\n child_entity_ids = right_wheel_instance_container_entity.get_children()\n assert len(child_entity_ids) == 1, f\"{len(child_entity_ids)} child entities found under entity '{RIGHT_WHEEL_INSTANCE_NAME}'\" \\\n f\" after add-entity operation, when there should have been 1 child entity\"\n\n # Test undo/redo on adding 'TireEntity' under 'LeftWheel'.\n Undo_AddEntity()\n\n left_wheel_instance_container_entity = EditorEntity.find_editor_entity(entity_name=LEFT_WHEEL_INSTANCE_NAME, must_be_unique=True)\n assert left_wheel_instance_container_entity.id.IsValid(), f\"Couldn't find valid entity '{LEFT_WHEEL_INSTANCE_NAME}'\"\n\n child_entity_ids = left_wheel_instance_container_entity.get_children()\n assert len(child_entity_ids) == 1, f\"{len(child_entity_ids)} child entities found under entity 
'{LEFT_WHEEL_INSTANCE_NAME}'\" \\\n f\" after Undo operation, when there should have been 1 child entity\"\n \n tire_entities = EditorEntity.find_editor_entities(entity_names=[TIRE_ENTITY_NAME])\n assert len(tire_entities) == 0, f\"{len(tire_entities)} '{TIRE_ENTITY_NAME}' entities exist\" \\\n f\" after Undo operation, when there shouldn't have been any\"\n\n Redo_AddEntity()\n \n left_wheel_instance_container_entity = EditorEntity.find_editor_entity(entity_name=LEFT_WHEEL_INSTANCE_NAME, must_be_unique=True)\n assert left_wheel_instance_container_entity.id.IsValid(), f\"Couldn't find valid entity '{LEFT_WHEEL_INSTANCE_NAME}'\"\n\n tire_entity = EditorEntity.find_editor_entity(entity_name=TIRE_ENTITY_NAME, must_be_unique=True)\n assert tire_entity.id.IsValid(), f\"Couldn't find valid entity '{TIRE_ENTITY_NAME}'\"\n\n assert tire_entity.get_parent_id() == left_wheel_instance_container_entity.id, f\"Entity '{LEFT_WHEEL_INSTANCE_NAME}' should be the parent of entity '{TIRE_ENTITY_NAME}'\"\n\n child_entity_ids = left_wheel_instance_container_entity.get_children()\n assert len(child_entity_ids) == 2, f\"{len(child_entity_ids)} child entities found under entity '{LEFT_WHEEL_INSTANCE_NAME}'\" \\\n f\" after Redo operation, when there should have been 2 child entity\"\n\n right_wheel_instance_container_entity = EditorEntity.find_editor_entity(entity_name=RIGHT_WHEEL_INSTANCE_NAME, must_be_unique=True) \n assert right_wheel_instance_container_entity.id.IsValid(), f\"Couldn't find valid entity '{RIGHT_WHEEL_INSTANCE_NAME}'\"\n\n child_entity_ids = right_wheel_instance_container_entity.get_children()\n assert len(child_entity_ids) == 1, f\"{len(child_entity_ids)} child entities found under entity '{RIGHT_WHEEL_INSTANCE_NAME}'\" \\\n f\" after Redo operation, when there should have been 1 child entity\"\n # Revert the re-applied overrides\n tire_entity.revert_overrides()\n PrefabWaiter.wait_for_propagation()\n\n # Validate the revert\n tire_entities = EditorEntity.find_editor_entities(entity_names=[TIRE_ENTITY_NAME])\n assert len(tire_entities) == 0, f\"Expected 0 '{TIRE_ENTITY_NAME}' entities after Revert Overrides operation, but \" \\\n f\"found {len(tire_entities)}.\"\n\n\nif __name__ == \"__main__\":\n from editor_python_test_tools.utils import Report\n Report.start_test(AddEntity_UnderUnfocusedInstanceAsOverride)\n","repo_name":"o3de/o3de","sub_path":"AutomatedTesting/Gem/PythonTests/Prefab/tests/overrides/AddEntity_UnderUnfocusedInstanceAsOverride.py","file_name":"AddEntity_UnderUnfocusedInstanceAsOverride.py","file_ext":"py","file_size_in_byte":8153,"program_lang":"python","lang":"en","doc_type":"code","stars":7004,"dataset":"github-code","pt":"5"} +{"seq_id":"43156569670","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.optimize as opt\nimport nicefunctions as fun\n\n# loading the data\ntestrun = np.loadtxt(open(\"Testrun_511keV.Spe\").readlines()[:-16], skiprows=12)\nM1 = np.loadtxt(open(\"measurement1_time_offset.Spe\").readlines()[:-16], skiprows=12)\nM2 = np.loadtxt(open(\"measurement2_lifetime_in_aluminium.Spe\").readlines()[:-16], skiprows=12)\nM3 = np.loadtxt(open(\"measurement3_main.Spe\").readlines()[:-16], skiprows=12)\ntimedata = np.loadtxt(open(\"timecalibration.Spe\").readlines()[:-16], skiprows=12)\n\n# normally plotting the data using fun.plot\nfun.plot(M1, \"Testrun\", filename=\"Testrun\")\nfun.plot(M1, \"Measurement 1\", filename=\"Measurement_1_plot\")\nfun.plot(M2, \"Measurement 2\", filename=\"Measurement_2_plot\")\nfun.plot(M3, \"Measurement 3\", 
filename=\"Measurement_3_plot\")\nfun.plot(timedata, \"Time calibration\", filename=\"Time_calibration\")\n\n# Timecalibration part\n# First we distinguish the different gaussians using fun.plotrange and plot them (will be commented out as this has no further use currently)\n\nfun.plotrange(timedata,8860,8920,\"Peak 8500\",\"peak_8500\")\n\"\"\"\nfun.plotrange(timedata,10160,10200,\"Peak 10200\",\"peak_10200\")\nfun.plotrange(timedata,11500,11550,\"Peak 11500\",\"peak_11500\")\nfun.plotrange(timedata,12790,12820,\"Peak 12800\",\"peak_12800\")\nfun.plotrange(timedata,14120,14150,\"Peak 14100\",\"peak_14100\")\n\"\"\"\n\n# Then we use the gaussfit with information on range, amplitude, mu and sigma, we also get the mean values\n\ntimegaussfit_mean_1 = fun.custom_fit(timedata, 8884, 8900, [8892, 3, 20000], fun.gauss_function, \"Peak 8500\",\n \"Timecalibrationgauss/TimeGauss_peak_8500.png\", gauss_bool=True, xaxis=\"$\\Delta$t ADC units\")\ntimegaussfit_mean_2 = fun.custom_fit(timedata, 10170, 10190, [10182, 3, 2000], fun.gauss_function, \"Peak 10200\",\n \"Timecalibrationgauss/TimeGauss_peak_10200\", gauss_bool=True,xaxis=\"$\\Delta$t ADC units\" )\ntimegaussfit_mean_3 = fun.custom_fit(timedata, 11515, 11530, [11522, 3, 1500], fun.gauss_function, \"Peak 11500\",\n \"Timecalibrationgauss/TimeGauss_peak_11500.png\", gauss_bool=True,xaxis=\"$\\Delta$t ADC units\")\ntimegaussfit_mean_4 = fun.custom_fit(timedata, 12800, 12820, [12809, 3, 1400], fun.gauss_function, \"Peak 12800\",\n \"Timecalibrationgauss/TimeGauss_peak_12800.png\", gauss_bool=True,xaxis=\"$\\Delta$t ADC units\")\ntimegaussfit_mean_5 = fun.custom_fit(timedata, 14125, 14145, [14136, 3, 3500], fun.gauss_function, \"Peak 14100\",\n \"Timecalibrationgauss/TimeGauss_peak_14100.png\", gauss_bool=True,xaxis=\"$\\Delta$t ADC units\")\ntimecal_mean_lyst = [timegaussfit_mean_1, timegaussfit_mean_2, timegaussfit_mean_3, timegaussfit_mean_4,\n timegaussfit_mean_5]\n\n# Now we create the linear fit using known values for the delay\ndelay_lyst = [0, 4, 8, 12, 16]\ntimetranslation = fun.linear_fit(timecal_mean_lyst, delay_lyst,print_bool=True,xaxis=\"$\\Delta$t ADC units\",yaxis=\"$\\Delta$t [ns]\")\n\n# fun.plot(M1,\"Testrun\",filename= \"Testrun_with_timecal\",timetrans=timetranslation)\n\n# Measurement 1 part\n# we can do a gaussian fit for the first measurement + residuals\nfun.plot(M1, \"Measurement 1\", filename=\"_1_plot_adjusted_time\", timetrans=timetranslation)\ntime_offset = fun.custom_fit(M1, 7500, 8500, [7800 * timetranslation, 20 * timetranslation, 80], fun.gauss_function,\n \"Measurement 1\",\n \"Fits/Measurement_1_gaussfit.png\", timetrans=timetranslation, linewidthh=2,\n residual_bool=True, residual_file=\"Measurement_1\", gauss_bool=True, print_bool=True,print_text=\"Time offset (ns) measurement 1: \")\nprint(\"Time offset (ns): \" + str(time_offset))\n\n# Measurement 2 part\n#\nfun.plot(M2, \"Measurement 2\", filename=\"_2_plot_adjusted_time\", timetrans=timetranslation, linewidthh=1)\nfun.custom_fit(M2, 7500, 9000, [8000 * timetranslation, 10 * timetranslation, 12, 50 * timetranslation],\n fun.gauss_exp_convolution, \"Measurement 2\", \"Fits/Measurement_2_gauss_exp_convolutionfit.png\",\n timetrans=timetranslation, linewidthh=2, residual_bool=True, residual_file=\"Measurement_2\",print_bool=True, print_text=\"Lifetime (ns) measurement 2: \")\n\n\n# measurement 3 part\nfun.plot(M3, \"Measurement 3\", filename=\"_3_plot_adjusted_time\", timetrans=timetranslation)\nfun.custom_fit(M3,7500,10000,[25, 0.18, 200, 200, 20, 
1],fun.final_gauss_exp_convolution,\"Measurement 3\",\"Fits/Measurement_3_final_gauss_exp_convolutionfit.png\",timetrans=timetranslation,linewidthh=2,residual_bool=True,residual_file=\"Measurement_3\",print_bool=True,print_text=\"Lifetime (ns) measurement 3: \",final_convolution_bool=True)\n#fun.plot_test(M3, 0, 18000, fun.final_gauss_exp_convolution, [27.2, 0.18, 200, 200, 2, 0.1],\n# timetrans=timetranslation)\n","repo_name":"Pepponita/Python-Stuff","sub_path":"KT/Code/DataAnalysis.py","file_name":"DataAnalysis.py","file_ext":"py","file_size_in_byte":4902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"8452914642","text":"from matplotlib import pyplot as plt\nimport os\nimport ktrain\nimport math\nimport parameter\nimport re\nfrom ktrain import vision as vis\nimport tensorflow as tf\nimport keras\n# from keras.optimizers import Adam\n# from tensorflow import keras\nfrom PIL import ImageFile\n# from keras.models import Model\n# from keras.layers import Dense, Flatten\n\nfrom tensorflow.python.client import device_lib\nprint(device_lib.list_local_devices())\n\n\n# Functions\ndef show_prediction(fname):\n fname = DATADIR + '/' + fname\n pred = round(predictor.predict_filename(fname)[0], 2)\n actual = p.search(fname).group(1)\n print(\"Predicted Price: %s | Actual Price: %s\" % (pred, actual))\n\n# Find optimal learning rate\nclass LRFind(tf.keras.callbacks.Callback):\n def __init__(self, min_lr, max_lr, n_rounds):\n self.min_lr = min_lr\n self.max_lr = max_lr\n self.step_up = (max_lr/min_lr) ** (1 / n_rounds)\n self.lrs = []\n self.losses = []\n\n def on_train_begin(self, logs=None):\n self.weights = self.model.get_weights()\n self.model.optimizer.lr = self.min_lr\n\n def on_train_batch_end(self, batch, logs=None):\n self.lrs.append(self.model.optimizer.lr.numpy())\n self.losses.append(logs[\"loss\"])\n self.model.optimizer.lr = self.model.optimizer.lr * self.step_up\n if self.model.optimizer.lr > self.max_lr:\n self.model.stop_training = True\n\n def on_train_end(self, logs=None):\n self.model.set_weights(self.weights)\n\n\n# Calculate difference between predicted and actual price\ndef calc_difference(fname):\n pred = round(predictor.predict_filename(fname)[0], 2)\n actual = float(p.search(fname).group(1))\n diff = abs(pred - actual)\n return diff\n\n\n# Calculate difference for MAPE\ndef calc_difference_mape(fname):\n pred = round(predictor.predict_filename(fname)[0], 2)\n actual = float(p.search(fname).group(1))\n diff = abs(pred - actual)\n diff_mape = abs(diff / actual)\n return diff_mape\n\n\n# Calculate MAE\ndef mae(test_data):\n differences = 0\n for element in test_data:\n fname = DATADIR + '/' + element\n differences += calc_difference(fname)\n mae = differences / len(test_data)\n return mae\n\n\n# Calculate RMSE\ndef rmse(test_data):\n differences = 0\n for element in test_data:\n fname = DATADIR + '/' + element\n differences += (calc_difference(fname))**2\n rmse = math.sqrt(differences / len(test_data))\n return rmse\n\n\n# Calculate MAPE\ndef mape(test_data):\n differences = 0\n for element in test_data:\n fname = DATADIR + '/' + element\n differences += calc_difference_mape(fname)\n mape = (differences / len(test_data)) * 100\n return mape\n\n\n\n# Filter price information from filename\npattern = r'_([^/]+)_\\d+_.jpg$'\np = re.compile(pattern)\nr = p.search('_0.01_1099561093123_.jpg')\nprint(r.group(1))\n\n# Specify directory\nDATADIR = parameter.Baseline_ResNet50.dir\n\n# Select train and test data\n(train_data, 
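The time calibration in DataAnalysis.py boils down to a line through (ADC centroid, known delay) pairs. A minimal equivalent with np.polyfit, using the five fitted peak positions as illustrative inputs (the actual centroids come from the Gaussian fits above):

import numpy as np

centroids = np.array([8892.0, 10182.0, 11522.0, 12809.0, 14136.0])  # ADC units
delays_ns = np.array([0.0, 4.0, 8.0, 12.0, 16.0])                   # known delays

slope, intercept = np.polyfit(centroids, delays_ns, 1)
print(f"{slope * 1e3:.3f} ps per ADC channel")  # roughly 3.05 ps/channel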
test_data, preproc) = vis.images_from_fname(DATADIR, pattern = pattern,\n is_regression = True,\n random_state = 42)\n\n# Selection of possible models\nvis.print_image_regression_models()\n\n# Model specification\npretrained_model = vis.image_regression_model('pretrained_resnet50',\n train_data = train_data,\n val_data = test_data,)\n\n# Remove last three layers from pretrained model\npretrained_model = keras.models.Model(pretrained_model.input,\n pretrained_model.layers[-3].output)\n\nmodel = keras.models.Sequential()\nmodel.add(pretrained_model)\nmodel.add(keras.layers.Dense(512, activation=\"relu\"))\nmodel.add(keras.layers.Dense(1))\n\n# Print complete model\nprint(model.summary())\n\n# Specify learner with parameters\nlearner = ktrain.get_learner(model = model,\n train_data = train_data,\n val_data = test_data,\n batch_size = 256)\n\n# Compile learner\nlearner.model.compile(optimizer=\"Adam\",\n loss=tf.keras.losses.MeanAbsoluteError(),\n metrics=[tf.keras.metrics.MeanAbsoluteError()])\n\n# Optional: optimal learning rate finder\n# ImageFile.LOAD_TRUNCATED_IMAGES = True\n# lr_find = LRFind(1e-10, 1e1, 400)\n# model.fit(\n# train_data,\n# steps_per_epoch=400,\n# epochs=1,\n# callbacks=[lr_find]\n# )\n#\n# plt.plot(lr_find.lrs, lr_find.losses)\n# plt.xscale('log')\n# plt.xlabel('Learning Rate')\n# plt.ylabel(\"MAE\")\n# plt.savefig(\"learning_rate.jpg\")\n# plt.show()\n\n\n# Training of model\nImageFile.LOAD_TRUNCATED_IMAGES = True\n# First layers are trained with weight adjustment\nhistory = learner.fit_onecycle(lr=1e-4, epochs=10)\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'val'], loc='upper left')\nplt.savefig('loss_diagram.jpg')\nplt.show()\n\n# layers frozen\nImageFile.LOAD_TRUNCATED_IMAGES = True\nlearner.freeze(15)\n# Last layers are trained with weight adjustment\nImageFile.LOAD_TRUNCATED_IMAGES = True\nhistory2 = learner.fit_onecycle(lr=1e-4, epochs=10)\n\n# Plot loss over epochs\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'val'], loc='upper left')\nplt.show()\n\n# Specify predictor\npredictor = ktrain.get_predictor(learner.model, preproc)\n# Optional: Save predictor\n# ktrain.get_predictor(learner.model, preproc).save(\"predictor_bs128\")\n\n# Store test data in a list\nvalidation_data = list(test_data.filenames)\n\n# Calculate error metrics\nmae_value = mae(validation_data)\nrmse_value = rmse(validation_data)\nmape_value = mape(validation_data)\nprint(str(mae_value), str(rmse_value), str(mape_value))\n\n# Print predicted prices for first 50 test datapoints\nfor element in validation_data[:50]:\n show_prediction(element)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"BuecAle/NftPricePrediction","sub_path":"model/ImageData_ResNet50.py","file_name":"ImageData_ResNet50.py","file_ext":"py","file_size_in_byte":5967,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"41596065796","text":"from canaryd_packages import six\n\nfrom canaryd.plugin import Plugin\n\n# Mapping for iptables code arguments to variable names\nIPTABLES_ARGS = {\n '-A': 'chain',\n '-j': 'jump',\n\n # Boolean matches\n '-p': 'protocol',\n '-s': 'source',\n '-d': 'destination',\n '-i': 'in_interface',\n '-o': 'out_interface',\n\n # Logging\n '--log-prefix': 'log_prefix',\n\n # NAT exit rules\n '--to-destination': 
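The per-file loops in mae/rmse/mape above invoke the predictor once per element; with predictions collected in arrays, the same metrics reduce to a few NumPy lines (pred/actual are hypothetical batched arrays):

import numpy as np

pred = np.array([0.10, 0.25, 0.40])     # predictor outputs (illustrative)
actual = np.array([0.12, 0.20, 0.50])   # prices parsed from filenames

err = pred - actual
mae = np.mean(np.abs(err))
rmse = np.sqrt(np.mean(err ** 2))
mape = np.mean(np.abs(err) / actual) * 100
print(mae, rmse, mape)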
'to_destination',\n '--to-source': 'to_source',\n '--to-ports': 'to_ports',\n}\n\n\nclass Iptables(Plugin):\n '''\n Tracks ``iptables`` chains, policies and rule changes.\n '''\n\n spec = ('chain', {\n 'policy': six.text_type,\n 'rules': [dict],\n })\n\n command = 'iptables-save'\n\n @staticmethod\n def parse(output):\n chains = {}\n\n for line in output.splitlines():\n # Parse the chains\n if line.startswith(':'):\n line = line[1:]\n\n chain, policy, _ = line.split()\n chains[chain] = {\n 'rules': [],\n 'policy': policy,\n }\n\n # Pass the rules\n if not line.startswith('-A'):\n continue\n\n bits = line.split()\n\n definition = {}\n\n key = None\n args = []\n not_arg = False\n\n def add_args():\n arg_string = ' '.join(args)\n\n if key in IPTABLES_ARGS:\n definition_key = (\n 'not_{0}'.format(IPTABLES_ARGS[key])\n if not_arg\n else IPTABLES_ARGS[key]\n )\n definition[definition_key] = arg_string\n else:\n definition.setdefault('extras', []).extend((key, arg_string))\n\n for bit in bits:\n if bit == '!':\n if key:\n add_args()\n args = []\n key = None\n\n not_arg = True\n\n elif bit.startswith('-'):\n if key:\n add_args()\n args = []\n not_arg = False\n\n key = bit\n\n else:\n args.append(bit)\n\n if key:\n add_args()\n\n if 'extras' in definition:\n definition['extras'] = definition['extras']\n\n chain = definition.pop('chain')\n chains[chain]['rules'].append(definition)\n\n return chains\n\n @staticmethod\n def get_action_for_change(change):\n # If we change rules only, make the action \"rules updated|addded|deleted\"\n if 'rules' in change.data and len(change.data) == 1:\n return 'rules {0}'.format(change.type)\n\n\nclass Ip6tables(Iptables):\n command = 'ip6tables-save'\n","repo_name":"Oxygem/canaryd","sub_path":"canaryd/plugins/iptables.py","file_name":"iptables.py","file_ext":"py","file_size_in_byte":2896,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"5"} +{"seq_id":"43991992115","text":"\"\"\"Process for receiving frames from the optical tracker and pushing them to a queue.\"\"\"\n\nimport logging\nimport multiprocessing as mp\nimport time\nfrom typing import Set, Tuple\nimport numpy as np\nimport zmq\nfrom scipy.spatial.transform import Rotation as R\nfrom stringcase import snakecase\n\nlog = logging.getLogger(__name__)\n\n\ndef from_quatpos(qp: np.ndarray) -> np.ndarray:\n \"\"\"Convert a quaternion and position to a 4x4 transform.\n\n Args:\n qp: A (7) array containing the quaternion and position.\n\n Returns:\n A 4x4 transform matrix.\n \"\"\"\n q = qp[:4]\n p = qp[4:]\n r = R.from_quat(q).as_matrix()\n t = np.eye(4)\n t[:3, :3] = r\n t[:3, 3] = p\n return t\n\n\nclass FrameListener(mp.Process):\n # to_frame, from_frame, filename\n\n def __init__(self, corridor_server_queue: mp.Queue, ip: str, port: int) -> None:\n \"\"\"Frame listener.\n\n Args:\n corridor_server_queue (mp.Queue): Queue for sending data to the corridor server.\n ip (str): IP address of the plus-tracker server.\n port (int): Port of the plus-tracker server.\"\"\"\n super().__init__()\n self.corridor_server_queue = corridor_server_queue\n self.ip = ip\n self.port = port\n\n def run(self):\n context = zmq.Context()\n socket = context.socket(zmq.SUB)\n socket.connect(f\"tcp://{self.ip}:{self.port}\")\n socket.setsockopt(zmq.RCVTIMEO, 5000)\n socket.setsockopt_string(zmq.SUBSCRIBE, \"\")\n\n # Startup delay to let other programs get setup.\n time.sleep(1)\n\n while True:\n try:\n msg = socket.recv_multipart()\n message_type = msg[0].decode(\"utf-8\")\n if message_type == 
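What the parser in the iptables plugin produces for one chain and one negated rule, assuming the module's canaryd imports resolve (the output string is illustrative):

output = """:INPUT ACCEPT [0:0]
-A INPUT -p tcp ! -s 10.0.0.0/8 -j DROP"""

chains = Iptables.parse(output)
print(chains["INPUT"]["policy"])  # ACCEPT
print(chains["INPUT"]["rules"])
# [{'protocol': 'tcp', 'not_source': '10.0.0.0/8', 'jump': 'DROP'}]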
\"frame\":\n to_frame = snakecase(msg[1].decode(\"utf-8\"))\n from_frame = snakecase(msg[2].decode(\"utf-8\"))\n transform = from_quatpos(np.frombuffer(msg[3], dtype=\"<f\"))\n timestamp = msg[4].decode(\"utf-8\")\n self.corridor_server_queue.put(\n (\"tracker_frame\", to_frame, from_frame, transform, timestamp)\n )\n else:\n log.warning(f\"Unknown message type {message_type}\")\n except KeyboardInterrupt:\n log.info(\"\\nExiting...\")\n exit(1)\n except Exception as e:\n time.sleep(0.1)\n continue\n","repo_name":"benjamindkilleen/IPCAI-pelvic-corridors","sub_path":"cortical_breach_detection/procs/frame_listener.py","file_name":"frame_listener.py","file_ext":"py","file_size_in_byte":2465,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"2041566045","text":"import numpy as np\nimport math\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.neural_network import MLPClassifier\n\n\ndef performance_function(x1,x2):\n return x2 - np.abs(np.tan(x1)) - 1\n\n\n# Stage 1: Generation of Monte Carlo population\nnMC = 1000000\nx1 = np.random.normal(4, 2, size=nMC)\nx2 = np.random.normal(4, 2, size=nMC)\nS = np.column_stack((x1, x2))\nfunction_calls = 0\n\nMCS_pf = 0.7007\n# Stage 2: Definition of initial design of experiments (DoE)\nN1 = 500\nselected_indices = np.random.choice(len(S), N1, replace=False)\n\nDoE = S[selected_indices]\nPf_values = np.zeros(N1) # Array to store performance function evaluations\nfor i in range(N1):\n Pf_values[i] = 1 if performance_function(DoE[i, 0], DoE[i, 1]) <0 else 0 # Evaluate performance function\n function_calls += 1\n\n\n# Stage 3: Computation of MLP model\n\n\nmlp = MLPClassifier(hidden_layer_sizes=10, activation= 'logistic', solver = 'adam',learning_rate = 'adaptive', learning_rate_init= 0.01) # Customize the hidden layer sizes as needed\nmlp.fit(DoE, Pf_values)\n\nwhile True:\n # Stage 4: Prediction by MLP and estimation of probability of failure\n nMC = len(S)\n classes_hat = mlp.predict(S)\n \n y_pred_class = np.where(classes_hat > 0.5, 1, 0)\n Pf_hat = np.sum(y_pred_class == 0) / nMC\n # Stage 5: Identification of the best next point to evaluate based on distance to the threshold 0.5\n learning_values = np.abs(np.subtract(classes_hat, 0.5))\n x_best_index = np.argmin(learning_values)\n x_best = S[x_best_index]\n \n # Stage 6: Stopping condition on learning\n stopping_condition = min(learning_values) >= 0.1\n\n # Stage 7: Update of the previous design of experiments with the best point\n if stopping_condition:\n # Stopping condition met, learning is stopped\n diff_prob_percentage = (np.abs(Pf_hat - MCS_pf) / MCS_pf) *100\n print(diff_prob_percentage)\n cov_threshold = 3\n if diff_prob_percentage < cov_threshold:\n \n\n # Coefficient of variation is acceptable, stop AK-MCS\n print(\"AK-MCS finished. 
Probability of failure: \", Pf_hat)\n print(\"Number of calls to the performance function\", function_calls)\n break\n else:\n # Coefficient of variation is too high, update population\n new_x = np.random.normal(4, 4, nMC)\n new_y = np.random.normal(4, 4, nMC)\n new_points = np.column_stack((new_x, new_y)) \n S = np.vstack((S, new_points))\n else:\n # Stopping condition not met, update design of experiments\n x_best_performance = 1 if performance_function(x_best[0], x_best[1]) <0 else 0\n function_calls += 1\n Pf_values = np.concatenate((Pf_values, [x_best_performance]))\n DoE = np.vstack((DoE, x_best))\n \n mlp.fit(DoE, Pf_values)\n # Go back to Stage 4\n","repo_name":"omarBoua/SRA","sub_path":"old_essay/Active_learning_NN.py","file_name":"Active_learning_NN.py","file_ext":"py","file_size_in_byte":2874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"72000790873","text":"# Databricks notebook source\n# MAGIC %run ../utils/infra_utils\n\n# COMMAND ----------\n\nfrom dateutil import relativedelta\nfrom datetime import datetime\n\n# COMMAND ----------\n\nPANEL_PATH = dbutils.widgets.get(\"input_path\")\nOUTPUT_PATH = dbutils.widgets.get(\"output_path\")\nEXEC_DATE = datetime.strptime(dbutils.widgets.get(\"date\"), \"%Y-%m-%d\")\nENVS = generate_env_list(dbutils.widgets.get(\"envs\"))\n\n# COMMAND ----------\n\ndef generate_inner_vpn_suffix(d):\n return \"{}/{}/{:02d}/{:02d}/*\".format(PANEL_PATH, d.year, d.month, d.day)\n\n# COMMAND ----------\n\nPREVIOUS_DATE_PATH = generate_inner_vpn_suffix(EXEC_DATE - relativedelta.relativedelta(days=1))\nEXEC_DATE_PATH = generate_inner_vpn_suffix(EXEC_DATE)\nNEXT_DATE_PATH = generate_inner_vpn_suffix(EXEC_DATE + relativedelta.relativedelta(days=1))\n\n# COMMAND ----------\n\nprint(PREVIOUS_DATE_PATH)\nprint(EXEC_DATE_PATH)\nprint(NEXT_DATE_PATH)\n\n# COMMAND ----------\n\ningested_panel_df = (spark.read.parquet(PREVIOUS_DATE_PATH,EXEC_DATE_PATH,NEXT_DATE_PATH)\n .select(\"did\", F.col(\"country\").cast(\"int\").alias(\"country\"), F.col(\"bundle\").alias(\"source\"), \"model\", \"cv\", \"osv\", \"v\", F.col(\"tz\").cast(\"int\").alias(\"utc_offset\"), F.explode(\"events\").alias(\"event\"))\n .select('*', \"event.*\")\n .withColumn(\"local_ts\", (F.col(\"ts\")/1000).cast(\"timestamp\"))\n .withColumn(\"utc_ts\", (F.col(\"local_ts\").cast(\"bigint\") - F.col(\"utc_offset\")).cast(\"timestamp\"))\n .withColumn(\"exec_date\", F.from_utc_timestamp(F.col(\"utc_ts\"), \"EST\").cast('date'))\n .filter(F.col(\"exec_date\") == EXEC_DATE)\n .select(\"country\", \"source\", \"did\", \"model\", \"cv\", \"osv\", \"v\", \"local_ts\", \"utc_ts\", F.col(\"ts\").alias(\"local_ts_ms\"), \"host\", \"ua\")\n .orderBy(\"country\", \"source\", \"did\"))\n\n# COMMAND ----------\n\nwrite_output(ingested_panel_df.write.format(\"parquet\"), OUTPUT_PATH, ENVS, overwrite=True)","repo_name":"barrsw/ios_dau_cnb","sub_path":"ios_dau_cnb/ios_dau_cnb/ios-dau/ios-dau/preprocessing/01_ingest_panel.py","file_name":"01_ingest_panel.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"41524648801","text":"import boto3\nimport botocore\n\ndynamodb = boto3.resource('dynamodb')\ntry:\n\ttable = dynamodb.create_table(TableName='Game', KeySchema=[\n\t\t{\n\t\t\t'AttributeName': 'Title',\n\t\t\t'KeyType': 'HASH' #Partition key\n\t\t},\n\t\t{\n\t\t\t'AttributeName': 'TimeEnoch',\n\t\t\t'KeyType': 'RANGE' #Sort 
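One caveat in the active-learning loop above: MLPClassifier.predict returns hard 0/1 labels, so |classes_hat - 0.5| is 0.5 everywhere and the stopping test is satisfied on the first pass. The learning value presumably needs the class probability instead; a hedged fix:

proba_hat = mlp.predict_proba(S)[:, 1]       # P(class == 1) per sample
learning_values = np.abs(proba_hat - 0.5)    # small near the 0.5 decision boundary
x_best_index = np.argmin(learning_values)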
key\n\t\t}\n\t\t],\n\t\tAttributeDefinitions=[\n\t\t{\n\t\t\t'AttributeName': 'Title',\n\t\t\t'AttributeType': 'S'\n\t\t},\n\t\t{\n\t\t\t'AttributeName': 'TimeEnoch',\n\t\t\t'AttributeType': 'N'\n\t\t}\n\t\t],\n\t\tProvisionedThroughput={\n\t\t\t'ReadCapacityUnits': 5,\n\t\t\t'WriteCapacityUnits': 5\n\t\t}\n\t\t)\nexcept botocore.exceptions.ClientError:\n\tprint('Game exists!')\n","repo_name":"samx816/InsightProject","sub_path":"DynamoDB/Table.py","file_name":"Table.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"43282445434","text":"from enum import Enum\n\n\n# Characters that make up the TJ-Wriggle board\nclass Chars(Enum):\n UP = '^'\n DOWN = 'v'\n LEFT = '<'\n RIGHT = '>'\n WALL = 'x'\n EMPTY = 'e'\n HEAD_UP = 'U'\n HEAD_DOWN = 'D'\n HEAD_LEFT = 'L'\n HEAD_RIGHT = 'R'\n","repo_name":"wwlorey/tj-wriggle-ai","sub_path":"tj_wriggle/chars.py","file_name":"chars.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"38586719656","text":"import numpy as np\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom torch.autograd import Function\r\nfrom torch.utils.data import Dataset, DataLoader\r\nimport torch.optim as optim\r\nimport argparse\r\nimport mydata\r\nfrom models import *\r\nfrom train import *\r\nfrom test import *\r\nfrom pretrain import *\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser(description='SOC')\r\n parser.add_argument('--mkdir',type=str,default=None)\r\n parser.add_argument('--lr',type=float,default=0.001)\r\n parser.add_argument('--source_temp',type=str,default='25')\r\n parser.add_argument('--target_temp',type=str,default='10')\r\n parser.add_argument('--test_set',type=str,default='target_test')\r\n parser.add_argument('--mode',type=str,default='train')\r\n parser.add_argument('--epochs',type=int,default=2000) \r\n parser.add_argument('--eval_interval',type=int,default=200)\r\n parser.add_argument('--batch_size',type=int,default=64)\r\n parser.add_argument('--lamb1',type=float,default=1)\r\n parser.add_argument('--lamb2',type=float,default=1)\r\n parser.add_argument('--lamb3',type=float,default=1)\r\n parser.add_argument('--device', type=str, default='cuda:0', help='cuda device id')\r\n \r\n parser.add_argument('--source_path',type=str,default=None)\r\n parser.add_argument('--target_path',type=str,default=None) \r\n\r\n parser.add_argument('--ifsave',action='store_true')\r\n parser.add_argument('--load_model',action='store_true')\r\n parser.add_argument('--model_path',type=str,default='./models/best.pt')\r\n parser.add_argument('--ifpretrain',action='store_true')\r\n args = parser.parse_args()\r\n \r\n models = {}\r\n models['conv'] = conv()\r\n models['lstm'] = lstm()\r\n models['fc'] = fc()\r\n models['regression'] = regression()\r\n models['conv_s'] = conv()\r\n models['lstm_s'] = lstm()\r\n models['fc_s'] = fc()\r\n models['regression_s'] = regression()\r\n models['discriminator'] = Discriminator()\r\n\r\n criterion = nn.MSELoss(reduction='sum')\r\n optimizers = {}\r\n optimizers['conv'] = optim.Adam(models['conv'].parameters(),lr=args.lr)\r\n optimizers['lstm'] = optim.Adam(models['lstm'].parameters(),lr=args.lr)\r\n optimizers['fc'] = optim.Adam(models['fc'].parameters(),lr=args.lr)\r\n optimizers['regression'] = optim.Adam(models['regression'].parameters(),lr=args.lr) \r\n optimizers['discriminator'] = 
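The DynamoDB table above declares a composite primary key ('Title' partition, 'TimeEnoch' sort), so every item must carry both attributes. A hypothetical put against it (the 'Score' attribute is illustrative):

table = dynamodb.Table('Game')
table.put_item(Item={'Title': 'Chess', 'TimeEnoch': 1700000000, 'Score': 42})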
optim.Adam(models['discriminator'].parameters(),lr=args.lr)\r\n \r\n source_data_path,source_train_set,source_test_set = get_trainpath(args.source_path)\r\n target_data_path,target_train_set,target_test_set = get_testpath(args.target_path)\r\n if args.mode == 'train':\r\n print('if save:{}'.format(args.ifsave))\r\n print('if pretrain:',args.ifpretrain)\r\n print('load model:{}'.format(args.load_model))\r\n print('epoch',args.epochs,'batch_size',args.batch_size)\r\n if args.load_model:\r\n print('model path:',args.model_path)\r\n if args.ifpretrain:\r\n pretrain(args.mkdir,args.source_temp,args.target_temp,source_data_path,source_train_set,source_test_set,models, criterion, optimizers, args.batch_size,2000, 500, seed=100, device_type=args.device, ifsave=True,load_model=False)\r\n train(args.mkdir,args.source_temp,args.target_temp,source_data_path,source_train_set,source_test_set,target_data_path,target_train_set,target_test_set,models, criterion, optimizers, args.batch_size,args.epochs, args.eval_interval,args.lamb1,args.lamb2,args.lamb3, seed=100, device_type=args.device, ifsave=args.ifsave,load_model=args.load_model,model_path=args.model_path)\r\n elif args.mode == 'pretrain':\r\n args.ifsave = True\r\n print('if save:{}'.format(args.ifsave))\r\n print('load model:{}'.format(args.load_model))\r\n print('epoch',args.epochs,'batch_size',args.batch_size)\r\n pretrain(args.mkdir,args.source_temp,args.target_temp,source_data_path,source_train_set,source_test_set,models, criterion, optimizers, args.batch_size,args.epochs, args.eval_interval, seed=100, device_type=args.device, ifsave=args.ifsave,load_model=args.load_model)\r\n elif args.mode == 'test':\r\n print('test mode')\r\n print('test set:',args.test_set)\r\n data_path = None\r\n test_set = None\r\n #define your own test set in: utils.py get_trainpath()\r\n if args.test_set == 'Pan_test':\r\n test_set = mydata.Pan_test_set\r\n data_path = mydata.Pan_data_path \r\n elif args.test_set == 'LG_test':\r\n test_set = mydata.LG_test_set\r\n data_path = mydata.LG_data_path \r\n print('data path:',data_path)\r\n print('test set',test_set)\r\n test(args.mkdir,args.target_temp,models, data_path,test_set, seed=0, device_type=args.device,load_model_path=args.model_path)\r\n \r\n\r\n","repo_name":"TL-UESTC/TATN","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":4824,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"5"} +{"seq_id":"1296799009","text":"#!/usr/bin/env python3\nimport argparse\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.types import IntegerType\nfrom pyspark.sql.functions import split\nfrom pyspark.sql.functions import collect_list\nfrom pyspark.sql.functions import col\nfrom pyspark.sql.functions import array\nfrom heapq import nlargest\nfrom operator import itemgetter\n\n\n\ndef prefered(x):\n lst=x[1]\n if len(lst)>5:\n return (x[0],nlargest(5, lst, key=itemgetter(1)))\n else:\n return (x[0],nlargest(len(lst), lst, key=itemgetter(1)))\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--input_path\", type=str, help=\"Input file path\")\nparser.add_argument(\"--output_path\", type=str, help=\"Output folder path\")\n\nargs = parser.parse_args()\ninput_filepath, output_filepath = args.input_path, args.output_path\n\nspark = SparkSession \\\n .builder \\\n .appName(\"prodotti preferiti di ogni utente\") \\\n .config(\"spark.executor.memory\", \"2000m\") \\\n .config(\"spark.memory.offHeap.size\",\"16g\") \\\n 
.config(\"spark.memory.offHeap.enabled\",True) \\\n .getOrCreate()\n\n\ninput_df = spark.read.text(input_filepath).cache()\nsplit_col = split(input_df['value'], '\\t')\n\n#(utente, prodotto, voto)\nutente_prodotto_voto = input_df.withColumn('user', split_col.getItem(1)) \\\n .withColumn('prodotto', split_col.getItem(0)) \\\n .withColumn('voto', split_col.getItem(2).cast(IntegerType())) \\\n .drop(\"value\")\n\n#(utente, lista di prodottoVoto) \nutente_lista_prodottoVoto = utente_prodotto_voto.groupby(\"user\").agg(collect_list(array(\"prodotto\",\"voto\")).alias('ProdottoVoto')) \\\n .drop(\"prodotto\") \\\n .drop(\"voto\")\n\n#(utente, lista di prodottoVotoPreferiti)\nutente_lista_preferiti = utente_lista_prodottoVoto.rdd.map(f=lambda x: prefered(x))\n\nutente_lista_preferiti.saveAsTextFile(output_filepath)\n\nspark.stop()","repo_name":"mariocuomo/hadoopAtWork","sub_path":"progetto1/esercizio-2/spark-sql/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"424036763","text":"from train import train_normal, seed_everything\nimport os\nimport pandas as pd\nimport torch\nimport wandb\n\n\n\n\ntrain_dir = '/opt/ml/input/data/train'\ntrain_df = pd.read_csv(os.path.join(train_dir, 'combine.csv'))\nvalid_df = pd.read_csv(os.path.join(train_dir, 'train_labeled_val.csv'))\n\nmodel_name = 'vit_large_patch16_224'\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nbatch_size=32\nearly_stop=5\nlearning_rate=1e-3\nnum_epochs=12\ncutMix_Flag = True\nseed_everything(44)\ncontinue_Flag = False\nif continue_Flag: \n continue_dict = torch.load('/opt/ml/input/data/eval/best_param/model_vitL_epoch_0.pt')\nelse:\n continue_dict = None\n\nconfig={\"learning_rate\" : learning_rate}\nwandb.init(project = \"Image_Classification\", entity='donggunseo', config=config)\n\ntrain_normal(model_name, num_epochs, batch_size, early_stop, learning_rate, train_df, valid_df, cutMix=cutMix_Flag, continue_dict = continue_dict)\n","repo_name":"boostcampaitech2/image-classification-level1-07","sub_path":"model_ViT_Large/sdg/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"72841941273","text":"from functools import reduce\nfrom operator import add\n\ndef exceptionInt(a):\n try :\n return int(a)\n\n except ValueError:\n print(\"skipping element, garbage detected: \", a)\n return 0\n\n\nwith open(\"numbers.dat\") as inputFile:\n for line in inputFile:\n a = line.split()\n\n nInLine = list(map(exceptionInt,a))\n print(nInLine)\n\n print(\"reduction: \",reduce(add, nInLine,0))\n","repo_name":"felicepantaleo/playground","sub_path":"python/summing.py","file_name":"summing.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"27385282494","text":"def Solution(arr):\n maxSum = 0\n count = 0\n for i in range(len(arr)):\n if arr[i] >= 0:\n maxSum += arr[i]\n count += 1\n return [maxSum, count]\n\n\nN = int(input())\narr = list(map(int, input().split()))\nres = 
Solution(arr)\nprint(res[0])\nprint(res[1])\n","repo_name":"hacetheworld/competitive-programming-practices","sub_path":"problems/codeforce/800-1200/max-sum.py","file_name":"max-sum.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"37283963423","text":"from crum import get_current_user\nfrom django.conf import settings\nfrom django.db.models import Exists, OuterRef, Q\nfrom dojo.models import Product, Product_Member, Product_Type_Member\nfrom dojo.authorization.authorization import get_roles_for_permission\n\n\ndef get_authorized_products(permission):\n user = get_current_user()\n if user.is_superuser:\n return Product.objects.all().order_by('name')\n\n if settings.FEATURE_NEW_AUTHORIZATION:\n roles = get_roles_for_permission(permission)\n authorized_product_type_roles = Product_Type_Member.objects.filter(\n product_type=OuterRef('prod_type_id'),\n user=user,\n role__in=roles)\n authorized_product_roles = Product_Member.objects.filter(\n product=OuterRef('pk'),\n user=user,\n role__in=roles)\n products = Product.objects.annotate(\n prod_type__member=Exists(authorized_product_type_roles),\n member=Exists(authorized_product_roles)).order_by('name')\n products = products.filter(\n Q(prod_type__member=True) |\n Q(member=True))\n else:\n if user.is_staff:\n products = Product.objects.all().order_by('name')\n else:\n products = Product.objects.filter(\n Q(authorized_users__in=[user]) |\n Q(prod_type__authorized_users__in=[user])).order_by('name')\n return products\n","repo_name":"leonardobedazup/beda-DefectDojo-1.13.0","sub_path":"dojo/product/queries.py","file_name":"queries.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"40052352801","text":"import math\nimport numpy as np\nimport random\ndef second(a):\n return a[1]\ndef apply(inst):\n majority = []\n minority = []\n for i in inst:\n i = list(i)\n if i[-1]=='N':\n majority.append(i)\n else:\n minority.append(i)\n k = len(majority)//len(minority)\n synthetic = []\n for i in range(len(minority)):\n x = minority[i][:-1]\n nn = []\n for j in range(len(minority)):\n if i != j:\n z = minority[j][:-1]\n dist = math.sqrt(sum([(x[l]-z[l])**2 for l in range(len(x))]))\n nn.append((minority[j], dist))\n knn = sorted(nn, key=second)[:k]\n new = [a[0] for a in knn]\n r = random.random()\n while r==0 or r==1:\n r = random.random()\n for j in range(len(new)):\n temp = [x[l]+r*abs(x[l]-new[j][l]) for l in range(len(new[j])-1)]\n temp.append(new[j][-1])\n synthetic.append(temp)\n for i in synthetic:\n inst.append(i)\n return inst\n","repo_name":"parth-pathak/Fault-prediction-using-NASA-MDP-Dataset","sub_path":"smote.py","file_name":"smote.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"5"} +{"seq_id":"73739048472","text":"import argparse\nimport copy\nimport numpy as np\nimport os\nimport random\nfrom sklearn.utils import shuffle\nimport tensorflow as tf\nfrom time import time\ntry:\n from tensorflow.python.ops.nn_ops import leaky_relu\nexcept ImportError:\n from tensorflow.python.framework import ops\n from tensorflow.python.ops import math_ops\n\n\n def leaky_relu(features, alpha=0.2, name=None):\n with ops.name_scope(name, \"LeakyRelu\", [features, alpha]):\n features = ops.convert_to_tensor(features, name=\"features\")\n alpha = ops.convert_to_tensor(alpha, 
name=\"alpha\")\n return math_ops.maximum(alpha * features, features)\n\nfrom load import load_cla_data\nfrom evaluator import evaluate\n\nclass AWLSTM:\n def __init__(self, data_path, model_path, model_save_path, parameters, steps=1, epochs=50,\n batch_size=256, gpu=False, tra_date='2014-01-02',\n val_date='2015-08-03', tes_date='2015-10-01', att=0, hinge=0,\n fix_init=0, adv=0, reload=0):\n self.data_path = data_path\n self.model_path = model_path\n self.model_save_path = model_save_path\n # model parameters\n self.paras = copy.copy(parameters)\n # training parameters\n self.steps = steps\n self.epochs = epochs\n self.batch_size = batch_size\n self.gpu = gpu\n\n if att == 1:\n self.att = True\n else:\n self.att = False\n if hinge == 1:\n self.hinge = True\n else:\n self.hinge = False\n if fix_init == 1:\n self.fix_init = True\n else:\n self.fix_init = False\n if adv == 1:\n self.adv_train = True\n else:\n self.adv_train = False\n if reload == 1:\n self.reload = True\n else:\n self.reload = False\n\n # load data\n self.tra_date = tra_date\n self.val_date = val_date\n self.tes_date = tes_date\n self.tra_pv, self.tra_wd, self.tra_gt, \\\n self.val_pv, self.val_wd, self.val_gt, \\\n self.tes_pv, self.tes_wd, self.tes_gt = load_cla_data(\n self.data_path,\n tra_date, val_date, tes_date, seq=self.paras['seq']\n )\n self.fea_dim = self.tra_pv.shape[2]\n\n def get_batch(self, sta_ind=None):\n if sta_ind is None:\n sta_ind = random.randrange(0, self.tra_pv.shape[0])\n if sta_ind + self.batch_size < self.tra_pv.shape[0]:\n end_ind = sta_ind + self.batch_size\n else:\n sta_ind = self.tra_pv.shape[0] - self.batch_size\n end_ind = self.tra_pv.shape[0]\n return self.tra_pv[sta_ind:end_ind, :, :], \\\n self.tra_wd[sta_ind:end_ind, :, :], \\\n self.tra_gt[sta_ind:end_ind, :]\n\n def adv_part(self, adv_inputs):\n print('adversial part')\n if self.att:\n with tf.variable_scope('pre_fc'):\n self.fc_W = tf.get_variable(\n 'weights', dtype=tf.float32,\n shape=[self.paras['unit'] * 2, 1],\n initializer=tf.glorot_uniform_initializer()\n )\n self.fc_b = tf.get_variable(\n 'biases', dtype=tf.float32,\n shape=[1, ],\n initializer=tf.zeros_initializer()\n )\n if self.hinge:\n pred = tf.nn.bias_add(\n tf.matmul(adv_inputs, self.fc_W), self.fc_b\n )\n else:\n pred = tf.nn.sigmoid(\n tf.nn.bias_add(tf.matmul(self.fea_con, self.fc_W),\n self.fc_b)\n )\n else:\n # One hidden layer\n if self.hinge:\n pred = tf.layers.dense(\n adv_inputs, units=1, activation=None,\n name='pre_fc',\n kernel_initializer=tf.glorot_uniform_initializer()\n )\n else:\n pred = tf.layers.dense(\n adv_inputs, units=1, activation=tf.nn.sigmoid,\n name='pre_fc',\n kernel_initializer=tf.glorot_uniform_initializer()\n )\n return pred\n\n def construct_graph(self):\n print('is pred_lstm')\n if self.gpu == True:\n device_name = '/gpu:0'\n else:\n device_name = '/cpu:0'\n print('device name:', device_name)\n with tf.device(device_name):\n tf.reset_default_graph()\n if self.fix_init:\n tf.set_random_seed(123456)\n\n self.gt_var = tf.placeholder(tf.float32, [None, 1])\n self.pv_var = tf.placeholder(\n tf.float32, [None, self.paras['seq'], self.fea_dim]\n )\n self.wd_var = tf.placeholder(\n tf.float32, [None, self.paras['seq'], 5]\n )\n\n self.lstm_cell = tf.contrib.rnn.BasicLSTMCell(\n self.paras['unit']\n )\n\n # self.outputs, _ = tf.nn.dynamic_rnn(\n # # self.outputs, _ = tf.nn.static_rnn(\n # self.lstm_cell, self.pv_var, dtype=tf.float32\n # # , initial_state=ini_sta\n # )\n\n self.in_lat = tf.layers.dense(\n self.pv_var, units=self.fea_dim,\n 
activation=tf.nn.tanh, name='in_fc',\n kernel_initializer=tf.glorot_uniform_initializer()\n )\n\n self.outputs, _ = tf.nn.dynamic_rnn(\n # self.outputs, _ = tf.nn.static_rnn(\n self.lstm_cell, self.in_lat, dtype=tf.float32\n # , initial_state=ini_sta\n )\n\n self.loss = 0\n self.adv_loss = 0\n self.l2_norm = 0\n if self.att:\n with tf.variable_scope('lstm_att') as scope:\n self.av_W = tf.get_variable(\n name='att_W', dtype=tf.float32,\n shape=[self.paras['unit'], self.paras['unit']],\n initializer=tf.glorot_uniform_initializer()\n )\n self.av_b = tf.get_variable(\n name='att_h', dtype=tf.float32,\n shape=[self.paras['unit']],\n initializer=tf.zeros_initializer()\n )\n self.av_u = tf.get_variable(\n name='att_u', dtype=tf.float32,\n shape=[self.paras['unit']],\n initializer=tf.glorot_uniform_initializer()\n )\n\n self.a_laten = tf.tanh(\n tf.tensordot(self.outputs, self.av_W,\n axes=1) + self.av_b)\n self.a_scores = tf.tensordot(self.a_laten, self.av_u,\n axes=1,\n name='scores')\n self.a_alphas = tf.nn.softmax(self.a_scores, name='alphas')\n\n self.a_con = tf.reduce_sum(\n self.outputs * tf.expand_dims(self.a_alphas, -1), 1)\n self.fea_con = tf.concat(\n [self.outputs[:, -1, :], self.a_con],\n axis=1)\n print('adversarial scope')\n # training loss\n self.pred = self.adv_part(self.fea_con)\n if self.hinge:\n self.loss = tf.losses.hinge_loss(self.gt_var, self.pred)\n else:\n self.loss = tf.losses.log_loss(self.gt_var, self.pred)\n\n self.adv_loss = self.loss * 0\n\n # adversarial loss\n if self.adv_train:\n print('gradient noise')\n self.delta_adv = tf.gradients(self.loss, [self.fea_con])[0]\n tf.stop_gradient(self.delta_adv)\n self.delta_adv = tf.nn.l2_normalize(self.delta_adv, axis=1)\n self.adv_pv_var = self.fea_con + \\\n self.paras['eps'] * self.delta_adv\n\n scope.reuse_variables()\n self.adv_pred = self.adv_part(self.adv_pv_var)\n if self.hinge:\n self.adv_loss = tf.losses.hinge_loss(self.gt_var, self.adv_pred)\n else:\n self.adv_loss = tf.losses.log_loss(self.gt_var, self.adv_pred)\n else:\n with tf.variable_scope('lstm_att') as scope:\n print('adversarial scope')\n # training loss\n self.pred = self.adv_part(self.outputs[:, -1, :])\n if self.hinge:\n self.loss = tf.losses.hinge_loss(self.gt_var, self.pred)\n else:\n self.loss = tf.losses.log_loss(self.gt_var, self.pred)\n\n self.adv_loss = self.loss * 0\n\n # adversarial loss\n if self.adv_train:\n print('gradient noise')\n self.delta_adv = tf.gradients(self.loss, [self.outputs[:, -1, :]])[0]\n tf.stop_gradient(self.delta_adv)\n self.delta_adv = tf.nn.l2_normalize(self.delta_adv,\n axis=1)\n self.adv_pv_var = self.outputs[:, -1, :] + \\\n self.paras['eps'] * self.delta_adv\n\n scope.reuse_variables()\n self.adv_pred = self.adv_part(self.adv_pv_var)\n if self.hinge:\n self.adv_loss = tf.losses.hinge_loss(self.gt_var,\n self.adv_pred)\n else:\n self.adv_loss = tf.losses.log_loss(self.gt_var,\n self.adv_pred)\n\n # regularizer\n self.tra_vars = tf.trainable_variables('lstm_att/pre_fc')\n for var in self.tra_vars:\n self.l2_norm += tf.nn.l2_loss(var)\n\n self.obj_func = self.loss + \\\n self.paras['alp'] * self.l2_norm + \\\n self.paras['bet'] * self.adv_loss\n\n self.optimizer = tf.train.AdamOptimizer(\n learning_rate=self.paras['lr']\n ).minimize(self.obj_func)\n\n def get_latent_rep(self):\n self.construct_graph()\n\n sess = tf.Session()\n saver = tf.train.Saver()\n if self.reload:\n saver.restore(sess, self.model_path)\n print('model restored')\n else:\n sess.run(tf.global_variables_initializer())\n\n bat_count = 
self.tra_pv.shape[0] // self.batch_size\n if not (self.tra_pv.shape[0] % self.batch_size == 0):\n bat_count += 1\n\n tr_lat_rep = np.zeros([bat_count * self.batch_size, self.paras['unit'] * 2],\n dtype=np.float32)\n tr_gt = np.zeros([bat_count * self.batch_size, 1], dtype=np.float32)\n for j in range(bat_count):\n pv_b, wd_b, gt_b = self.get_batch(j * self.batch_size)\n feed_dict = {\n self.pv_var: pv_b,\n self.wd_var: wd_b,\n self.gt_var: gt_b\n }\n lat_rep, cur_obj, cur_loss, cur_l2, cur_al = sess.run(\n (self.fea_con, self.obj_func, self.loss, self.l2_norm,\n self.adv_loss),\n feed_dict\n )\n print(lat_rep.shape)\n tr_lat_rep[j * self.batch_size: (j + 1) * self.batch_size, :] = lat_rep\n tr_gt[j * self.batch_size: (j + 1) * self.batch_size,:] = gt_b\n\n # test on validation set\n feed_dict = {\n self.pv_var: self.val_pv,\n self.wd_var: self.val_wd,\n self.gt_var: self.val_gt\n }\n val_loss, val_lat_rep, val_pre = sess.run(\n (self.loss, self.fea_con, self.pred), feed_dict\n )\n cur_val_perf = evaluate(val_pre, self.val_gt, self.hinge)\n print('\\tVal per:', cur_val_perf)\n\n sess.close()\n tf.reset_default_graph()\n np.savetxt(self.model_save_path + '_val_lat_rep.csv', val_lat_rep)\n np.savetxt(self.model_save_path + '_tr_lat_rep.csv', tr_lat_rep)\n np.savetxt(self.model_save_path + '_val_gt.csv', self.val_gt)\n np.savetxt(self.model_save_path + '_tr_gt.csv', tr_gt)\n\n def predict_adv(self):\n self.construct_graph()\n\n sess = tf.Session()\n saver = tf.train.Saver()\n if self.reload:\n saver.restore(sess, self.model_path)\n print('model restored')\n else:\n sess.run(tf.global_variables_initializer())\n\n bat_count = self.tra_pv.shape[0] // self.batch_size\n if not (self.tra_pv.shape[0] % self.batch_size == 0):\n bat_count += 1\n tra_perf = None\n adv_perf = None\n for j in range(bat_count):\n pv_b, wd_b, gt_b = self.get_batch(j * self.batch_size)\n feed_dict = {\n self.pv_var: pv_b,\n self.wd_var: wd_b,\n self.gt_var: gt_b\n }\n cur_pre, cur_adv_pre, cur_obj, cur_loss, cur_l2, cur_al = sess.run(\n (self.pred, self.adv_pred, self.obj_func, self.loss, self.l2_norm,\n self.adv_loss),\n feed_dict\n )\n cur_tra_perf = evaluate(cur_pre, gt_b, self.hinge)\n cur_adv_perf = evaluate(cur_adv_pre, gt_b, self.hinge)\n if tra_perf is None:\n tra_perf = copy.copy(cur_tra_perf)\n else:\n for metric in tra_perf.keys():\n tra_perf[metric] = tra_perf[metric] + cur_tra_perf[metric]\n if adv_perf is None:\n adv_perf = copy.copy(cur_adv_perf)\n else:\n for metric in adv_perf.keys():\n adv_perf[metric] = adv_perf[metric] + cur_adv_perf[metric]\n for metric in tra_perf.keys():\n tra_perf[metric] = tra_perf[metric] / bat_count\n adv_perf[metric] = adv_perf[metric] / bat_count\n\n print('Clean samples performance:', tra_perf)\n print('Adversarial samples performance:', adv_perf)\n\n # test on validation set\n feed_dict = {\n self.pv_var: self.val_pv,\n self.wd_var: self.val_wd,\n self.gt_var: self.val_gt\n }\n val_loss, val_pre, val_adv_pre = sess.run(\n (self.loss, self.pred, self.adv_pred), feed_dict\n )\n cur_valid_perf = evaluate(val_pre, self.val_gt, self.hinge)\n print('\\tVal per clean:', cur_valid_perf)\n adv_valid_perf = evaluate(val_adv_pre, self.val_gt, self.hinge)\n print('\\tVal per adversarial:', adv_valid_perf)\n\n # test on testing set\n feed_dict = {\n self.pv_var: self.tes_pv,\n self.wd_var: self.tes_wd,\n self.gt_var: self.tes_gt\n }\n test_loss, tes_pre, tes_adv_pre = sess.run(\n (self.loss, self.pred, self.adv_pred), feed_dict\n )\n cur_test_perf = evaluate(tes_pre, self.tes_gt, 
self.hinge)\n print('\\tTest per clean:', cur_test_perf)\n adv_test_perf = evaluate(tes_adv_pre, self.tes_gt, self.hinge)\n print('\\tTest per adversarial:', adv_test_perf)\n\n sess.close()\n tf.reset_default_graph()\n\n def predict_record(self):\n self.construct_graph()\n\n sess = tf.Session()\n saver = tf.train.Saver()\n if self.reload:\n saver.restore(sess, self.model_path)\n print('model restored')\n else:\n sess.run(tf.global_variables_initializer())\n\n # test on validation set\n feed_dict = {\n self.pv_var: self.val_pv,\n self.wd_var: self.val_wd,\n self.gt_var: self.val_gt\n }\n val_loss, val_pre = sess.run(\n (self.loss, self.pred), feed_dict\n )\n cur_valid_perf = evaluate(val_pre, self.val_gt, self.hinge)\n print('\\tVal per:', cur_valid_perf, '\\tVal loss:', val_loss)\n np.savetxt(self.model_save_path + '_val_prediction.csv', val_pre)\n\n # test on testing set\n feed_dict = {\n self.pv_var: self.tes_pv,\n self.wd_var: self.tes_wd,\n self.gt_var: self.tes_gt\n }\n test_loss, tes_pre = sess.run(\n (self.loss, self.pred), feed_dict\n )\n cur_test_perf = evaluate(tes_pre, self.tes_gt, self.hinge)\n print('\\tTest per:', cur_test_perf, '\\tTest loss:', test_loss)\n np.savetxt(self.model_save_path + '_tes_prediction.csv', tes_pre)\n sess.close()\n tf.reset_default_graph()\n\n def test(self):\n self.construct_graph()\n\n sess = tf.Session()\n saver = tf.train.Saver()\n if self.reload:\n saver.restore(sess, self.model_path)\n print('model restored')\n else:\n sess.run(tf.global_variables_initializer())\n\n # test on validation set\n feed_dict = {\n self.pv_var: self.val_pv,\n self.wd_var: self.val_wd,\n self.gt_var: self.val_gt\n }\n val_loss, val_pre = sess.run(\n (self.loss, self.pred), feed_dict\n )\n cur_valid_perf = evaluate(val_pre, self.val_gt, self.hinge)\n print('\\tVal per:', cur_valid_perf, '\\tVal loss:', val_loss)\n\n # test on testing set\n feed_dict = {\n self.pv_var: self.tes_pv,\n self.wd_var: self.tes_wd,\n self.gt_var: self.tes_gt\n }\n test_loss, tes_pre = sess.run(\n (self.loss, self.pred), feed_dict\n )\n cur_test_perf = evaluate(tes_pre, self.tes_gt, self.hinge)\n print('\\tTest per:', cur_test_perf, '\\tTest loss:', test_loss)\n sess.close()\n tf.reset_default_graph()\n\n def train(self, tune_para=False):\n self.construct_graph()\n\n sess = tf.Session()\n saver = tf.train.Saver()\n if self.reload:\n saver.restore(sess, self.model_path)\n print('model restored')\n else:\n sess.run(tf.global_variables_initializer())\n\n best_valid_pred = np.zeros(self.val_gt.shape, dtype=float)\n best_test_pred = np.zeros(self.tes_gt.shape, dtype=float)\n\n best_valid_perf = {\n 'acc': 0, 'mcc': -2\n }\n best_test_perf = {\n 'acc': 0, 'mcc': -2\n }\n\n bat_count = self.tra_pv.shape[0] // self.batch_size\n if not (self.tra_pv.shape[0] % self.batch_size == 0):\n bat_count += 1\n for i in range(self.epochs):\n t1 = time()\n # first_batch = True\n tra_loss = 0.0\n tra_obj = 0.0\n l2 = 0.0\n tra_adv = 0.0\n for j in range(bat_count):\n pv_b, wd_b, gt_b = self.get_batch(j * self.batch_size)\n feed_dict = {\n self.pv_var: pv_b,\n self.wd_var: wd_b,\n self.gt_var: gt_b\n }\n cur_pre, cur_obj, cur_loss, cur_l2, cur_al, batch_out = sess.run(\n (self.pred, self.obj_func, self.loss, self.l2_norm, self.adv_loss,\n self.optimizer),\n feed_dict\n )\n\n tra_loss += cur_loss\n tra_obj += cur_obj\n l2 += cur_l2\n tra_adv += cur_al\n print('----->>>>> Training:', tra_obj / bat_count,\n tra_loss / bat_count, l2 / bat_count, tra_adv / bat_count)\n\n if not tune_para:\n tra_loss = 0.0\n tra_obj = 0.0\n 
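# (editor's comment, not in the original source) the evaluation pass below re-runs every batch\n                # without the optimizer step, so the printed loss/accuracy reflect the weights updated in this epoch\n                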
l2 = 0.0\n tra_acc = 0.0\n for j in range(bat_count):\n pv_b, wd_b, gt_b = self.get_batch(\n j * self.batch_size)\n feed_dict = {\n self.pv_var: pv_b,\n self.wd_var: wd_b,\n self.gt_var: gt_b\n }\n cur_obj, cur_loss, cur_l2, cur_pre = sess.run(\n (self.obj_func, self.loss, self.l2_norm, self.pred),\n feed_dict\n )\n cur_tra_perf = evaluate(cur_pre, gt_b, self.hinge)\n tra_loss += cur_loss\n l2 += cur_l2\n tra_obj += cur_obj\n tra_acc += cur_tra_perf['acc']\n print('Training:', tra_obj / bat_count, tra_loss / bat_count,\n l2 / bat_count, '\\tTrain per:', tra_acc / bat_count)\n\n # test on validation set\n feed_dict = {\n self.pv_var: self.val_pv,\n self.wd_var: self.val_wd,\n self.gt_var: self.val_gt\n }\n val_loss, val_pre = sess.run(\n (self.loss, self.pred), feed_dict\n )\n cur_valid_perf = evaluate(val_pre, self.val_gt, self.hinge)\n print('\\tVal per:', cur_valid_perf, '\\tVal loss:', val_loss)\n\n # test on testing set\n feed_dict = {\n self.pv_var: self.tes_pv,\n self.wd_var: self.tes_wd,\n self.gt_var: self.tes_gt\n }\n test_loss, tes_pre = sess.run(\n (self.loss, self.pred), feed_dict\n )\n cur_test_perf = evaluate(tes_pre, self.tes_gt, self.hinge)\n print('\\tTest per:', cur_test_perf, '\\tTest loss:', test_loss)\n\n if cur_valid_perf['acc'] > best_valid_perf['acc']:\n best_valid_perf = copy.copy(cur_valid_perf)\n best_valid_pred = copy.copy(val_pre)\n best_test_perf = copy.copy(cur_test_perf)\n best_test_pred = copy.copy(tes_pre)\n if not tune_para:\n saver.save(sess, self.model_save_path)\n self.tra_pv, self.tra_wd, self.tra_gt = shuffle(\n self.tra_pv, self.tra_wd, self.tra_gt, random_state=0\n )\n t4 = time()\n print('epoch:', i, ('time: %.4f ' % (t4 - t1)))\n print('\\nBest Valid performance:', best_valid_perf)\n print('\\tBest Test performance:', best_test_perf)\n sess.close()\n tf.reset_default_graph()\n if tune_para:\n return best_valid_perf, best_test_perf\n return best_valid_pred, best_test_pred\n\n def update_model(self, parameters):\n data_update = False\n if not parameters['seq'] == self.paras['seq']:\n data_update = True\n for name, value in parameters.items():\n self.paras[name] = value\n if data_update:\n self.tra_pv, self.tra_wd, self.tra_gt, \\\n self.val_pv, self.val_wd, self.val_gt, \\\n self.tes_pv, self.tes_wd, self.tes_gt = load_cla_data(\n self.data_path,\n self.tra_date, self.val_date, self.tes_date, seq=self.paras['seq']\n )\n return True\n\nif __name__ == '__main__':\n desc = 'the lstm model'\n parser = argparse.ArgumentParser(description=desc)\n parser.add_argument('-p', '--path', help='path of pv data', type=str,\n default='./data/stocknet-dataset/price/ourpped')\n parser.add_argument('-l', '--seq', help='length of history', type=int,\n default=5)\n parser.add_argument('-u', '--unit', help='number of hidden units in lstm',\n type=int, default=32)\n parser.add_argument('-l2', '--alpha_l2', type=float, default=1e-2,\n help='alpha for l2 regularizer')\n parser.add_argument('-la', '--beta_adv', type=float, default=1e-2,\n help='beta for adverarial loss')\n parser.add_argument('-le', '--epsilon_adv', type=float, default=1e-2,\n help='epsilon to control the scale of noise')\n parser.add_argument('-s', '--step', help='steps to make prediction',\n type=int, default=1)\n parser.add_argument('-b', '--batch_size', help='batch size', type=int,\n default=1024)\n parser.add_argument('-e', '--epoch', help='epoch', type=int, default=150)\n parser.add_argument('-r', '--learning_rate', help='learning rate',\n type=float, default=1e-2)\n parser.add_argument('-g', 
'--gpu', type=int, default=0, help='use gpu')\n    parser.add_argument('-q', '--model_path', help='path to load model',\n                        type=str, default='./saved_model/acl18_alstm/exp')\n    parser.add_argument('-qs', '--model_save_path', type=str, help='path to save model',\n                        default='./tmp/model')\n    parser.add_argument('-o', '--action', type=str, default='train',\n                        help='train, test, pred')\n    parser.add_argument('-m', '--model', type=str, default='pure_lstm',\n                        help='pure_lstm, di_lstm, att_lstm, week_lstm, aw_lstm')\n    parser.add_argument('-f', '--fix_init', type=int, default=0,\n                        help='use fixed initialization')\n    parser.add_argument('-a', '--att', type=int, default=1,\n                        help='use attention model')\n    parser.add_argument('-w', '--week', type=int, default=0,\n                        help='use week day data')\n    parser.add_argument('-v', '--adv', type=int, default=0,\n                        help='adversarial training')\n    parser.add_argument('-hi', '--hinge_lose', type=int, default=1,\n                        help='use hinge loss')\n    parser.add_argument('-rl', '--reload', type=int, default=0,\n                        help='use pre-trained parameters')\n    args = parser.parse_args()\n    print(args)\n\n    parameters = {\n        'seq': int(args.seq),\n        'unit': int(args.unit),\n        'alp': float(args.alpha_l2),\n        'bet': float(args.beta_adv),\n        'eps': float(args.epsilon_adv),\n        'lr': float(args.learning_rate)\n    }\n\n    if 'stocknet' in args.path:\n        tra_date = '2014-01-02'\n        val_date = '2015-08-03'\n        tes_date = '2015-10-01'\n    elif 'kdd17' in args.path:\n        tra_date = '2007-01-03'\n        val_date = '2015-01-02'\n        tes_date = '2016-01-04'\n    else:\n        print('unexpected path: %s' % args.path)\n        exit(0)\n\n    pure_LSTM = AWLSTM(\n        data_path=args.path,\n        model_path=args.model_path,\n        model_save_path=args.model_save_path,\n        parameters=parameters,\n        steps=args.step,\n        epochs=args.epoch, batch_size=args.batch_size, gpu=args.gpu,\n        tra_date=tra_date, val_date=val_date, tes_date=tes_date, att=args.att,\n        hinge=args.hinge_lose, fix_init=args.fix_init, adv=args.adv,\n        reload=args.reload\n    )\n\n    if args.action == 'train':\n        pure_LSTM.train()\n    elif args.action == 'test':\n        pure_LSTM.test()\n    elif args.action == 'report':\n        for i in range(5):\n            pure_LSTM.train()\n    elif args.action == 'pred':\n        pure_LSTM.predict_record()\n    elif args.action == 'adv':\n        pure_LSTM.predict_adv()\n    elif args.action == 'latent':\n        pure_LSTM.get_latent_rep()","repo_name":"fulifeng/Adv-ALSTM","sub_path":"pred_lstm.py","file_name":"pred_lstm.py","file_ext":"py","file_size_in_byte":27563,"program_lang":"python","lang":"en","doc_type":"code","stars":145,"dataset":"github-code","pt":"5"} +{"seq_id":"14051988238","text":"import os\nimport re\nimport csv\nimport sys\nimport glob\n\nimport cv2 as cv\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport config\n\n\ndef _find_distance_3d(point1: list, point2: list) -> float:\n    \"\"\"\n    Gets two points and finds the distance between them\n    \"\"\"\n    return (((point2[0] - point1[0]) ** 2) + ((point2[1] - point1[1]) ** 2) + ((point2[2] - point1[2]) ** 2)) ** (0.5)\n\n\ndef _extract_csv_file(path: str) -> list:\n    \"\"\"\n    Gets path to csv file and returns the list of rows of it\n    \"\"\"\n    rows = []\n    with open(path, 'r') as csvfile:\n        csvreader = csv.reader(csvfile)\n\n        for row in csvreader:\n            rows.append(row)\n\n    return rows\n\n\ndef _create_point_from_line_in_csv(line: list) -> list:\n    \"\"\"\n    Gets a line of the csv file and returns the point in the line\n    \"\"\"\n    return [float(coordinate) for coordinate in [line[0], line[2], line[1]]]\n\n\ndef _get_frames_from_line_in_csv(line: list) -> list:\n    \"\"\"\n    Gets a line of the csv file and returns 
all the frames that contain the point\n    \"\"\"\n    return [int(cell) for cell in line[3:]]\n\n\ndef find_closest_point_line_in_csv(path: str, expected_point: list) -> tuple:\n    \"\"\"\n    Gets a point and the path of the csv and returns the closest point in the csv file and the frames of its line\n    \"\"\"\n    rows = _extract_csv_file(path)\n    min_distance = sys.maxsize\n    min_line = -1\n    for line_number, line in enumerate(rows):\n        point_to_check = _create_point_from_line_in_csv(line)\n        distance = _find_distance_3d(point_to_check, expected_point)\n        if distance < min_distance:\n            min_distance = distance\n            min_line = line_number\n\n    return _create_point_from_line_in_csv(rows[min_line]), _get_frames_from_line_in_csv(rows[min_line])\n\n\ndef _convert_frame_numbers_to_frames_path(frame_numbers: list) -> list:\n    \"\"\"\n    Gets the frame numbers and converts them to paths of the frame files\n    \"\"\"\n    return [os.path.join(config.PATH_TO_DATA, f\"frame_{frame_number}.png\") for frame_number in frame_numbers]\n\n\ndef stitch_frames(frame_numbers: list) -> list:\n    \"\"\"\n    Gets a list of frame numbers and returns the stitching of all of them\n    \"\"\"\n    # If there is only one frame, show it\n    if len(frame_numbers) == 1:\n        print(\"only one frame, showing it...\")\n        frames_path = _convert_frame_numbers_to_frames_path(frame_numbers)\n        cv.imshow(\"Frame of closest point\", cv.imread(frames_path[0]))\n        cv.waitKey(0)\n        cv.destroyAllWindows()\n        return None\n\n    frames = []\n    frames_path = _convert_frame_numbers_to_frames_path(frame_numbers)\n\n    for frame in frames_path:\n        frame = cv.imread(frame)\n        if frame is None:\n            print(\"can't read image!\")\n            sys.exit(-1)\n        frames.append(frame)\n\n    stitcher = cv.Stitcher.create(cv.Stitcher_PANORAMA)\n    status, pano = stitcher.stitch(frames)\n\n    if status != cv.Stitcher_OK:\n        print(\"Can't stitch images, error code = %d\" % status)\n        sys.exit(-1)\n\n    return pano\n\n\ndef show_image(image: list) -> None:\n    \"\"\"\n    Gets a cv image and shows it\n    \"\"\"\n    # Show the result\n    cv.imshow(\"stitched frames\", image)\n    cv.waitKey(0)\n    cv.destroyAllWindows()\n\n\ndef show_frame(frame_number: int) -> None:\n    \"\"\"\n    Gets frame number and shows the image of it\n    \"\"\"\n    cv.imshow(f\"Frame {frame_number}\", cv.imread(os.path.join(config.PATH_TO_DATA, f\"frame_{frame_number}.png\")))\n    cv.waitKey(0)\n    cv.destroyAllWindows()\n\n\ndef _get_all_points(rows: list) -> tuple:\n    \"\"\"\n    Gets the rows of the csv file and returns three lists of x's, y's and z's\n    \"\"\"\n    x, y, z = [], [], []\n\n    for row in rows:\n        x.append(float(row[0]))\n        y.append(float(row[2]))\n        z.append(float(row[1]))\n    return x, y, z\n\n\ndef plot_data(path: str, expected_point: list, closest_point: list) -> None:\n    \"\"\"\n    Gets the path of the csv file, the point we wished to get and the closest point to it\n    and plots the cloud points, marking the closest point and the point we wished to get\n    \"\"\"\n    rows = _extract_csv_file(path)\n\n    x, y, _ = _get_all_points(rows)\n\n    # Plot all the points\n    plt.scatter(np.array(x), np.array(y), color=\"grey\", linewidth=0.1, s=2)\n\n    # Plot the closest point in green and the wished point in red and make them big\n    plt.scatter(expected_point[0], expected_point[1], color=\"red\", linewidth=0.1, s=20)\n    plt.scatter(closest_point[0], closest_point[1], color=\"green\", linewidth=0.1, s=20)\n\n    plt.draw()\n    # Press any key to close the plot\n    while True:\n        if plt.waitforbuttonpress(0):\n            plt.close()\n            break\n\n\ndef _get_all_frame_numbers(path: str) -> list:\n    \"\"\"\n    Gets the path to the 
directory of the data and return all the frame numbers\n \"\"\"\n frame_number_list = glob.glob(os.path.join(path, \"frame_*.png\"))\n frame_number_list = [os.path.basename(curr_path) for curr_path in frame_number_list]\n return [int(re.findall('[0-9]+', frame_path)[0]) for frame_path in frame_number_list]\n\n\ndef _sort_and_diluted_frame_numbers(frame_numbers: list, item_dilution: int) -> list:\n \"\"\"\n get list and how much to dilute Sort the list and save every 20th item\n \"\"\"\n frame_numbers.sort()\n diluted_frame_numbers = []\n for i in range(len(frame_numbers)):\n if i % item_dilution == 0:\n diluted_frame_numbers.append(frame_numbers[i])\n return diluted_frame_numbers\n\n\ndef _save_image(path_to_save: str, image: list) -> None:\n \"\"\"\n Gets the path we want to save the image to and the image we want to save\n \"\"\"\n cv.imwrite(path_to_save, image)\n\n\ndef _delete_frames(path: str) -> None:\n \"\"\"\n delete all the frame images from the path of the data\n \"\"\"\n frame_images_list = glob.glob(os.path.join(path, \"frame_*.png\"))\n for frame in frame_images_list:\n os.remove(frame)\n\n\ndef stitch_all_frames(path: str, item_dilution: int, delete_frames: bool) -> None:\n \"\"\"\n Get path to the data folder, the item dilution amount and if to delete the frames\n and save to it all the stitched frames as an picture named stitched_frames.png\n and delete if asked for\n \"\"\"\n frame_numbers = _get_all_frame_numbers(path)\n diluted_frame_numbers_sorted = _sort_and_diluted_frame_numbers(frame_numbers, item_dilution)\n image = stitch_frames(diluted_frame_numbers_sorted)\n _save_image(os.path.join(path, \"stitched_frames.png\"), image)\n if delete_frames:\n _delete_frames(path)\n\n\ndef _get_average_location(path: str) -> list:\n \"\"\"\n Gets the path of the data folder and returns list\n that define the average point of the scan\n \"\"\"\n number_of_frames = len(glob.glob(os.path.join(path, \"frameData_*.csv\")))\n average_point = [0, 0, 0]\n frames_data_list = glob.glob(os.path.join(path, \"frameData_*.csv\"))\n for frame_data in frames_data_list:\n row = _extract_csv_file(frame_data)[0]\n row = [float(item) for item in row]\n average_point[0] += row[1]\n average_point[1] += row[2]\n average_point[2] += row[3]\n average_point[0] /= number_of_frames\n average_point[1] /= number_of_frames\n average_point[2] /= number_of_frames\n return average_point\n\n\ndef _write_row_to_csv(row: list, file_path: str) -> None:\n \"\"\"\n Get the file path we want to save and the row we want to save\n and save the row to this path in csv format\n \"\"\"\n with open(file_path, 'w') as f:\n writer = csv.writer(f)\n writer.writerow(row)\n\n\ndef save_average_location(path: str) -> None:\n \"\"\"\n Get the path to the data folder and save csv file\n that contains the average location at the scan\n \"\"\"\n average_point = _get_average_location(path)\n _write_row_to_csv(average_point, os.path.join(path, \"average_location.csv\"))\n","repo_name":"liamvanunu/frame_manipulation","sub_path":"src/frame_manipulation/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"27539000042","text":"import codecs\nimport os\nimport requests\nimport telebot\nimport datetime\nfrom telebot import types\n\n\ntoken = '5350862817:AAGZcjBItg2uoTzkJG1bMLEoPZQE1HyhiMo'\nbot = telebot.TeleBot(token)\nd = {\"инф-ка\": '1', \"лит-ра\": '2', \"алгебра\": '3', \"ино иэ\": '4', \"физика\": '5',\n 
\"геометрия\": '6',\n \"русский язык\": '7', \"химия\": '8', \"история\": '9', \"ино са\": '0', 'немецкий язык': 'a',\n 'астрономия': 'b'}\nd1 = {'1': \"инф-ка\", '2': \"лит-ра\", '3': \"алгебра\", '4': \"ино, группа иэ\", '5': \"физика\",\n '6': \"геометрия\",\n '7': \"русский язык\", '8': \"химия\", '9': \"история\", '0': \"ино, гр��ппа са\", 'a': 'немецкий язык',\n 'b': 'астрономия'}\nwd = {\"понедельник\": 1, \"вторник\": 2, \"среда\": 3, \"четверг\": 4, \"пятница\": 5, \"суббота\": 6}\ntime = {1: \"09:00-09:40\", 2: \"09:55-10:35\", 3: \"10:50-11:30\", 4: \"11:45-12:25\", 5: \"12:40-13:20\",\n 6: \"13:35-14:15\", 7: \"14:25-15:05\"}\ntime_dist = {1: \"09:00-09:30\", 2: \"09:55-10:25\", 3: \"10:50-11:20\", 4: \"11:45-12:15\", 5: \"12:40-13:10\",\n 6: \"13:35-14:05\", 7: \"14:20-14:50\"}\n__author1__ = \"@Talziar\"\n__author2__ = \"@artemgoriunov\"\nwdnum = 0\nnum = 0\nanum = 0\nzoom_num = 0\nchat_id = \"\" # demo\nchannel_id = \"\" # demo\nstroki = []\nurllist = []\nusersid = []\nop = ['591545559']\noperators = set(op)\ntry:\n with codecs.open('dist.txt', 'r', encoding='utf-8') as fi:\n dist = int(fi.read())\nexcept OSError:\n with codecs.open('dist.txt', 'w', encoding='utf-8') as fi:\n fi.seek(0)\n fi.write('0')\ntry:\n with codecs.open('whitelist.txt', 'r', encoding='utf-8') as fi:\n stroki = map(int, fi.readlines())\nexcept OSError:\n with codecs.open('whitelist.txt', 'w', encoding='utf-8') as fi:\n fi.seek(0)\nwhitelist = set(stroki)\ntry:\n with codecs.open('id.txt', 'r', encoding='utf-8') as fi:\n t = fi.readlines()\n for irt in t:\n usersid.append(int(irt))\n usersidset = set(usersid)\nexcept OSError:\n with codecs.open('id.txt', 'w', encoding='utf-8') as fi:\n fi.seek(0)\n usersidset = set([])\n usersid = []\nif 591545559 not in whitelist:\n with codecs.open('whitelist.txt', 'w', encoding='utf-8') as fi:\n fi.write('591545559\\n')\nactionKeyboard = types.ReplyKeyboardMarkup()\nactionKeyboard.row('Узнать расписание \\n📅', 'Узнать ДЗ \\n📖')\nactionKeyboard.row('Посмотреть команды \\n📔', 'Узнать данные zoom \\n📹')\nactionKeyboard.row('Просмотреть файлы \\n📁', 'Изменить дз \\n📖')\nsubjectKeyboard = types.ReplyKeyboardMarkup(False, True)\nsubjectKeyboard.row(\"Инф-ка\", \"Лит-ра\", \"Алгебра\", \"Астрономия\")\nsubjectKeyboard.row(\"Ино ИЭ\", \"Ино СА\", \"Физика\", \"Геометрия\")\nsubjectKeyboard.row(\"Русский язык\", \"Химия\", \"История\", 'Немецкий язык')\ntimetableKeyboard = types.ReplyKeyboardMarkup(False, True)\ntimetableKeyboard.row(\"Понедельник\", \"Вторник\", \"Среда\")\ntimetableKeyboard.row(\"Четверг\", \"Пятница\", \"Суббота\")\n\n\n@bot.message_handler(commands=['start'])\ndef start_message(message):\n bot.send_message(message.chat.id, 'Нажми одну из кнопок '\n 'или набери /commands для списка команд.',\n reply_markup=actionKeyboard)\n if message.from_user.id not in usersidset:\n with codecs.open('id.txt', 'a', encoding='utf-8') as f:\n f.write(str(message.from_user.id) + '\\n')\n usersidset.add(message.from_user.id)\n usersid.append(message.from_user.id)\n\n\n@bot.message_handler(commands=['knowadmin'])\ndef become_admin(message):\n msg = bot.send_message(message.chat.id, 'Создатели - @Talziar, @artemgoriunov')\n bot.send_message(message.chat.id, 'Нажми одну из кнопок '\n 'или набери /commands для списка команд.',\n reply_markup=actionKeyboard)\n\n\n@bot.message_handler(commands=['adminrebirth'])\ndef become_admin(message):\n msg = bot.send_message(message.chat.id, 'Вы уверены, что хотите отправить заявку '\n 'на становление администратором?/n Ответьте да или нет')\n 
bot.register_next_step_handler(msg, becoming_admin)\n\n\n@bot.message_handler(commands=['commands'])\ndef help_message(message):\n msg_text = 'Список команд:\\n/alltasks - вывод всех заданий на экран\\n/belltableshow - показ расписания звонков\\n' \\\n '/help - для связи с администраторами\\n' \\\n '/adminrebirth - для запроса на получение админ-прав\\n' \\\n '/knowadmin - узнать авторов'\n if message.from_user.id in whitelist:\n msg_text += '\\n/timetablechange - чтобы изменить расписание на определенный день\\n' \\\n '/zoomchange - чтобы изменить идентификатор и пароль для zoom по определенному предмету\\n' \\\n '/taskadd - добавление задания к существующему по определенному предмету\\n' \\\n '/announce - сделать объявление в группе'\n if str(message.from_user.id) in operators:\n msg_text += '\\n/newadmin - добавить нового администратора\\n/deleteadmin - забрать права администратора' \\\n '\\n/distchange - сменить режим обучения'\n bot.send_message(message.chat.id, msg_text, reply_markup=actionKeyboard)\n\n\n@bot.message_handler(commands=['newadmin'])\ndef add_new_admin(message):\n if str(message.from_user.id) in operators:\n msg = bot.send_message(message.chat.id, \"Кого ты хочешь сделать новым админиcтратором?\")\n bot.register_next_step_handler(msg, adding_new_admin)\n else:\n bot.send_message(message.chat.id, \"У тебя нет прав для совершения этого действия\")\n\n\n@bot.message_handler(commands=['deleteadmin'])\ndef delete_admin(message):\n if str(message.from_user.id) in operators:\n msg = bot.send_message(message.chat.id, \"У кого ты хочешь забрать права администратора?\")\n bot.register_next_step_handler(msg, deleting_admin)\n else:\n bot.send_message(message.chat.id, \"У тебя нет прав для совершения этого действия\")\n\n\n@bot.message_handler(commands=['announce'])\ndef announce(message):\n if message.from_user.id in whitelist:\n msg = bot.send_message(message.chat.id, \"О чем ты хочешь сообщить?\")\n bot.register_next_step_handler(msg, announcement)\n else:\n bot.send_message(message.chat.id, \"У тебя нет прав использовать эту команду\")\n\n\n@bot.message_handler(commands=['help'])\ndef admin_help(message):\n msg = bot.send_message(message.chat.id, \"О чем ты хочешь сообщить администраторам?\")\n bot.register_next_step_handler(msg, admin_helping)\n\n\n@bot.message_handler(commands=['alltasks'])\ndef all_tasks_output(message):\n try:\n with codecs.open('task.txt', encoding='utf-8') as f:\n lines = f.readlines()\n except OSError:\n with codecs.open('task.txt', 'w', encoding='utf-8') as f:\n f.seek(0)\n bot.send_message(message.chat.id, \"Нет домашнего задания\")\n else:\n for i in lines:\n if i != '':\n bot.send_message(message.chat.id, d1[i[0]].capitalize() + ': ' + i[2].lower() + i[3:])\n\n\n@bot.message_handler(commands=['distchange'])\ndef dist_change(message):\n if str(message.from_user.id) in operators:\n global dist\n try:\n with codecs.open('dist.txt', 'r', encoding='utf-8') as f:\n dist = int(f.read())\n except OSError:\n with codecs.open('dist.txt', 'w', encoding='utf-8') as f:\n f.seek(0)\n f.write('0')\n dist = 0\n bot.send_message(message.chat.id, \"Установлен школьный режим обучения\")\n else:\n with codecs.open('dist.txt', 'w', encoding='utf-8') as f:\n f.seek(0)\n f.write(str(abs(dist - 1)))\n dist = abs(dist - 1)\n if dist == 0:\n bot.send_message(message.chat.id, \"Установлен школьный режим обучения\")\n else:\n bot.send_message(message.chat.id, \"Установлен дистанционный режим обучения\")\n else:\n bot.send_message(message.chat.id, \"У тебя нет прав использовать 
эту команду\")\n\n\n@bot.message_handler(commands=['belltableshow'])\ndef bell_table_show(message):\n belltable = 'Расписание звонков\\n'\n for i in range(1, 8):\n belltable += str(i) + ')' + time[i] + '\\n'\n bot.send_message(message.chat.id, belltable)\n belltable_dist = 'Расписание звонков во время дистанционного обучения\\n'\n for i in range(1, 8):\n belltable_dist += str(i) + ')' + time_dist[i] + '\\n'\n bot.send_message(message.chat.id, belltable_dist)\n\n\n@bot.message_handler(commands=['taskchange'])\ndef task_change(message):\n if message.from_user.id not in whitelist:\n bot.send_message(message.chat.id, \"У тебя нет прав изменять домашнее задание\")\n else:\n msg = bot.send_message(message.chat.id, 'По какому предмету ты хочешь внести дз?', reply_markup=subjectKeyboard)\n bot.register_next_step_handler(msg, subject_input)\n\n\n@bot.message_handler(commands=['taskadd'])\ndef task_add(message):\n if message.from_user.id not in whitelist:\n bot.send_message(message.chat.id, \"У тебя нет прав изменять домашнее задание\")\n else:\n msg = bot.send_message(message.chat.id, 'По какому предмету ты хочешь добавить дз?',\n reply_markup=subjectKeyboard)\n bot.register_next_step_handler(msg, subject_add)\n\n\n@bot.message_handler(commands=['uploadphoto'])\ndef send_photo(message):\n msg = bot.send_message(message.chat.id, \"Go on, honey\")\n bot.register_next_step_handler(msg, sending_photo)\n\n\ndef sending_photo(message):\n file_info = bot.get_file(message.photo[-1].file_id)\n msg = bot.send_message(message.chat.id, 'В какую директорию вы хотите положить данный файл?',\n reply_markup=subjectKeyboard)\n bot.register_next_step_handler(msg, get_file_name, file_info)\n\n\ndef get_file_name(message, file_info):\n dr = message.text\n msg = bot.send_message(message.chat.id, \"Как назвать фото?\")\n r = requests.get(\n 'https://api.telegram.org/file/bot1066163670:AAFsraOqL5QCp-dHXN9J5LaFlBfFXCBuTm8/' + file_info.file_path)\n bot.register_next_step_handler(msg, save_photo, r, dr)\n\n\ndef save_photo(message, r, direct):\n file_name = str(message.text) + '.jpg'\n script_dir = os.path.dirname(os.path.abspath(__file__))\n dest_dir = os.path.join(script_dir, direct)\n try:\n os.makedirs(dest_dir)\n except OSError:\n pass\n path = os.path.join(dest_dir, file_name)\n open(path, 'wb').write(r.content)\n bot.send_message(message.chat.id, \"Фото добавлено, господин!\")\n\n\n@bot.message_handler(commands=['deletephoto'])\ndef delete_photo(message):\n msg = bot.send_message(message.chat.id, \"Выбери предмет, \"\n \"по которому хочешь удалить фото\", reply_markup=subjectKeyboard)\n bot.register_next_step_handler(msg, photo_choose)\n\n\ndef photo_choose(message):\n if show_list_all_photos(message):\n msg = bot.send_message(message.chat.id, \"Введи название фото, которое хочешь удалить\")\n bot.register_next_step_handler(msg, deleting_photo, message.text)\n\n\ndef deleting_photo(message, direct):\n file_name = message.text\n script_dir = os.path.dirname(os.path.abspath(__file__))\n dest_dir = os.path.join(script_dir, direct)\n file_path = os.path.join(dest_dir, file_name)\n try:\n os.remove(file_path)\n except OSError as e:\n bot.send_message(message.chat.id, \"Что-то пошло не так, попробуйте еще раз\")\n delete_photo(message)\n return\n bot.send_message(message.chat.id, 'Фото успешно удалено')\n\n\n@bot.message_handler(commands=['listallphotos'])\ndef list_all_photos(message):\n msg = bot.send_message(message.chat.id, \"Выбери предмет\", reply_markup=subjectKeyboard)\n bot.register_next_step_handler(msg, 
show_list_all_photos)\n\n\ndef show_list_all_photos(message):\n    for root, dirs, files in os.walk(message.text):\n        for filename in files:\n            dest_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), message.text)\n            path = os.path.join(dest_dir, filename)\n            img = open(path, 'rb')\n            bot.send_photo(message.chat.id, img, filename)\n        return True\n    bot.send_message(message.chat.id, \"По этому предмету нет фотографий\")\n    return False\n\n\n@bot.message_handler(commands=['timetablechange'])\ndef timetable_change(message):\n    if message.from_user.id not in whitelist:\n        bot.send_message(message.chat.id, \"У тебя нет прав изменять расписание\")\n    else:\n        msg = bot.send_message(message.chat.id, 'На какой день недели ты хочешь изменить расписание?',\n                               reply_markup=timetableKeyboard)\n        bot.register_next_step_handler(msg, day_input)\n\n\n@bot.message_handler(commands=['zoomchange'])\ndef zoom_ind_change_choice(message):\n    if message.from_user.id not in whitelist:\n        bot.send_message(message.chat.id, \"У тебя нет прав изменять данные zoom\")\n    else:\n        msg = bot.send_message(message.chat.id,\n                               \"По какому предмету ты хочешь изменить идентификатор и пароль для входа в zoom?\",\n                               reply_markup=subjectKeyboard)\n        bot.register_next_step_handler(msg, subject_zoom_input)\n\n\n@bot.message_handler(content_types=['text'])\ndef send_text(message):\n    st = message.text.lower()\n    global whitelist\n    if message.chat.id != channel_id and message.chat.id != chat_id:\n        if st == \"узнать дз \\n📖\":\n            msg = bot.send_message(message.chat.id, 'По какому предмету ты хочешь узнать дз?',\n                                   reply_markup=subjectKeyboard)\n            bot.register_next_step_handler(msg, task_output)\n        elif st == \"узнать расписание \\n📅\":\n            msg = bot.send_message(message.chat.id, 'На какой день недели ты хочешь узнать расписание?',\n                                   reply_markup=timetableKeyboard)\n            bot.register_next_step_handler(msg, time_table_output)\n        elif st == \"узнать данные zoom \\n📹\":\n            msg = bot.send_message(message.chat.id,\n                                   \"По какому предмету ты хочешь получить идентификатор и пароль для входа в zoom?\",\n                                   reply_markup=subjectKeyboard)\n            bot.register_next_step_handler(msg, zoom_ind_out)\n        elif st == \"спасибо!\":  # st is lower-cased above, so the comparison must be lower case too\n            bell_table_show(message)  # bell_table_show expects the incoming message\n        elif st == \"изменить дз \\n📖\":\n            if message.from_user.id in whitelist:\n                msg = bot.send_message(message.chat.id, 'По какому предмету ты хочешь внести дз?',\n                                       reply_markup=subjectKeyboard)\n                bot.register_next_step_handler(msg, subject_input)\n            else:\n                bot.send_message(message.chat.id,\n                                 \"У вас нет прав использовать эту команду. 
\"\n \"Свяжитесь с администратором для предоставления доступа через команду /help.\")\n elif st == \"посмотреть команды \\n📔\":\n msg_text = 'Список команд:\\n/alltasks - вывод всех заданий на экран\\n' \\\n '/belltableshow - показ расписания звонков\\n/help - для связи с администраторами\\n' \\\n '/knowadmin - узнать создателей бота\\n' \\\n '/adminrebirth - для запроса на получение админ-прав'\n\n if message.from_user.id in whitelist:\n msg_text += '\\n/timetablechange - чтобы изменить расписание на определенный день\\n' \\\n '/zoomchange - чтобы изменить идентификатор и пароль для zoom по определенному предмету\\n' \\\n '/taskadd - добавление задания к существующему по определенному предмету\\n' \\\n '/announce - сделать объявление в группе\\n' \\\n '/uploadphoto - Выгрузить фото в бота'\n if str(message.from_user.id) in operators:\n msg_text += '\\n/newadmin - добавить нового администратора\\n' \\\n '/deleteadmin - забрать права администратора' \\\n '\\n/distchange - сменить режим обучения'\n bot.send_message(message.chat.id, msg_text, reply_markup=actionKeyboard)\n elif st == \"просмотреть файлы \\n📁\":\n bot.send_message(message.chat.id, \"Файлы для 10В:\\nhttps://yadi.sk/d/RoGusYVeKUkPcg?w=1\",\n reply_markup=actionKeyboard)\n else:\n bot.send_message(message.chat.id, 'Такой команды не существует', reply_markup=actionKeyboard)\n\n\n@bot.message_handler(content_types=['text'])\ndef subject_input(subject):\n if subject.text != '/start':\n global num\n try:\n num = d[subject.text.lower()]\n except KeyError:\n msg = bot.reply_to(subject, 'Тварь болотная, все еще не можешь запомнить предметы?. Попробуй еще раз.',\n reply_markup=subjectKeyboard)\n bot.register_next_step_handler(msg, subject_input)\n else:\n message = bot.reply_to(subject, 'Укажи домашнее задание по этому предмету')\n bot.register_next_step_handler(message, task_input)\n else:\n bot.send_message(subject.chat.id, 'Нажми одну из кнопок '\n 'или набери /commands для списка команд.',\n reply_markup=actionKeyboard)\n\n\n@bot.message_handler(content_types=['text'])\ndef subject_add(subject):\n if subject.text != '/start':\n global anum\n try:\n anum = d[subject.text.lower()]\n except KeyError:\n msg = bot.reply_to(subject, 'Тварь болотная, все еще не можешь запомнить предметы?. Попробуй еще раз.',\n reply_markup=subjectKeyboard)\n bot.register_next_step_handler(msg, subject_input)\n else:\n try:\n with codecs.open('task.txt', encoding='utf-8') as f:\n for line in f:\n if line[0] == anum:\n msg = bot.reply_to(subject, line[2:] + '\\nЧто ты хочешь добавить к этому заданию?',\n reply_markup=actionKeyboard)\n bot.register_next_step_handler(msg, adding_task)\n break\n else:\n bot.send_message(subject.chat.id, 'Дз по этому предмету не существует',\n reply_markup=actionKeyboard)\n except OSError:\n with codecs.open('task.txt', 'w', encoding='utf-8') as f:\n f.seek(0)\n bot.send_message(subject.chat.id, 'Дз по этому предмету не существует', reply_markup=actionKeyboard)\n else:\n bot.send_message(subject.chat.id, 'Нажми одну из кнопок '\n 'или набери /commands для списка команд.',\n reply_markup=actionKeyboard)\n\n\n@bot.message_handler(content_types=['text'])\ndef day_input(message):\n if message.text != '/start':\n global wdnum\n try:\n wdnum = wd[message.text.lower()]\n except KeyError:\n msg = bot.reply_to(message, 'Такого дня недели не существует. 
Попробуй еще раз.',\n reply_markup=timetableKeyboard)\n bot.register_next_step_handler(msg, day_input)\n else:\n message = bot.reply_to(message, 'Укажите расписание на выбранный день по следующему правилу:\\n'\n '1)Если урока нет,то нужно написать blank;\\n'\n '2)Все предметы записывать через запятую Без пробела. Например:\\n'\n 'blank,blank,Математика,Математика,Русский язык,Литература,Иностранный')\n bot.register_next_step_handler(message, time_table_input)\n else:\n bot.send_message(message.chat.id, 'Нажми одну из кнопок '\n 'или набери /commands для списка команд.',\n reply_markup=actionKeyboard)\n\n\n@bot.message_handler(content_types=['text'])\ndef subject_zoom_input(message):\n if message.text != '/start':\n global zoom_num\n try:\n zoom_num = d[message.text.lower()]\n except KeyError:\n msg = bot.reply_to(message, 'Такого предмета не существует. Попробуй еще раз.',\n reply_markup=subjectKeyboard)\n bot.register_next_step_handler(msg, subject_zoom_input)\n else:\n msg = bot.reply_to(message,\n 'Укажите идентификатор и пароль для входа в zoom для предмету, который вы выбрали')\n bot.register_next_step_handler(msg, zoom_ind_in)\n else:\n bot.send_message(message.chat.id, 'Нажми одну из кнопок '\n 'или набери /commands для списка команд.',\n reply_markup=actionKeyboard)\n\n\n@bot.message_handler(content_types=['text'])\ndef time_table_input(message):\n if message.text != '/start':\n lines = []\n try:\n with codecs.open('timetable.txt', 'r', encoding='utf-8') as f:\n lines = f.readlines()\n except OSError:\n with codecs.open('timetable.txt', 'w', encoding='utf-8') as f:\n f.seek(0)\n with codecs.open('timetable.txt', 'w', encoding='utf-8') as f:\n for line in lines:\n if int(line[0]) != wdnum:\n f.write(line)\n f.seek(0, 2)\n f.write(str(wdnum) + \")\" + message.text + '\\n')\n bot.reply_to(message, 'Расписание изменено. Тебе еще что-то нужно?', reply_markup=actionKeyboard)\n else:\n bot.send_message(message.chat.id, 'Нажми одну из кнопок '\n 'или набери /commands для списка команд.',\n reply_markup=actionKeyboard)\n\n\n@bot.message_handler(content_types=['text'])\ndef time_table_output(message):\n if message.text != '/start':\n try:\n chislo = wd[message.text.lower()]\n except KeyError:\n msg = bot.reply_to(message, 'Такого дня недели не существует. 
Попробуй еще раз.',\n reply_markup=timetableKeyboard)\n bot.register_next_step_handler(msg, time_table_output)\n else:\n try:\n with codecs.open('timetable.txt', encoding='utf-8') as f:\n for line in f:\n if int(line[0]) == chislo:\n items = str(line[2:]).split(',')\n out = ''\n if dist == 1:\n times = time_dist\n else:\n times = time\n for ind, val in enumerate(items, 1):\n if val == 'blank':\n out += str(ind) + ')\\n'\n else:\n out += str(ind) + ')' + times[ind] + ' ' + val + '\\n'\n bot.reply_to(message, out, reply_markup=actionKeyboard)\n break\n else:\n bot.send_message(message.chat.id, 'Расписание на этот день не занесено в базу данных',\n reply_markup=actionKeyboard)\n except OSError:\n with codecs.open('timetable.txt', 'w', encoding='utf-8') as f:\n f.seek(0)\n bot.send_message(message.chat.id, \"Расписание не занесено в базу данных\", reply_markup=actionKeyboard)\n else:\n bot.send_message(message.chat.id, 'Нажми одну из кнопок '\n 'или набери /commands для списка команд.',\n reply_markup=actionKeyboard)\n\n\n@bot.message_handler(content_types=['text'])\ndef task_input(task):\n if task.text != '/start':\n now = datetime.datetime.now()\n lines = []\n try:\n with codecs.open('task.txt', 'r', encoding='utf-8') as f:\n lines = f.readlines()\n except OSError:\n with codecs.open('task.txt', 'w', encoding='utf-8') as f:\n f.seek(0)\n with codecs.open('task.txt', 'w', encoding='utf-8') as f:\n for line in lines:\n if line[0] != num:\n f.write(line)\n f.seek(0, 2)\n f.write(str(num) + \")\" + task.text + \". Дата добавления задания \" + now.strftime(\"%d-%m-%Y %H:%M\") + \"\\n\")\n bot.reply_to(task, 'Задание записано. Тебе еще что-то нужно?', reply_markup=actionKeyboard)\n \"\"\" for i in whitelist:\n try:\n # if not i == task.from_user.id:\n bot.send_message(i, \"@\" + task.from_user.username +\n \" изменил задание по предмету - \" +\n d1[num].capitalize() + '\\nВот его содержимое: ' + task.text)\n except telebot.apihelper.ApiTelegramException:\n continue\n except TypeError:\n bot.send_message(i, \"Что-то пошло не так. Проверьте есть ли у вас имя пользователя\")\n break\n \"\"\"\n else:\n bot.send_message(task.chat.id, 'Нажми одну из кнопок '\n 'или набери /commands для списка команд.',\n reply_markup=actionKeyboard)\n\n\n\"\"\" \nПочему не так, идиот?\nif message.text.lower() == '/start':\n bot.send_message(task.chat.id, 'Нажми одну из кнопок '\n 'или набери /commands для списка команд.',\n reply_markup=actionKeyboard)\n return\n\"\"\"\n\n\n@bot.message_handler(content_types=['text'])\ndef task_output(message):\n if message.text != '/start':\n global num\n try:\n num = d[message.text.lower()]\n except KeyError:\n msg = bot.reply_to(message, 'Тварь болотная, все еще не можешь запомнить предметы? 
Попробуй еще раз.',\n reply_markup=subjectKeyboard)\n bot.register_next_step_handler(msg, task_output)\n else:\n try:\n with codecs.open('task.txt', encoding='utf-8') as f:\n for line in f:\n if line[0] == num:\n bot.reply_to(message, str(line[2:]).replace(';', '\\n'), reply_markup=actionKeyboard)\n break\n else:\n bot.send_message(message.chat.id, 'Дз по этому предмету не существует',\n reply_markup=actionKeyboard)\n except OSError:\n with codecs.open('task.txt', 'w', encoding='utf-8') as f:\n f.seek(0)\n bot.send_message(message.chat.id, 'Дз по этому предмету не существует', reply_markup=actionKeyboard)\n bot.send_message(message.chat.id, 'Тебе еще что-то нужно?', reply_markup=actionKeyboard)\n else:\n bot.send_message(message.chat.id, 'Нажми одну из кнопок '\n 'или набери /commands для списка команд.',\n reply_markup=actionKeyboard)\n\n\n@bot.message_handler(content_types=['text'])\ndef zoom_ind_in(message):\n if message.text != '/start':\n lines = []\n try:\n with codecs.open('zoom.txt', 'r', encoding='utf-8') as f:\n lines = f.readlines()\n except OSError:\n with codecs.open('zoom.txt', 'w', encoding='utf-8') as f:\n f.seek(0)\n with codecs.open('zoom.txt', 'w', encoding='utf-8') as f:\n for line in lines:\n if line[0] != zoom_num:\n f.write(line)\n f.seek(0, 2)\n sp = str(message.text).split()\n f.write(str(zoom_num) + \")\" + \"Идентификатор - \" + sp[0] + \". \" + \"Пароль - \" + sp[1] + \"\\n\")\n bot.reply_to(message, 'Данные записаны. Тебе еще что-то нужно?', reply_markup=actionKeyboard)\n else:\n bot.send_message(message.chat.id, 'Нажми одну из кнопок '\n 'или набери /commands для списка команд.',\n reply_markup=actionKeyboard)\n\n\n@bot.message_handler(content_types=['text'])\ndef zoom_ind_out(message):\n if message.text != '/start':\n try:\n chislo = d[message.text.lower()]\n except KeyError:\n msg = bot.reply_to(message,\n 'Такого предмета не существует. Попробуй еще раз.', reply_markup=subjectKeyboard)\n bot.register_next_step_handler(msg, zoom_ind_out)\n else:\n try:\n with codecs.open('zoom.txt', encoding='utf-8') as f:\n for line in f:\n if line[0] == chislo:\n bot.reply_to(message, line[2:], reply_markup=actionKeyboard)\n break\n else:\n bot.send_message(message.chat.id,\n 'Идентификатор zoom для этого предмета не внесен в базу данных',\n reply_markup=actionKeyboard)\n except OSError:\n with codecs.open('zoom.txt', 'w', encoding='utf-8') as f:\n f.seek(0)\n bot.send_message(message.chat.id, \"Идентификаторы zoom не внесены в базу данных\",\n reply_markup=actionKeyboard)\n else:\n bot.send_message(message.chat.id, 'Нажми одну из кнопок '\n 'или набери /commands для списка команд.',\n reply_markup=actionKeyboard)\n\n\n@bot.message_handler(content_types=['text'])\ndef adding_new_admin(message):\n mes = message.text\n if mes != '/start':\n if not mes.isdigit():\n msg = bot.send_message(message.chat.id, \"Ты ввел что-то не так. 
Попробуй еще раз.\")\n bot.register_next_step_handler(msg, adding_new_admin)\n else:\n whitelist.add(int(mes))\n try:\n with codecs.open('whitelist.txt', 'r', encoding='utf-8') as f:\n lines = f.readlines()\n except OSError:\n with codecs.open('whitelist.txt', 'w', encoding='utf-8') as f:\n f.seek(0)\n with codecs.open('whitelist.txt', 'w', encoding='utf-8') as f:\n for line in lines:\n if int(line) != int(mes):\n f.write(line)\n f.seek(0, 2)\n f.write(mes + '\\n')\n bot.send_message(message.chat.id, \"Новый администратор успешно добавлен!\")\n else:\n bot.send_message(message.chat.id, 'Нажми одну из кнопок '\n 'или набери /commands для списка команд.',\n reply_markup=actionKeyboard)\n\n\n@bot.message_handler(content_types=['text'])\ndef deleting_admin(message):\n mes = message.text\n if mes == \"383193252\":\n bot.send_message(message.chat.id, \"Ты не можешь забрать права администратора у моего создателя\")\n elif mes != '/start':\n if not mes.isdigit():\n msg = bot.send_message(message.chat.id, \"Ты ввел что-то не так. Попробуй еще раз.\")\n bot.register_next_step_handler(msg, adding_new_admin)\n else:\n whitelist.discard(int(mes))\n try:\n with codecs.open('whitelist.txt', 'r', encoding='utf-8') as f:\n lines = f.readlines()\n except OSError:\n with codecs.open('whitelist.txt', 'w', encoding='utf-8') as f:\n f.seek(0)\n with codecs.open('whitelist.txt', 'w', encoding='utf-8') as f:\n for line in lines:\n if int(line) != int(mes):\n f.write(line)\n bot.send_message(message.chat.id, \"Права администратора забраны\")\n else:\n bot.send_message(message.chat.id, 'Нажми одну из кнопок '\n 'или набери /commands для списка команд.',\n reply_markup=actionKeyboard)\n\n\n@bot.message_handler(content_types=['text'])\ndef announcement(message):\n # bot.forward_message(chat_id, message.from_user.id, message.message_id)\n for i in usersid:\n try:\n if not i == message.from_user.id:\n bot.send_message(i, \"Уважаемый(ая) \" + \"@\" + message.from_user.username +\n \" попросил(а) меня передать пользователям:\", \"\\n\" + message.text)\n except telebot.apihelper.ApiTelegramException:\n continue\n bot.send_message(message.chat.id, \"Сообщение успешно отправлено!\")\n\n\n@bot.message_handler(content_types=['text'])\ndef admin_helping(message):\n if message.text != '/start':\n for i in whitelist:\n try:\n if not i == message.from_user.id:\n bot.send_message(i,\n \"Личность с именем \" + \"@\" + message.from_user.username +\n \" попросило меня передать администраторам:\", \"\\n\" + str(message.text))\n except telebot.apihelper.ApiTelegramException:\n continue\n except TypeError:\n msg = bot.send_message(message.chat.id, 'Ты ввел(а) что-то нет так, попробуй еще раз')\n bot.register_next_step_handler(msg, admin_help)\n bot.send_message(message.chat.id, \"Сообщение успешно отправлено!\")\n else:\n bot.send_message(message.chat.id, 'Нажми одну из кнопок '\n 'или набери /commands для списка команд.',\n reply_markup=actionKeyboard)\n\n\n@bot.message_handler(content_types=['text'])\ndef becoming_admin(message):\n if message.text == '/start':\n bot.send_message(message.chat.id, 'Нажми одну из кнопок ' + 'или набери /commands для списка команд.',\n reply_markup=actionKeyboard)\n return\n if message.text.lower() == 'да':\n try:\n name = message.from_user.first_name + ' ' + message.from_user.last_name\n user_id = str(message.from_user.id)\n bot.send_message(383193252, name + ' хочет стать админом. ' + 'Вы согласны? 
Вот его айди:' + '\\n' + user_id)\n except TypeError:\n msg = bot.send_message(message.chat.id,\n 'Something is wrong. Do you have both firstname and surname in Telegram?')\n bot.register_next_step_handler(msg, becoming_admin)\n elif message.text.lower() == 'нет':\n bot.send_message(message.chat.id, 'Запрос на получение прав администратора отменен')\n else:\n msg = bot.send_message(message.chat.id, 'Вы ввели что-то не так. Так вы хотите стать администратором? да/нет')\n bot.register_next_step_handler(msg, becoming_admin)\n\n\n@bot.message_handler(content_types=['text'])\ndef adding_task(message):\n mes = message.text\n if mes != '/start':\n lines = []\n try:\n with codecs.open('task.txt', 'r', encoding='utf-8') as f:\n lines = f.readlines()\n except OSError:\n with codecs.open('task.txt', 'w', encoding='utf-8') as f:\n f.seek(0)\n with codecs.open('task.txt', 'w', encoding='utf-8') as f:\n for line in lines:\n if line[0] != anum:\n f.write(line)\n else:\n f.seek(0, 2)\n f.write(line[:-1] + ' ' + mes + '\\n')\n bot.reply_to(message, 'Задание записано. Тебе еще что-то нужно?', reply_markup=actionKeyboard)\n else:\n bot.send_message(message.chat.id, 'Нажми одну из кнопок '\n 'или набери /commands для списка команд.',\n reply_markup=actionKeyboard)\n\n\nbot.polling(none_stop=True)\n","repo_name":"Artem1901/Bot_test","sub_path":"NewBranch.py","file_name":"NewBranch.py","file_ext":"py","file_size_in_byte":41140,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"25096474568","text":"#!/usr/bin/python3\r\n# -*-coding:utf-8 -*-\r\n#Author:Eminjan\r\n#@Time :2018/4/18 20:06\r\nfrom random import Random\r\nfrom django.core.mail import send_mail,EmailMessage\r\n\r\nfrom users.models import EmailVerifyRecord\r\nfrom Mxonline.settings import EMAIL_FROM\r\nfrom django.template import loader\r\n\r\ndef random_str(random_length=8):\r\n str = ''\r\n # 生成字符串的可选字符串\r\n chars = 'AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz0123456789'\r\n length = len(chars) - 1\r\n random = Random()\r\n for i in range(random_length):\r\n str += chars[random.randint(0, length)]\r\n return str\r\n\r\n\r\ndef send_register_email(email,send_type='register'):\r\n email_record = EmailVerifyRecord()\r\n if send_type == \"update_email\":\r\n code = random_str(4)\r\n else:\r\n code = random_str(16)\r\n email_record.code = code\r\n email_record.email = email\r\n email_record.send_type = send_type\r\n\r\n email_record.save()\r\n\r\n email_title = \"\"\r\n email_body = \"\"\r\n\r\n if send_type == 'register':\r\n email_title = 'Mx Online 激活链接'\r\n # email_body = '请点击下面的链接激活你的账号:http://127.0.0.1:8000/active/{0}'.format(code)\r\n email_body = loader.render_to_string(\r\n \"email_register.html\", # 需要渲染的html模板\r\n {\r\n \"active_code\": code # 参数\r\n }\r\n )\r\n\r\n msg = EmailMessage(email_title, email_body, EMAIL_FROM, [email])\r\n msg.content_subtype = \"html\"\r\n send_status = msg.send()\r\n # send_status = send_mail(email_title,email_body,EMAIL_FROM,[email])\r\n if send_status:\r\n pass\r\n elif send_type == 'forget':\r\n email_title = 'Mx Online 密码重置链接'\r\n # email_body = '请点击下面的重置你的密码:http://127.0.0.1:8000/reset/{0}'.format(code)\r\n email_body = loader.render_to_string(\r\n \"email_forget.html\", # 需要渲染的html模板\r\n {\r\n \"active_code\": code # 参数\r\n }\r\n )\r\n\r\n msg = EmailMessage(email_title, email_body, EMAIL_FROM, [email])\r\n msg.content_subtype = \"html\"\r\n send_status = msg.send()\r\n # 如果发送成功\r\n if send_status:\r\n pass\r\n elif send_type 
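In the announcement and admin_helping handlers above, the relayed text is passed to bot.send_message as a stray extra positional argument (a comma where a + was intended), so the body never reaches the recipients; the retry in deleting_admin also re-registers adding_new_admin instead of itself. A minimal corrected broadcast sketch, assuming the same pyTelegramBotAPI setup as the file (the English strings are illustrative translations):

import telebot

def broadcast(bot, user_ids, sender_username, text):
    # Concatenate header and body into a single message string; the
    # original passed the body as a separate positional argument.
    for uid in user_ids:
        try:
            bot.send_message(uid, "@" + sender_username +
                             " asked me to relay:\n" + text)
        except telebot.apihelper.ApiTelegramException:
            continue  # user blocked the bot or the chat is gone; skip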
=='update_email':\r\n email_title = 'Mx Online 邮箱修改验证码'\r\n # email_body = '你的邮箱验证码为:{0}'.format(code)\r\n email_body = loader.render_to_string(\r\n \"email_update_email.html\", # 需要渲染的html模板\r\n {\r\n \"active_code\": code # 参数\r\n }\r\n )\r\n\r\n msg = EmailMessage(email_title, email_body, EMAIL_FROM, [email])\r\n msg.content_subtype = \"html\"\r\n send_status = msg.send()\r\n if send_status:\r\n pass","repo_name":"Eminjan/MxOnline","sub_path":"utils/email_send.py","file_name":"email_send.py","file_ext":"py","file_size_in_byte":2837,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"5"} +{"seq_id":"37727689675","text":"import operator\nfrom functools import reduce\n\nfrom django.core.paginator import Paginator\nfrom django.db.models import Q\n\n\ndef get_pagination_info(request, posts, per_page=6):\n \"\"\"\n request = HttpRequest object\n posts - posts needed for showing\n per_page - how many posts you want to show on one page\n return 'page' and 'paginator'\n \"\"\"\n # показывать по 6 записей на странице\n paginator = Paginator(posts, per_page)\n # переменная в url с номером запрошеной страницы\n page_number = request.GET.get('page')\n # получить записи с нужным смещением\n page = paginator.get_page(page_number)\n return page, paginator\n\n\ndef get_tags(request):\n \"\"\"фильтрация рецептов по тегам\"\"\"\n\n # получаем список переменных с названием тега\n tags = request.GET.getlist('tag')\n\n if tags:\n tags_filter = reduce(operator.or_,\n (Q(tags__contains=tag) for tag in tags))\n return tags, tags_filter\n\n return tags, None\n","repo_name":"vostavhy/foodgram-project","sub_path":"working_scripts/rendering_scripts.py","file_name":"rendering_scripts.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"19024458713","text":"# idea: \n# - choose an target, sort the array so that every element in its left is smalller, right is bigger (partition)\n\n\n# solution:\n# we start from the leftmost element and keep track of the index of smaller (or equal) elements as i. \n# While traversing, if we find a smaller element, we swap the current element with arr[i]. 
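get_tags above folds one Q(tags__contains=tag) condition per selected tag into a single OR filter via reduce(operator.or_, ...). The same idiom in isolation, assuming a hypothetical Recipe model with a tags field:

import operator
from functools import reduce

from django.db.models import Q

def filter_by_any_tag(queryset, tags):
    # No tags selected: return the queryset unchanged.
    if not tags:
        return queryset
    # OR the per-tag conditions: a row matches if it has any selected tag.
    condition = reduce(operator.or_, (Q(tags__contains=tag) for tag in tags))
    return queryset.filter(condition)

# e.g. filter_by_any_tag(Recipe.objects.all(), ['vegan', 'breakfast'])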
\n# Otherwise, we ignore the current element\n\ndef quicksort(array, low, high):\n if low < high:\n \n # Find pivot element such that\n # element smaller than pivot are on the left\n # element greater than pivot are on the right\n pi = partition(array, low, high)\n \n # Recursive call on the left of pivot\n quicksort(array, low, pi - 1)\n \n # Recursive call on the right of pivot\n quicksort(array, pi + 1, high)\n return array\n \n\n# Function to find the partition position\ndef partition(array, low, high):\n \n # Choose the rightmost element as pivot\n pivot = array[high]\n \n # Pointer for greater element\n i = low - 1\n \n # Traverse through all elements\n # compare each element with pivot\n for j in range(low, high):\n if array[j] <= pivot:\n \n # If element smaller than pivot is found\n # swap it with the greater element pointed by i\n i = i + 1\n \n # Swapping element at i with element at j\n (array[i], array[j]) = (array[j], array[i])\n \n # Swap the pivot element with\n # the greater element specified by i\n (array[i + 1], array[high]) = (array[high], array[i + 1])\n \n # Return the position from where partition is done\n return i + 1\n\n\nprint(quicksort([7,1,2,6,4], 0, 4))","repo_name":"yijencheng/Leetcode","sub_path":"sort/quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"25504649786","text":"# from django.conf.urls import url\nfrom django.urls import path\nfrom UserApp import views\n\nurlpatterns = [\n # url(r'^user$', views.userAPI),\n # url(r'^user/([0-9]+)$', views.userAPI),\n # url(r'^payment$', views.paymentAPI),\n # url(r'^payment/([0-9]+)$', views.paymentAPI),\n path('user', views.userAPI),\n path('user/<int:id>', views.userAPI),\n path('payment', views.paymentAPI),\n path('payment/<int:id>', views.paymentAPI),\n]","repo_name":"panjitamzil/django-api","sub_path":"UserApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"20299696157","text":"import pymysql\nimport pymysql.cursors\nfrom tkinter import *\nfrom tkinter import ttk\nfrom tkinter import font\nfrom tkinter import messagebox\n\ndef sea_stud_lib(sid):\n conn=pymysql.connect(host='localhost',user='root')\n a=conn.cursor()\n a.execute('use books')\n a.execute('create table if not exists s'+str(sid)+'(Book_code int,Book_name char(60),Author_name char(60),constraint primary key(Book_name,Author_name))')\n a.execute('select * from s'+str(sid))\n data=a.fetchall()\n if(len(data)==0):\n messagebox.showinfo(\"Book Inquiry\",\"No Books Due\")\n else:\n root1=Tk()\n root1.geometry(\"1300x700+100+75\")\n for i in range(len(data)):\n for j in range(len(data[i])):\n label=ttk.Label(root1,text=data[i][j])\n label.place(x=str(150*(j+1)),y=str(50*(i+2)))\n label.config(font=('Helvetica',14,\"bold\") ,background = 'cyan',foreground = 'black')\n\n\ndef base_lib():\n root = Tk()\n root.geometry(\"400x250+600+300\")\n root.config(background = 'cyan')\n root.title(\"Library Management System By Ankur Kumar Pandey\")\n label = ttk.Label(root,text = \"Enter Student Id To Get The Information\")\n label.place(x='10',y='80')\n label.config(font=('Helvetica',14,\"bold\") ,background = 'cyan',foreground = 'black')\n s_id = ttk.Entry(root,width=20)\n s_id.place(x='50',y='170')\n submit = 
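A quick property check for the quicksort above: on random inputs it should agree with Python's built-in sorted. This assumes the quicksort/partition pair from the snippet is in scope:

import random

def check_quicksort(trials=200):
    for _ in range(trials):
        data = [random.randint(-50, 50) for _ in range(random.randint(0, 20))]
        # quicksort sorts in place and also returns the list.
        if quicksort(list(data), 0, len(data) - 1) != sorted(data):
            return False
    return True

print(check_quicksort())  # expected: True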
ttk.Button(root,text=\"Search\",command=lambda:[sea_stud_lib(int(s_id.get())),root.destroy()])\n submit.place(x='250',y='170')\n","repo_name":"ankurp187/python_lib_mngmnt-","sub_path":"stu_lib.py","file_name":"stu_lib.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"15908141951","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Feb 15 12:15:12 2017\r\n\r\n@author: a550859\r\n\"\"\"\r\n\r\nimport os\r\n\r\npathname='C:/Users/a550859/Desktop/py training/Sampledata'\r\nos.chdir(pathname)\r\nfile=open('output.csv',mode='r')\r\nfor row in file:\r\n print(row)\r\n colarray=row.split(',')\r\n for col in colarray:\r\n print(col)\r\n \r\n ","repo_name":"srikannanbs/python","sub_path":".gitignore/fileread.py","file_name":"fileread.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"16783478893","text":"from django.shortcuts import render, redirect\nfrom board.models import Board, Comment\n\n# Create your views here.\ndef mypageBlog(request):\n if request.user.is_authenticated:\n posts = Board.objects.filter(userId = request.user.id)\n return render(request, 'mypage/mypageBlog.html', {'posts': posts})\n else:\n return redirect('login')\n\ndef mypageComment(request):\n if request.user.is_authenticated:\n posts = Comment.objects.filter(userId = request.user.id)\n return render(request, 'mypage/mypageComment.html', {'posts': posts})\n else:\n return redirect('login')","repo_name":"seung-gyu-kim/LikeLion_GASILI_Project","sub_path":"mypage/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"73989722393","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom matplotlib.patches import Rectangle\n\n\nclass Agent:\n def __init__(self, Environment):\n self.action_space = [\"up\", \"down\", \"left\", \"right\"]\n self.Environment = Environment\n self.G = None\n self.random_threshold = 0.9\n self.alpha = 0.9\n self.init_G()\n self.succeeded = False\n\n def init_G(self):\n self.G = np.random.random((self.Environment.num_rows,self.Environment.num_cols)) * -1\n self.G[0, 0] = 0\n self.G[self.Environment.num_rows-1, self.Environment.num_cols-1] = 0\n for row in range(self.Environment.num_rows):\n for col in range(self.Environment.num_cols):\n if self.Environment.grid[row, col] == 1.0:\n self.G[row, col] = np.nan\n self.G = np.around(self.G, 2)\n\n def action_to_position(self, action):\n row, col = self.Environment.robot\n if action == \"up\":\n row = row-1\n elif action == \"down\":\n row = row+1\n elif action == \"left\":\n col = col-1\n elif action == \"right\":\n col = col+1\n else:\n raise Exception(\"No such action,please check again\")\n return row, col\n\n def chooseRandomAction(self):\n while True:\n action = np.random.choice(self.action_space)\n row, col = self.action_to_position(action)\n if self.Environment.is_allowed_move(row, col):\n return action\n\n def chooseBestAction(self):\n payoff = []\n for action in self.action_space:\n row, col = self.action_to_position(action)\n if self.Environment.is_allowed_move(row, col):\n payoff.append(self.G[row, col])\n else:\n payoff.append(np.nan)\n action = np.nanargmax(payoff)\n return self.action_space[action]\n\n def BestActionPlusRandom(self):\n if 
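The row.split(',') in fileread.py above breaks on quoted fields that contain commas; the standard csv module handles those. A sketch reading the same kind of file:

import csv

def read_csv(path):
    # csv.reader copes with quoted commas and embedded newlines,
    # which a bare str.split(',') does not.
    with open(path, newline='') as fh:
        for row in csv.reader(fh):
            print(row)
            for col in row:
                print(col)

# read_csv('output.csv')  # same file name as the snippet uses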
np.random.random_sample() > self.random_threshold:\n action = self.chooseBestAction()\n else:\n action = self.chooseRandomAction()\n position = self.action_to_position(action)\n return position\n\n def update_G(self):\n memory = self.memory.copy()\n memory_reverse = memory[::-1]\n rewards = self.rewards.copy()\n rewards_reverse = rewards[::-1]\n target = 0\n for idx, state in enumerate(memory_reverse):\n target += rewards_reverse[idx]\n self.G[state] += self.alpha*(target - self.G[state])\n\n def learn(self, episodes=300, max_count=300):\n self.init_G()\n divider = episodes//10\n for episode in range(episodes):\n if episode % divider == 0:\n self.random_threshold *= 0.9\n self.alpha *= 0.9\n self.Environment.robot = (0, 0)\n self.memory = [(0, 0)]\n self.rewards = [0.0]\n count = 0\n while not self.Environment.is_game_over():\n count += 1\n self.Environment.grid[self.Environment.robot] = 0\n self.Environment.robot = self.BestActionPlusRandom()\n self.Environment.grid[self.Environment.robot] = 2\n self.memory.append(tuple(self.Environment.robot))\n reward = 0 if self.Environment.is_game_over() else -1\n if self.Environment.is_game_over():\n self.succeeded = True\n self.rewards.append(reward)\n\n if count >= max_count:\n break\n self.update_G()\n self.G = np.around(self.G, 2)\n\n def pretraining_heatmap(self):\n f, ax = plt.subplots(figsize=(3, 3))\n df = pd.DataFrame(self.G)\n df = df.round(3)\n df[df < -99] = -99\n mask = df.isnull()\n ax = sns.heatmap(df,\n mask=mask,\n square=True,\n linewidths=0.3,\n vmin=-1,\n vmax=0,\n cmap=\"RdBu_r\",\n annot=True,\n cbar=False,\n )\n\n ax.set_facecolor(\"#401c44\")\n return f, ax\n\n def post_training_heatmap(self):\n f, ax = plt.subplots(figsize=(3, 3))\n df = pd.DataFrame(self.G)\n df = df.round(0)\n df[df < -99] = -99\n mask = df.isnull()\n ax = sns.heatmap(df,\n mask=mask,\n square=True,\n linewidths=0.3,\n vmin=-20,\n vmax=0,\n cmap=\"RdBu_r\",\n annot=True,\n cbar=False,\n )\n ax.set_facecolor(\"#401c44\")\n best_path = self.get_shortest_path()\n for rect in best_path:\n ax.add_patch(\n Rectangle(rect, 1, 1,\n fill=False,\n edgecolor='blue',\n lw=3\n )\n )\n\n return f, ax\n\n def get_shortest_path(self):\n best_path = [(0, 0)]\n self.Environment.robot = (0, 0)\n row, col = self.Environment.robot\n while (row, col) != (3, 3):\n action = self.chooseBestAction()\n position = self.action_to_position(action)\n best_path.append(position)\n self.Environment.robot = position\n row, col = self.Environment.robot\n best_path = [(j, i) for i, j in best_path]\n return best_path\n","repo_name":"FHL1998/Frozen_Lake_Web","sub_path":"agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":5719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"18990623897","text":"from typing import Type, Optional\n\nfrom ..entities import BaseNotificationCase\nfrom ..models import ResponseClientNotificationsModel\nfrom ..repos import ClientNotificationRepo\n\n\nclass ClientNotificationsListCase(BaseNotificationCase):\n \"\"\"\n Список оповещений клиента\n \"\"\"\n\n def __init__(self, client_notification_repo: Type[ClientNotificationRepo]) -> None:\n self.client_notification_repo: ClientNotificationRepo = client_notification_repo()\n\n async def __call__(\n self, user_id: int, limit: int, offset: int\n ) -> ResponseClientNotificationsModel:\n notifications, next_page = await self.client_notification_repo.list(\n user_id=user_id, limit=limit, offset=offset\n )\n await 
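update_G above walks each episode backwards, accumulating the return-to-go as target and nudging every visited state's value by alpha * (target - G[state]). The same update in isolation, with a tiny hand-checkable episode (state names and rewards are made up for illustration):

def monte_carlo_update(values, states, rewards, alpha=0.9):
    # Backward pass: target accumulates the return-to-go per state.
    target = 0.0
    for state, reward in zip(reversed(states), reversed(rewards)):
        target += reward
        values[state] += alpha * (target - values[state])
    return values

# Returns-to-go are -1 for 'b' and -2 for 'a', so from zero the values
# move to 0.9 * -1 and 0.9 * -2 respectively.
print(monte_carlo_update({'a': 0.0, 'b': 0.0}, ['a', 'b'], [-1, -1]))
# {'a': -1.8, 'b': -0.9}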
self.client_notification_repo.set_new(\n is_new=False, user_id=user_id, ids=[notification.id for notification in notifications]\n )\n return ResponseClientNotificationsModel(next_page=next_page, results=notifications)\n","repo_name":"r2r2/strana_backend","sub_path":"cabinet/src/notifications/use_cases/client_notifications_list.py","file_name":"client_notifications_list.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"25199488023","text":"################################################################################\nfrom common import *\nfrom PyQt4 import QtCore, QtGui\n\n################################################################################\nclass CCtlIndexer:\n def __init__(self, name):\n # Default button policy\n self.button_policy = QtGui.QSizePolicy()\n self.button_policy.setHorizontalPolicy(QtGui.QSizePolicy.Minimum)\n self.button_policy.setVerticalPolicy(QtGui.QSizePolicy.Minimum)\n\n # Edit-box\n self.edit = QtGui.QLineEdit()\n\n # Decrease button\n self.decr = QtGui.QPushButton(\"-\")\n self.decr.setSizePolicy(self.button_policy)\n self.decr.setMinimumSize(1, 1)\n\n # Increase button\n self.incr = QtGui.QPushButton(\"+\")\n self.incr.setSizePolicy(self.button_policy)\n self.incr.setMinimumSize(1, 1)\n\n # Layout\n self.layout = QtGui.QHBoxLayout()\n self.layout.setAlignment(QtCore.Qt.AlignLeft)\n self.layout.addWidget(self.edit)\n self.layout.addWidget(self.decr)\n self.layout.addWidget(self.incr)\n\n # Widget\n self.widget = QtGui.QWidget()\n self.widget.setLayout(self.layout)\n\n################################################################################\nclass CControls:\n #---------------------------------------------------------------------------\n def CreateUI(self):\n # Custom indexers\n self.control = [\n CCtlIndexer(\"mesh\"),\n CCtlIndexer(\"trig\"),\n CCtlIndexer(\"line\"),\n CCtlIndexer(\"vert\")]\n\n # Create control widget\n self.control_layout = QtGui.QVBoxLayout()\n\n # Add custom indexers to layout \n# for c in self.control: \n# self.control_layout.addWidget(c.widget)\n\n # Apply layout\n self.control_layout.setAlignment(QtCore.Qt.AlignTop)\n self.control_widget = QtGui.QWidget()\n self.control_widget.setLayout(self.control_layout)\n\n #---------------------------------------------------------------------------\n def CreateRootWidget(self, widget):\n self.root_layout = QtGui.QHBoxLayout()\n self.root_layout.addWidget(widget)\n self.root_layout.addWidget(self.control_widget)\n self.root_widget = QtGui.QWidget() \n self.root_widget.setLayout(self.root_layout)\n\n #---------------------------------------------------------------------------\n def CreateWindow(self, title, width, height):\n self.wnd = QtGui.QMainWindow()\n self.wnd.setWindowTitle(title)\n self.wnd.setCentralWidget(self.root_widget)\n self.wnd.setMinimumSize(width, height)\n self.wnd.show()\n\n #---------------------------------------------------------------------------\n CTL_MESH = 0\n CTL_TRIG = 1\n CTL_LINE = 2\n CTL_VERT = 3\n","repo_name":"afomins/terrain","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":2844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"17252275766","text":"import torch\nimport torch.nn as nn\nimport torchvision.models as models\n\n\nclass VisualExtractor(nn.Module):\n def __init__(self, args):\n super(VisualExtractor, self).__init__()\n self.visual_extractor = 
args.visual_extractor\n self.pretrained = args.visual_extractor_pretrained\n model = getattr(models, self.visual_extractor)(pretrained=self.pretrained)\n modules = list(model.children())[:-2]\n self.model = nn.Sequential(*modules)\n self.avg_fnt = torch.nn.AvgPool2d(kernel_size=7, stride=1, padding=0)\n\n def forward(self, images):\n patch_feats = self.model(images)\n avg_feats = self.avg_fnt(patch_feats).squeeze().reshape(-1, patch_feats.size(1))\n batch_size, feat_size, _, _ = patch_feats.shape\n patch_feats = patch_feats.reshape(batch_size, feat_size, -1).permute(0, 2, 1)\n return patch_feats, avg_feats\n","repo_name":"cuhksz-nlp/R2Gen","sub_path":"modules/visual_extractor.py","file_name":"visual_extractor.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":135,"dataset":"github-code","pt":"5"} +{"seq_id":"33603895468","text":"# https://judge.softuni.org/Contests/Compete/Index/1725#4\n\ndef faro_shuffle(seq, shuttles):\n middle_idx = len(seq) // 2\n\n result = seq\n for i in range(shuttles):\n first_half, second_half = result[:middle_idx], result[middle_idx:]\n\n current_shuttle_result = []\n while first_half or second_half:\n current_shuttle_result.extend([first_half.pop(0), second_half.pop(0)])\n\n result = current_shuttle_result\n\n return result\n\n\ncards = input().split(\" \")\nnumber_of_shuttles = int(input())\nprint(faro_shuffle(cards, number_of_shuttles))\n","repo_name":"yavor-gornalov/softuni_programming_fundamentals","sub_path":"python_fundamentals/08_lists_basics_exercise/05_faro_shuffle_ver2.py","file_name":"05_faro_shuffle_ver2.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"2572198038","text":"import torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torchvision.models as models\n\nclass EncoderCNN(nn.Module):\n def __init__(self):\n super(EncoderCNN,self).__init__()\n vgg=models.vgg16(pretrained=True)\n modules=list(vgg.features[i] for i in range(29))\n self.vgg=nn.Sequential(*modules)\n def forward(self,images):\n with torch.no_grad():\n features=self.vgg(images)\n #N,C,H,W=features.size()\n #features=features.view(N,C,H*W)\n #features=features.permute(0,2,1)\n return features\n\nclass DeepMIML(nn.Module):\n def __init__(self,L=1032,K=100):\n super(DeepMIML,self).__init__()\n self.L=L\n self.K=K\n self.conv1=nn.Conv2d(in_channels=512,out_channels=L*K,kernel_size=1)\n self.pool1=nn.MaxPool2d((K,1),stride=(1,1))\n self.activation=nn.Sigmoid()\n self.pool2=nn.MaxPool2d((1,14*14),stride=(1,1))\n def forward(self,features):\n N,C,H,W=features.size()\n n_instances=H*W\n conv1=self.conv1(features)\n conv1=conv1.view(N,self.L,self.K,n_instances)\n pool1=self.pool1(conv1)\n act=self.activation(pool1)\n pool2=self.pool2(act)\n \n out=pool2.view(N,self.L)\n print('out',out[0])\n return out\n\n","repo_name":"Epiphqny/Multiple-instance-learning","sub_path":"deepmiml/deepmiml.py","file_name":"deepmiml.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","stars":127,"dataset":"github-code","pt":"5"} +{"seq_id":"40942016408","text":"#computes percentage error\n\nimport numpy as np\n\n\ndef compute_percentage_error(x, y):\n\n e = 0\n diff = [abs(x[i] - y[i]) for i in range(len(x))]\n\n for i in range (0, len(diff)):\n e += diff[i]\n\n e = ((e * 100)/ 255)\n #print(e)\n return e\n\ndef compute_error_matrix(x, M):\n\n M_transpose = np.transpose(M)\n vector = 
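faro_shuffle above interleaves so the first card of the first half stays on top, which makes it an out-shuffle (and it assumes an even-length sequence, since the while loop pops from both halves each pass). A classic property check, written self-contained so it does not depend on the snippet:

def out_shuffle(deck):
    # One out-shuffle of an even-length deck: the top card stays on top.
    half = len(deck) // 2
    return [card for pair in zip(deck[:half], deck[half:]) for card in pair]

deck = list(range(52))
cut = deck
for _ in range(8):
    cut = out_shuffle(cut)
print(cut == deck)  # True: eight out-shuffles restore a 52-card deck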
[0]*len(x)\n for i in range (len(M)):\n for j in range (0, len(x)):\n vector[j] = M_transpose[j][i]\n compute_percentage_error(x, vector) \n \n\n#Testing purposes\nif __name__ == '__main__':\n M = ([100, 2, 3],[0, 60, 25], [50, 140, 60])\n avg = np.array([0, 50, 100, 250])\n x = np.array([0, 70, 30])\n y = np.array([10, 40, 110, 200])\n #compute_percentage_error(avg, x)\n compute_error_matrix(x, M)\n","repo_name":"AdnanUddin/cj2014","sub_path":"percentage_error.py","file_name":"percentage_error.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"30544448083","text":"\"\"\"\nSponge Knowledge Base\nDemo\n\"\"\"\n\nfrom java.util import Random\n\nclass AsynchronousProvidedActionArg(Action):\n def onConfigure(self):\n self.withLabel(\"Asynchronous provided argument\")\n self.withArgs([\n StringType(\"arg1\").withLabel(\"Argument 1\").withFeatures({\"multiline\":True, \"maxLines\":2}).withReadOnly().withProvided(\n ProvidedMeta().withValue().withOverwrite()),\n StringType(\"arg2\").withLabel(\"Argument 2\").withReadOnly().withProvided(\n ProvidedMeta().withValue().withOverwrite().withDependency(\"arg1\")),\n ]).withNonCallable().withFeatures({\"cancelLabel\":\"Close\"})\n def onProvideArgs(self, context):\n if \"arg1\" in context.provide:\n context.provided[\"arg1\"] = ProvidedValue().withValue(\"v\" + str(Random().nextInt(100) + 1))\n if \"arg2\" in context.provide:\n TimeUnit.SECONDS.sleep(5)\n context.provided[\"arg2\"] = ProvidedValue().withValue(\"First arg is \" + context.current[\"arg1\"])","repo_name":"softelnet/sponge","sub_path":"sponge-app/sponge-app-demo-service/sponge/sponge_demo_asynchronous_provided_action_arg.py","file_name":"sponge_demo_asynchronous_provided_action_arg.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"5"} +{"seq_id":"43265716511","text":"import itertools\n \ndef schet(kolvo: list, res=0) -> int:\n c = 0\n for num, group in itertools.groupby(kolvo):\n lg = len(list(group)) \n c += lg\n if lg >= 3: \n return schet(kolvo[:c-lg]+kolvo[c:], res+lg) \n return res\n \n \n \nprint(schet(input().split())) \n","repo_name":"Ultimatereo/ITMO-UNIVERSITY","sub_path":"algorithms/sem1/lab2/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"21305091639","text":"import pandas as pd\nimport numpy as np\n\nboys = pd.DataFrame({'k':['K0','K1','K2'],'age':[1,2,3]})\ngirls = pd.DataFrame({'k':['K0','K0','K3'],'age':[4,5,6]})\n\nprint(boys)\n'''\n age k\n0 1 K0\n1 2 K1\n2 3 K2\n'''\nprint(girls)\n'''\n age k\n0 4 K0\n1 5 K0\n2 6 K3\n'''\n\n# 目前 age 欄位是重複的,我們為了要區別 boy 與 girl,必須要在新的合併表格中,為 age 欄位取新的名字\n# 使用 suffixes 屬性即可辦到\nres = pd.merge(boys,girls, on='k', suffixes=['_boy','_girl'], how='left')\nprint(res)\n","repo_name":"ivan1003hsu/python_work","sub_path":"pandas/pandas-2.py","file_name":"pandas-2.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"12797130231","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 30 18:09:57 2018\n\n@author: Bruno\n\"\"\"\nx = 0\nfobj = open('large_sum.txt')\nfor line in fobj:\n intline = int(line )\n x = x + intline\nfobj.close()\nprint()\nprint('Die Summe beträgt:' 
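For the data in the merge example above, the left join keeps every boys row, duplicates K0 to match both girls rows, drops girls' unmatched K3, and the suffixes disambiguate the two age columns (column order varies across pandas versions):

import pandas as pd

boys = pd.DataFrame({'k': ['K0', 'K1', 'K2'], 'age': [1, 2, 3]})
girls = pd.DataFrame({'k': ['K0', 'K0', 'K3'], 'age': [4, 5, 6]})
res = pd.merge(boys, girls, on='k', suffixes=['_boy', '_girl'], how='left')
print(res)
#     k  age_boy  age_girl
# 0  K0        1       4.0
# 1  K0        1       5.0
# 2  K1        2       NaN
# 3  K2        3       NaN   (age_girl becomes float because of the NaNs)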
,x)","repo_name":"brunobieberstein/saves","sub_path":"Aufgaben/large_sum.py","file_name":"large_sum.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"6990630738","text":"from threading import Thread, Semaphore\nimport random\nimport time\n\n\ndef cesuo(name, sem):\n sem.acquire()\n print(\"%s进入厕所\" % name)\n # 随机生成上厕所的等待时间\n time.sleep(random.randint(1, 5))\n print(\"%s出了厕所\" % name)\n sem.release()\n\n\nif __name__ == '__main__':\n # 设置只能3个人同时使用资源\n # 在这里的实际意义就是只能有3个人同时在厕所\n sem = Semaphore(3)\n for i in range(30):\n t = Thread(target=cesuo, args=(i, sem))\n t.start()\n","repo_name":"jonbenzhang/python-","sub_path":"01basic_knowledge/base_model/04_多线程/06信号量/first.py","file_name":"first.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"22995899301","text":"from nltk.stem import PorterStemmer, WordNetLemmatizer\nfrom nltk import pos_tag\nfrom nltk.corpus import wordnet\nimport enchant\nimport collections\nimport re\nimport os\n\n\nstemmer = PorterStemmer()\nlemmatizer = WordNetLemmatizer()\nisEnglish = enchant.Dict('en_US')\nallowed_ratio = 0.55\n\n\nregex = re.compile( '(\\\\n|\\?|_|\\[|\\]|{|}|,|\\.|\"|-|!|@|#|\\$|%|\\^|&|\\*|\\(|\\)|~|\\||\\'|/|\\\\\\\\|;|:|<|>)')\n\ninput_folder_path = os.path.join(os.getcwd(), \"parsed\")\noutput_folder_path = os.path.join(os.getcwd(), \"featured\")\n\ndef get_wordnet_pos(word):\n treebank_tag = pos_tag(word)[0][1] # get only pos\n if treebank_tag.startswith('J'):\n return wordnet.ADJ\n elif treebank_tag.startswith('V'):\n return wordnet.VERB\n elif treebank_tag.startswith('N'):\n return wordnet.NOUN\n elif treebank_tag.startswith('R'):\n return wordnet.ADV\n if len(word) > 3 and word[-3:] == \"ing\":\n return 'v'\n else:\n return 'n' # noun is default\n\n\ndef make_dict():\n c = 1\n for root, subdirs, files in os.walk(input_folder_path):\n for file in files:\n try:\n file_path = os.path.join(root, file)\n print(\"#{} File: {}\".format(c ,file_path))\n with open(file_path) as f:\n\n data = f.read()\n data = regex.sub(' ', data).lower().split(' ')\n valid_text = [el for el in data if el != '' and isEnglish.check(el)]\n english_ration = len(valid_text)/len(data)\n if english_ration > allowed_ratio: # if english text > allowed_ratio%\n for word in valid_text:\n tag = get_wordnet_pos(word)\n word = lemmatizer.lemmatize(word, tag)\n\n\n word_dict = dict(collections.Counter(valid_text)) # count words in text\n\n for k, v in word_dict.items():\n word_dict[k] = v/len(valid_text) # calc words' frequency\n\n out_file_path = os.path.join(output_folder_path, file)\n r = open(out_file_path, 'w')\n r.write(str(word_dict))\n r.close()\n\n er = open('log.txt', 'a')\n er.write(\"#{} 0 {} {}\\n\".format(c, english_ration, file))\n er.close()\n\n else:\n er = open('log.txt', 'a')\n er.write(\"#{} 1 NonEn {} {}\\n\".format(c, english_ration, file))\n er.close()\n\n except:\n er = open('log.txt', 'a')\n er.write(\"#{} 1 Exc {}\\n\".format(c, file))\n er.close()\n continue\n c += 1\n\n\ndef test():\n\n text = \"\"\"I was thinking 雷德驤,長安人,太 що я не такий\"\"\"\n\n text = regex.sub(' ', text).lower().split(' ')\n\n valid_text = [el for el in text if el != '' and isEnglish.check(el)]\n\n print(str(len(text)) + \" vs \" + len(valid_text).__str__())\n print(text)\n print(valid_text)\n\n for word in text:\n lemma = lemmatizer.lemmatize(word, get_wordnet_pos(word))\n 
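The semaphore demo above (its comments say: only three people may use the restroom at once) pairs acquire/release by hand; Semaphore is also a context manager, which guarantees the release even if the body raises. A sketch of the same demo using with:

import random
import time
from threading import Semaphore, Thread

sem = Semaphore(3)  # at most three threads inside the block at a time

def restroom(name):
    with sem:  # acquire on entry, release on exit, even on exceptions
        print("%s entered the restroom" % name)
        time.sleep(random.randint(1, 5))
        print("%s left the restroom" % name)

threads = [Thread(target=restroom, args=(i,)) for i in range(30)]
for t in threads:
    t.start()
for t in threads:
    t.join()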
print(lemma)\n\n\nif __name__ == \"__main__\":\n make_dict()\n # test()\n","repo_name":"Hirep/conversation-bot","sub_path":"ml/make_dict.py","file_name":"make_dict.py","file_ext":"py","file_size_in_byte":3303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"42356208684","text":"import datetime\nimport io\nimport json\nimport re\nimport warnings\n\nimport google.auth\nfrom google.oauth2 import service_account\nimport googleapiclient.discovery\nimport googleapiclient.errors\nfrom ..lib import job_model\nfrom ..lib import retry_util\nimport pytz\nimport tenacity\n\n\n# The google v1 provider directly added the bigquery scope, but the v1alpha2\n# API automatically added:\n# - https://www.googleapis.com/auth/compute\n# - https://www.googleapis.com/auth/devstorage.full_control\n# - https://www.googleapis.com/auth/genomics\n# - https://www.googleapis.com/auth/logging.write\n# - https://www.googleapis.com/auth/monitoring.write\n#\n# With the addition of the google v2 provider, we explicitly set all of these\n# scopes such that existing user code continues to work.\n# Note that with the API (for both v2 and cls_v2 provider), the\n# `https://www.googleapis.com/auth/cloud-platform` scope is automatically added.\n# See\n# https://cloud.google.com/life-sciences/docs/reference/rest/v2beta/projects.locations.pipelines/run#serviceaccount\nDEFAULT_SCOPES = [\n 'https://www.googleapis.com/auth/bigquery',\n 'https://www.googleapis.com/auth/compute',\n 'https://www.googleapis.com/auth/devstorage.full_control',\n 'https://www.googleapis.com/auth/genomics',\n 'https://www.googleapis.com/auth/logging.write',\n 'https://www.googleapis.com/auth/monitoring.write',\n]\n\n\n# When attempting to cancel an operation that is already completed\n# (succeeded, failed, or canceled), the response will include:\n# \"error\": {\n# \"code\": 400,\n# \"status\": \"FAILED_PRECONDITION\",\n# }\nFAILED_PRECONDITION_CODE = 400\nFAILED_PRECONDITION_STATUS = 'FAILED_PRECONDITION'\n\n# List of Compute Engine zones, which enables simple wildcard expansion.\n# We could look this up dynamically, but new zones come online\n# infrequently enough, this is easy to keep up with.\n# Also - the Pipelines API may one day directly support zone wildcards.\n#\n# To refresh this list:\n# gcloud compute zones list --format='value(name)' \\\n# | sort | awk '{ printf \" '\\''%s'\\'',\\n\", $1 }'\n_ZONES = [\n 'asia-east1-a',\n 'asia-east1-b',\n 'asia-east1-c',\n 'asia-east2-a',\n 'asia-east2-b',\n 'asia-east2-c',\n 'asia-northeast1-a',\n 'asia-northeast1-b',\n 'asia-northeast1-c',\n 'asia-northeast2-a',\n 'asia-northeast2-b',\n 'asia-northeast2-c',\n 'asia-northeast3-a',\n 'asia-northeast3-b',\n 'asia-northeast3-c',\n 'asia-south1-a',\n 'asia-south1-b',\n 'asia-south1-c',\n 'asia-south2-a',\n 'asia-south2-b',\n 'asia-south2-c',\n 'asia-southeast1-a',\n 'asia-southeast1-b',\n 'asia-southeast1-c',\n 'asia-southeast2-a',\n 'asia-southeast2-b',\n 'asia-southeast2-c',\n 'australia-southeast1-a',\n 'australia-southeast1-b',\n 'australia-southeast1-c',\n 'australia-southeast2-a',\n 'australia-southeast2-b',\n 'australia-southeast2-c',\n 'europe-central2-a',\n 'europe-central2-b',\n 'europe-central2-c',\n 'europe-north1-a',\n 'europe-north1-b',\n 'europe-north1-c',\n 'europe-west1-b',\n 'europe-west1-c',\n 'europe-west1-d',\n 'europe-west2-a',\n 'europe-west2-b',\n 'europe-west2-c',\n 'europe-west3-a',\n 'europe-west3-b',\n 'europe-west3-c',\n 'europe-west4-a',\n 'europe-west4-b',\n 
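get_wordnet_pos above calls pos_tag(word) on a bare string, which NLTK treats as a sequence of one-character tokens, so the tag describes the first letter rather than the word. A corrected sketch of the helper (requires the averaged_perceptron_tagger data):

from nltk import pos_tag
from nltk.corpus import wordnet

def get_wordnet_pos(word):
    # pos_tag expects a list of tokens; [word] tags the whole word.
    treebank_tag = pos_tag([word])[0][1]
    if treebank_tag.startswith('J'):
        return wordnet.ADJ
    if treebank_tag.startswith('V'):
        return wordnet.VERB
    if treebank_tag.startswith('R'):
        return wordnet.ADV
    return wordnet.NOUN  # noun is the safe default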
'europe-west4-c',\n 'europe-west6-a',\n 'europe-west6-b',\n 'europe-west6-c',\n 'northamerica-northeast1-a',\n 'northamerica-northeast1-b',\n 'northamerica-northeast1-c',\n 'northamerica-northeast2-a',\n 'northamerica-northeast2-b',\n 'northamerica-northeast2-c',\n 'southamerica-east1-a',\n 'southamerica-east1-b',\n 'southamerica-east1-c',\n 'us-central1-a',\n 'us-central1-b',\n 'us-central1-c',\n 'us-central1-f',\n 'us-east1-b',\n 'us-east1-c',\n 'us-east1-d',\n 'us-east4-a',\n 'us-east4-b',\n 'us-east4-c',\n 'us-west1-a',\n 'us-west1-b',\n 'us-west1-c',\n 'us-west2-a',\n 'us-west2-b',\n 'us-west2-c',\n 'us-west3-a',\n 'us-west3-b',\n 'us-west3-c',\n 'us-west4-a',\n 'us-west4-b',\n 'us-west4-c',\n]\n\n\ndef get_zones(input_list):\n \"\"\"Returns a list of zones based on any wildcard input.\n\n This function is intended to provide an easy method for producing a list\n of desired zones for a pipeline to run in.\n\n The Pipelines API default zone list is \"any zone\". The problem with\n \"any zone\" is that it can lead to incurring Cloud Storage egress charges\n if the GCE zone selected is in a different region than the GCS bucket.\n See https://cloud.google.com/storage/pricing#network-egress.\n\n A user with a multi-region US bucket would want to pipelines to run in\n a \"us-*\" zone.\n A user with a regional bucket in US would want to restrict pipelines to\n run in a zone in that region.\n\n Rarely does the specific zone matter for a pipeline.\n\n This function allows for a simple short-hand such as:\n [ \"us-*\" ]\n [ \"us-central1-*\" ]\n These examples will expand out to the full list of US and us-central1 zones\n respectively.\n\n Args:\n input_list: list of zone names/patterns\n\n Returns:\n A list of zones, with any wildcard zone specifications expanded.\n \"\"\"\n if not input_list:\n return []\n\n output_list = []\n\n for zone in input_list:\n if zone.endswith('*'):\n prefix = zone[:-1]\n output_list.extend([z for z in _ZONES if z.startswith(prefix)])\n else:\n output_list.append(zone)\n\n return output_list\n\n\nclass Label(job_model.LabelParam):\n \"\"\"Name/value label metadata for a Google Genomics pipeline.\n\n Attributes:\n name (str): the label name.\n value (str): the label value (optional).\n \"\"\"\n _allow_reserved_keys = True\n __slots__ = ()\n\n\ndef build_pipeline_labels(job_metadata, task_metadata, task_id_pattern=None):\n \"\"\"Build a set() of standard job and task labels.\n\n Args:\n job_metadata: Job metadata, such as job-id, job-name, and user-id.\n task_metadata: Task metadata, such as the task-id.\n task_id_pattern: A pattern for the task-id value, such as \"task-%d\"; the\n original google label values could not be strictly numeric, so \"task-\"\n was prepended.\n\n Returns:\n A set of standard dsub Label() objects to attach to a pipeline.\n \"\"\"\n labels = {\n Label(name, job_metadata[name])\n for name in ['job-name', 'job-id', 'user-id', 'dsub-version']\n }\n\n task_id = task_metadata.get('task-id')\n if task_id is not None: # Check for None (as 0 is conceivably valid)\n if task_id_pattern:\n task_id = task_id_pattern % task_id\n labels.add(Label('task-id', str(task_id)))\n\n task_attempt = task_metadata.get('task-attempt')\n if task_attempt is not None:\n labels.add(Label('task-attempt', str(task_attempt)))\n\n return labels\n\n\ndef parse_rfc3339_utc_string(rfc3339_utc_string):\n \"\"\"Converts a datestamp from RFC3339 UTC to a datetime.\n\n Args:\n rfc3339_utc_string: a datetime string in RFC3339 UTC \"Zulu\" format\n\n Returns:\n A datetime.\n 
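get_zones above expands a trailing * against the static _ZONES list, so a region prefix selects every zone in that region while plain names pass through unchanged. Concrete calls, assuming the definitions above:

print(get_zones(['us-central1-*']))
# ['us-central1-a', 'us-central1-b', 'us-central1-c', 'us-central1-f']
print(get_zones(['europe-west6-a', 'asia-south2-*']))
# ['europe-west6-a', 'asia-south2-a', 'asia-south2-b', 'asia-south2-c']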
\"\"\"\n\n # The timestamp from the Google Operations are all in RFC3339 format, but\n # they are sometimes formatted to millisconds, microseconds, sometimes\n # nanoseconds, and sometimes only seconds:\n # * 2016-11-14T23:05:56Z\n # * 2016-11-14T23:05:56.010Z\n # * 2016-11-14T23:05:56.010429Z\n # * 2016-11-14T23:05:56.010429380Z\n m = re.match(r'(\\d{4})-(\\d{2})-(\\d{2})T(\\d{2}):(\\d{2}):(\\d{2}).?(\\d*)Z',\n rfc3339_utc_string)\n\n # It would be unexpected to get a different date format back from Google.\n # If we raise an exception here, we can break people completely.\n # Instead, let's just return None and people can report that some dates\n # are not showing up.\n # We might reconsider this approach in the future; it was originally\n # established when dates were only used for display.\n if not m:\n return None\n\n groups = m.groups()\n if len(groups[6]) not in (0, 3, 6, 9):\n return None\n\n # Create a UTC datestamp from parsed components\n # 1- Turn components 0-5 from strings to integers\n # 2- If the last component does not exist, set it to 0.\n # If it does exist, make sure to interpret it as milliseconds.\n g = [int(val) for val in groups[:6]]\n\n fraction = groups[6]\n if not fraction:\n micros = 0\n elif len(fraction) == 3:\n micros = int(fraction) * 1000\n elif len(fraction) == 6:\n micros = int(fraction)\n elif len(fraction) == 9:\n # When nanoseconds are provided, we round\n micros = int(round(int(fraction) // 1000))\n else:\n assert False, 'Fraction length not 0, 6, or 9: {}'.format(len(fraction))\n\n try:\n return datetime.datetime(\n g[0], g[1], g[2], g[3], g[4], g[5], micros, tzinfo=pytz.utc)\n except ValueError as e:\n assert False, 'Could not parse RFC3339 datestring: {} exception: {}'.format(\n rfc3339_utc_string, e)\n\n\ndef get_operation_full_job_id(op):\n \"\"\"Returns the job-id or job-id.task-id for the operation.\"\"\"\n job_id = op.get_field('job-id')\n task_id = op.get_field('task-id')\n if task_id:\n return '%s.%s' % (job_id, task_id)\n else:\n return job_id\n\n\ndef _cancel_batch(batch_fn, cancel_fn, ops):\n \"\"\"Cancel a batch of operations.\n\n Args:\n batch_fn: API-specific batch function.\n cancel_fn: API-specific cancel function.\n ops: A list of operations to cancel.\n\n Returns:\n A list of operations canceled and a list of error messages.\n \"\"\"\n\n # We define an inline callback which will populate a list of\n # successfully canceled operations as well as a list of operations\n # which were not successfully canceled.\n\n canceled = []\n failed = []\n\n def handle_cancel_response(request_id, response, exception):\n \"\"\"Callback for the cancel response.\"\"\"\n del response # unused\n\n if exception:\n # We don't generally expect any failures here, except possibly trying\n # to cancel an operation that is already canceled or finished.\n #\n # If the operation is already finished, provide a clearer message than\n # \"error 400: Bad Request\".\n\n msg = 'error %s: %s' % (exception.resp.status, exception.resp.reason)\n if exception.resp.status == FAILED_PRECONDITION_CODE:\n detail = json.loads(exception.content)\n status = detail.get('error', {}).get('status')\n if status == FAILED_PRECONDITION_STATUS:\n msg = 'Not running'\n\n failed.append({'name': request_id, 'msg': msg})\n else:\n canceled.append({'name': request_id})\n\n return\n\n # Set up the batch object\n batch = batch_fn(callback=handle_cancel_response)\n\n # The callback gets a \"request_id\" which is the operation name.\n # Build a dict such that after the callback, we can lookup the 
operation\n # objects by name\n ops_by_name = {}\n for op in ops:\n op_name = op.get_field('internal-id')\n ops_by_name[op_name] = op\n try:\n batch.add(cancel_fn(name=op_name, body={}), request_id=op_name)\n except TypeError:\n # Batch API delete_job method doesn't take a body parameter\n batch.add(cancel_fn(name=op_name), request_id=op_name)\n\n # Cancel the operations\n batch.execute()\n\n # Iterate through the canceled and failed lists to build our return lists\n canceled_ops = [ops_by_name[op['name']] for op in canceled]\n error_messages = []\n for fail in failed:\n op = ops_by_name[fail['name']]\n error_messages.append(\"Error canceling '%s': %s\" %\n (get_operation_full_job_id(op), fail['msg']))\n\n return canceled_ops, error_messages\n\n\ndef cancel(batch_fn, cancel_fn, ops):\n \"\"\"Cancel operations.\n\n Args:\n batch_fn: API-specific batch function.\n cancel_fn: API-specific cancel function.\n ops: A list of operations to cancel.\n\n Returns:\n A list of operations canceled and a list of error messages.\n \"\"\"\n\n # Canceling many operations one-by-one can be slow.\n # The Pipelines API doesn't directly support a list of operations to cancel,\n # but the requests can be performed in batch.\n\n canceled_ops = []\n error_messages = []\n\n max_batch = 256\n total_ops = len(ops)\n for first_op in range(0, total_ops, max_batch):\n batch_canceled, batch_messages = _cancel_batch(\n batch_fn, cancel_fn, ops[first_op:first_op + max_batch])\n canceled_ops.extend(batch_canceled)\n error_messages.extend(batch_messages)\n\n return canceled_ops, error_messages\n\n\n# Exponential backoff retrying API discovery.\n# Maximum 23 retries. Wait 1, 2, 4 ... 64, 64, 64... seconds.\n@tenacity.retry(\n stop=tenacity.stop_after_attempt(retry_util.MAX_API_ATTEMPTS),\n retry=retry_util.retry_api_check,\n wait=tenacity.wait_exponential(multiplier=1, max=64),\n retry_error_callback=retry_util.on_give_up)\n# For API errors dealing with auth, we want to retry, but not as often\n# Maximum 4 retries. 
Wait 1, 2, 4, 8 seconds.\n@tenacity.retry(\n stop=tenacity.stop_after_attempt(retry_util.MAX_AUTH_ATTEMPTS),\n retry=retry_util.retry_auth_check,\n wait=tenacity.wait_exponential(multiplier=1, max=8),\n retry_error_callback=retry_util.on_give_up)\ndef setup_service(api_name, api_version, credentials=None):\n \"\"\"Configures genomics API client.\n\n Args:\n api_name: Name of the Google API (for example: \"genomics\")\n api_version: Version of the API (for example: \"v2alpha1\")\n credentials: Credentials to be used for the gcloud API calls.\n\n Returns:\n A configured Google Genomics API client with appropriate credentials.\n \"\"\"\n # dsub is not a server application, so it is ok to filter this warning.\n warnings.filterwarnings(\n 'ignore', 'Your application has authenticated using end user credentials')\n if not credentials:\n credentials, _ = google.auth.default()\n # Set cache_discovery to False because we use google-auth\n # See https://github.com/googleapis/google-api-python-client/issues/299\n return googleapiclient.discovery.build(\n api_name, api_version, cache_discovery=False, credentials=credentials)\n\n\ndef credentials_from_service_account_info(credentials_file):\n with io.open(credentials_file, 'r', encoding='utf-8') as json_fi:\n credentials_info = json.load(json_fi)\n return service_account.Credentials.from_service_account_info(credentials_info)\n\n\nclass Api(object):\n \"\"\"Wrapper around API execution with exponential backoff retries.\"\"\"\n\n # Exponential backoff retrying API execution\n # Maximum 23 retries. Wait 1, 2, 4 ... 64, 64, 64... seconds.\n @tenacity.retry(\n stop=tenacity.stop_after_attempt(retry_util.MAX_API_ATTEMPTS),\n retry=retry_util.retry_api_check,\n wait=tenacity.wait_exponential(multiplier=1, max=64),\n retry_error_callback=retry_util.on_give_up)\n # For API errors dealing with auth, we want to retry, but not as often\n # Maximum 4 retries. 
Wait 1, 2, 4, 8 seconds.\n @tenacity.retry(\n stop=tenacity.stop_after_attempt(retry_util.MAX_AUTH_ATTEMPTS),\n retry=retry_util.retry_auth_check,\n wait=tenacity.wait_exponential(multiplier=1, max=8),\n retry_error_callback=retry_util.on_give_up)\n def execute(self, api):\n \"\"\"Executes operation.\n\n Args:\n api: The base API object\n\n Returns:\n A response body object\n \"\"\"\n return api.execute()\n\n\nif __name__ == '__main__':\n pass\n","repo_name":"DataBiosphere/dsub","sub_path":"dsub/providers/google_base.py","file_name":"google_base.py","file_ext":"py","file_size_in_byte":14984,"program_lang":"python","lang":"en","doc_type":"code","stars":253,"dataset":"github-code","pt":"5"} +{"seq_id":"26669866333","text":"import logging\nimport os\nimport pathlib\nimport sys\nimport types\nimport string\n\nimport yaml\n\n\n# Local modules\n\nfrom modules.fail import fail\nfrom modules.messages import formatMessageList\nfrom modules.nestedns import objToNestedNs\nfrom modules.tools import readInput\n\n\n# Classes\n\nclass ConfigBase:\n \"\"\" Configuration bases operations \"\"\"\n\n @staticmethod\n def cleanup(subtree):\n \"\"\" Set all items to <item.value>\n (drop item.description and item.required) \"\"\"\n\n cleantree = {}\n\n for itemK, itemV in subtree.items():\n if isinstance(itemV, dict):\n if 'value' in itemV.keys():\n cleantree[itemK] = itemV['value']\n else:\n cleantree[itemK] = ConfigBase.cleanup(itemV)\n\n return cleantree\n\n def __init__(self, ctx, templateFile, instanceFile, create):\n self._ctx = ctx\n self._templateFile = templateFile\n self._instanceFile = instanceFile\n\n if not hasattr(self, '_noFileMsg'):\n self._noFileMsg = f\"Configuration file '{instanceFile}' does not exist\"\n\n if create:\n self._instance = None\n else:\n self._instance = self._read(instanceFile)\n\n # Public functions\n\n def get(self):\n \"\"\" Get cleaned up configuration as nested namespace \"\"\"\n return objToNestedNs(self.getObj())\n\n def getObj(self):\n \"\"\" Get cleaned up configuration as nested dicts \"\"\"\n if not self._instance:\n fail(self._noFileMsg)\n\n return ConfigBase.cleanup(self._instance)\n\n def create(self, hideDescriptions):\n \"\"\" Create new configuration from template \"\"\"\n try:\n # pylint: disable=unspecified-encoding\n with open(self._templateFile, 'r') as tmplFh:\n self._instance = yaml.load(tmplFh.read(), Loader=yaml.Loader)\n except IOError:\n fail(f\"Error reading from file {self._templateFile}\")\n\n self.edit(hideDescriptions)\n\n def edit(self, hideDescriptions):\n \"\"\" Edit existing configuration \"\"\"\n if not self._instance:\n fail(self._noFileMsg)\n\n pOps = types.SimpleNamespace()\n pOps.extend = lambda path, key: f'{path}.{key}'\n pOps.envName = lambda path: f\"SOOS_{type(self).__name__}{path.replace('.', '_')}\".upper()\n pOps.envValue = lambda path: os.getenv(pOps.envName(path))\n\n def getMsg(item, path, descs):\n printWidth = 70\n msg = '- '*int(printWidth/2) + '\\n'\n msg += f'Parameter: {path[1:]}\\n'\n msg += f\"Required: {'yes' if item['required'] else 'no'}\\n\"\n msg += f'Environment: {pOps.envName(path)}'\n if descs and not hideDescriptions:\n msg += '\\n'\n level = 0\n for desc in descs:\n msg += f\"\\n{formatMessageList([desc], printWidth, ' ', level)}\"\n level += 1\n msg += '\\n'\n return msg\n\n def getDefault(item, path):\n default = pOps.envValue(path)\n if not default:\n default = item['value'] if 'value' in item.keys() else ''\n if default is None:\n default = ''\n return default\n\n def setValueNonInteractive(item, path, 
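The stacked tenacity decorators above give capped exponential backoff: up to MAX_API_ATTEMPTS tries waiting 1, 2, 4 ... 64 seconds for general API errors, plus a shorter ladder for auth errors. The pattern in isolation (the limits and exception type here are illustrative, not dsub's):

import random

import tenacity

@tenacity.retry(
    stop=tenacity.stop_after_attempt(5),                  # at most 5 tries
    wait=tenacity.wait_exponential(multiplier=1, max=8),  # wait 1, 2, 4, 8 s
    retry=tenacity.retry_if_exception_type(TimeoutError))
def flaky_call():
    if random.random() < 0.7:
        raise TimeoutError('transient failure')
    return 'ok'

print(flaky_call())  # retried on TimeoutError; reraises after 5 failed tries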
descs):\n item['value'] = getDefault(item, path)\n if not item['value'] and item['required']:\n print(f\"{getMsg(item, path, descs)}\\n\"\n f\"CAN'T SET REQUIRED VALUE - SET ENVIRONMENT VARIABLE\\n\\n\"\n f\" '{pOps.envName(path)}'\\n\\n\"\n f\"TO THE APPROPRIATE VALUE.\\n\", file=sys.stderr)\n\n def setValueInteractive(item, path, descs, hideInput):\n default = getDefault(item, path)\n msg = getMsg(item, path, descs)\n\n specialChars = True\n while specialChars:\n print(msg)\n item['value'] = readInput('Enter value', default,\n not item['required'], hideInput, hideInput)\n if set(item['value']).difference(string.printable):\n print(\"Found one or more special characters in input\\n\"\n \"This might be a backspace or a tab\\n\\n\"\n \"Re-enter your input\")\n else:\n specialChars = False\n print()\n\n def editRecursive(item, path, descs, hideInput):\n\n if 'description' in list(item.keys()):\n descs.append(item['description'].strip())\n\n if 'value' in list(item.keys()):\n\n # Found an entry\n\n if self._ctx.ar.non_interactive:\n setValueNonInteractive(item, path, descs)\n\n else:\n setValueInteractive(item, path, descs, hideInput)\n\n else:\n\n # Recurse until we find an entry\n\n for key, value in item.items():\n if isinstance(value, dict):\n editRecursive(value, pOps.extend(path, key), descs, key == 'password')\n\n if 'description' in list(item.keys()):\n descs.pop()\n\n editRecursive(self._instance, '', [], False)\n\n self._write(self._instanceFile, self._instance)\n\n # Private functions\n\n def _readFile(self, fileName):\n contents = None\n\n if not pathlib.Path(fileName).is_file():\n logging.info(f\"File '{fileName}' does not exist\")\n else:\n try:\n # pylint: disable=unspecified-encoding\n with open(fileName, 'r') as ctFh:\n contents = ctFh.read()\n except IOError:\n fail(f\"Error reading from file {fileName}\")\n\n return contents\n\n def _read(self, fileName):\n contents = self._readFile(fileName)\n\n if contents:\n parsed = yaml.load(contents, Loader=yaml.Loader)\n else:\n parsed = None\n\n return parsed\n\n def _writeFile(self, fileName, contents):\n try:\n # pylint: disable=unspecified-encoding\n with open(fileName, 'w') as wFh:\n wFh.write(contents)\n except IOError:\n fail(f\"Error writing to file {fileName}\")\n\n def _write(self, fileName, contents):\n self._writeFile(fileName, yaml.dump(contents))\n","repo_name":"IBM/containerization-for-sap-s4hana","sub_path":"tools/modules/configbase.py","file_name":"configbase.py","file_ext":"py","file_size_in_byte":6453,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"5"} +{"seq_id":"19975734076","text":"import heroku3\nimport datetime\nimport logging\nfrom ravager.database.tasks import Tasks\nfrom ravager.config import APP_URL, HEROKU_API_TOKEN\nfrom urllib.parse import urlparse\n\nlogger = logging.getLogger(__file__)\n\n\ndef get_ravager():\n heroku_conn = heroku3.from_key(HEROKU_API_TOKEN)\n app_name = urlparse(APP_URL).netloc.split(\".\")\n if app_name[0] == \"www\":\n app_name = app_name[1]\n else:\n app_name = app_name[0]\n ravager = heroku_conn.apps()[app_name].dynos()[0]\n return ravager\n\n\ndef restart_required():\n ravager = get_ravager()\n state = ravager.state\n updated_at = ravager.updated_at.replace(tzinfo=None)\n now = datetime.datetime.now()\n diff = datetime.timedelta(hours=6)\n if state == \"up\" and now - updated_at >= diff:\n return True\n return False\n\n\ndef get_uptime():\n ravager = get_ravager()\n state = ravager.state\n if state == \"up\":\n now = 
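The pOps helpers above derive an environment-variable override name from the configuration class name plus the dotted parameter path, uppercased with dots turned into underscores. The mapping in isolation (class and path are illustrative):

def env_name(class_name, dotted_path):
    # Mirrors pOps.envName: 'SOOS_' + class name + path, dots -> underscores.
    return f"SOOS_{class_name}{dotted_path.replace('.', '_')}".upper()

print(env_name('Config', '.db.password'))  # SOOS_CONFIG_DB_PASSWORD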
datetime.datetime.now()\n shutdown_time = ravager.updated_at.replace(tzinfo=None) + datetime.timedelta(hours=24)\n return shutdown_time - now\n else:\n raise SystemExit(\"Heroku Dyno not up\")\n\n\ndef restart_dyno(admin=False):\n if admin or (restart_required() and Tasks().get_active_tasks() <= 0):\n logger.info(\"Restarting Dyno\")\n resp = get_ravager().restart()\n logger.info(\"Dyno Starting Up\")\n clear = Tasks().clear()\n logger.info(\"Cleared database with {} tasks\".format(clear))\n return resp\n\n","repo_name":"CoolFool/Ravager","sub_path":"ravager/helpers/heroku.py","file_name":"heroku.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"5"} +{"seq_id":"20695419468","text":"from __future__ import absolute_import\nimport operator\nimport uuid\n\nfrom oslo.config import cfg\nimport sqlalchemy\nfrom sqlalchemy.dialects import postgresql\nfrom sqlalchemy.ext import declarative\nfrom sqlalchemy import types\n\nfrom gnocchi import indexer\nfrom gnocchi.openstack.common.db import exception\nfrom gnocchi.openstack.common.db.sqlalchemy import models\nfrom gnocchi.openstack.common.db.sqlalchemy import session\n\n\ncfg.CONF.import_opt('connection', 'gnocchi.openstack.common.db.options',\n group='database')\n\n\nBase = declarative.declarative_base()\n\n\nclass GUID(types.TypeDecorator):\n \"\"\"Platform-independent GUID type.\n\n Uses Postgresql's UUID type, otherwise uses\n CHAR(32), storing as stringified hex values.\n\n \"\"\"\n impl = types.CHAR\n\n def load_dialect_impl(self, dialect):\n if dialect.name == 'postgresql':\n return dialect.type_descriptor(postgresql.UUID())\n return dialect.type_descriptor(types.CHAR(32))\n\n def process_bind_param(self, value, dialect):\n if value is None:\n return value\n elif dialect.name == 'postgresql':\n return str(value)\n if not isinstance(value, uuid.UUID):\n return \"%.32x\" % uuid.UUID(value)\n # hexstring\n return \"%.32x\" % value\n\n def process_result_value(self, value, dialect):\n if value is None:\n return value\n return uuid.UUID(value)\n\n\nResourceEntity = sqlalchemy.Table(\n 'resource_entity',\n Base.metadata,\n sqlalchemy.Column('resource', GUID,\n sqlalchemy.ForeignKey('resource.id',\n ondelete=\"CASCADE\")),\n sqlalchemy.Column('entity', types.Text,\n sqlalchemy.ForeignKey('entity.name',\n ondelete=\"CASCADE\"))\n)\n\n\nclass Entity(Base, models.ModelBase):\n __tablename__ = 'entity'\n\n name = sqlalchemy.Column(types.Text, primary_key=True)\n\n\nclass Resource(Base, models.ModelBase):\n __tablename__ = 'resource'\n\n id = sqlalchemy.Column(GUID, primary_key=True)\n entities = sqlalchemy.orm.relationship(\n 'Entity',\n backref='resources',\n secondary=ResourceEntity)\n\n\nclass SQLAlchemyIndexer(indexer.IndexerDriver):\n def __init__(self, conf):\n self.engine_facade = session.EngineFacade.from_config(\n conf.database.connection, conf)\n\n def upgrade(self):\n engine = self.engine_facade.get_engine()\n Base.metadata.create_all(engine)\n\n def create_resource(self, uuid, entities=[]):\n session = self.engine_facade.get_session()\n with session.begin():\n # FIXME(jd) Seriously, THERE IS NOT NEED TO DO THAT. But someone\n # sucks, either me or the ORM. Please fix that so there's no\n # need to select before inserting FFS. What needs to be done is\n # an INSERT in resources and then an INSERT into ResourceEntity;\n # that last one should fails if the entity does not exist, so we\n # just have to raise back to the caller! 
I offer a pack of beer\n # to whoever fix that.\n loaded_entities = []\n for e in entities:\n entity = session.query(Entity).filter(Entity.name == e).first()\n if not entity:\n raise indexer.NoSuchEntity(e)\n loaded_entities.append(entity)\n r = Resource(id=uuid, entities=loaded_entities)\n session.add(r)\n return {\"id\": r['id'],\n 'entities': map(operator.attrgetter('name'),\n r['entities'])}\n\n def get_resource(self, uuid):\n session = self.engine_facade.get_session()\n with session.begin():\n q = session.query(Resource).filter(Resource.id == uuid)\n r = q.first()\n return {\"id\": r['id'],\n 'entities': map(operator.attrgetter('name'),\n r['entities'])}\n\n def create_entity(self, name):\n session = self.engine_facade.get_session()\n try:\n with session.begin():\n session.add(Entity(name=name))\n except exception.DBDuplicateEntry:\n raise indexer.EntityAlreadyExists(name)\n\n def delete_entity(self, name):\n session = self.engine_facade.get_session()\n with session.begin():\n session.query(Entity).filter(Entity.name == name).delete()\n","repo_name":"eglynn/gnocchi","sub_path":"gnocchi/indexer/sqlalchemy.py","file_name":"sqlalchemy.py","file_ext":"py","file_size_in_byte":4473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"38499337605","text":"from pathlib import Path\nfrom unittest import mock\n\nimport pytest\n\nfrom pcapi import settings\nfrom pcapi.core.educational import models as educational_models\nfrom pcapi.core.educational.exceptions import CantGetImageFromUrl\nimport pcapi.core.educational.factories as educational_factories\nimport pcapi.core.offerers.factories as offerers_factories\nfrom pcapi.models import offer_mixin\nfrom pcapi.models import validation_status_mixin\nfrom pcapi.utils.date import format_into_utc_date\nfrom pcapi.utils.human_ids import dehumanize\nfrom pcapi.utils.human_ids import humanize\n\nimport tests\n\n\nIMAGES_DIR = Path(tests.__path__[0]) / \"files\"\nUPLOAD_FOLDER = settings.LOCAL_STORAGE_DIR / educational_models.CollectiveOffer.FOLDER\n\n\n@pytest.mark.usefixtures(\"db_session\")\nclass Returns200Test:\n def teardown_method(self, *args):\n \"\"\"clear images after each tests\"\"\"\n storage_folder = UPLOAD_FOLDER / educational_models.CollectiveOffer.__name__.lower()\n if storage_folder.exists():\n for child in storage_folder.iterdir():\n if not child.is_file():\n continue\n child.unlink()\n\n def test_duplicate_collective_offer_image(self, client):\n # Given\n offerer = offerers_factories.OffererFactory()\n offerers_factories.UserOffererFactory(offerer=offerer, user__email=\"user@example.com\")\n venue = offerers_factories.VenueFactory(managingOfferer=offerer)\n image_oiseau_bytes = (IMAGES_DIR / \"mouette_full_size.jpg\").read_bytes()\n offer = educational_factories.CollectiveOfferFactory(\n venue=venue,\n imageId=\"00000125999998\",\n imageCredit=\"vision d'horreur selon Hitchcock\",\n imageCrop={\"gnagna\": \"Non\"},\n imageHasOriginal=False,\n )\n\n offer_id = offer.id\n educational_factories.CollectiveStockFactory(collectiveOffer=offer)\n\n with mock.patch(\"pcapi.core.educational.api.offer.get_image_from_url\", return_value=image_oiseau_bytes):\n response = client.with_session_auth(\"user@example.com\").post(f\"/collective/offers/{offer_id}/duplicate\")\n\n # Then\n duplicate = educational_models.CollectiveOffer.query.filter_by(id=dehumanize(response.json[\"id\"])).one()\n assert response.status_code == 201\n assert response.json[\"imageCredit\"] == offer.imageCredit\n assert 
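The FIXME in create_resource above asks for a plain INSERT into resource_entity that fails on an unknown entity instead of a SELECT per entity. A hedged sketch of that shape, reusing the snippet's Resource/ResourceEntity/indexer names; depending on the backend and the oslo session wrapper, the failure may surface as a wrapped DBError rather than a raw IntegrityError:

import sqlalchemy.exc

def create_resource_fast(session, resource_id, entity_names):
    try:
        with session.begin():
            session.add(Resource(id=resource_id))
            if entity_names:
                # Let the foreign key on resource_entity.entity do the
                # existence check instead of selecting each Entity first.
                session.execute(ResourceEntity.insert(), [
                    {'resource': resource_id, 'entity': name}
                    for name in entity_names])
    except sqlalchemy.exc.IntegrityError:
        # The constraint violation does not say which name was missing.
        raise indexer.NoSuchEntity(entity_names)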
response.json[\"imageUrl\"] == duplicate.imageUrl\n\n def test_duplicate_collective_offer(self, client):\n # Given\n offerer = offerers_factories.OffererFactory()\n offerers_factories.UserOffererFactory(offerer=offerer, user__email=\"user@example.com\")\n venue = offerers_factories.VenueFactory(managingOfferer=offerer)\n offer = educational_factories.CollectiveOfferFactory(\n venue=venue,\n )\n offer_id = offer.id\n educational_factories.CollectiveStockFactory(collectiveOffer=offer)\n # When\n response = client.with_session_auth(\"user@example.com\").post(f\"/collective/offers/{offer_id}/duplicate\")\n\n # Then\n assert response.status_code == 201\n duplicate = educational_models.CollectiveOffer.query.filter_by(id=dehumanize(response.json[\"id\"])).one()\n assert response.json == {\n \"audioDisabilityCompliant\": False,\n \"mentalDisabilityCompliant\": False,\n \"motorDisabilityCompliant\": False,\n \"visualDisabilityCompliant\": False,\n \"id\": humanize(duplicate.id),\n \"bookingEmails\": [\n \"collectiveofferfactory+booking@example.com\",\n \"collectiveofferfactory+booking@example2.com\",\n ],\n \"dateCreated\": format_into_utc_date(duplicate.dateCreated),\n \"description\": offer.description,\n \"durationMinutes\": None,\n \"students\": [\"Lycée - Seconde\"],\n \"offerVenue\": {\"addressType\": \"other\", \"otherAddress\": \"1 rue des polissons, Paris 75017\", \"venueId\": \"\"},\n \"contactEmail\": \"collectiveofferfactory+contact@example.com\",\n \"contactPhone\": \"+33199006328\",\n \"hasBookingLimitDatetimesPassed\": False,\n \"offerId\": None,\n \"isActive\": True,\n \"isEditable\": True,\n \"nonHumanizedId\": duplicate.id,\n \"name\": offer.name,\n \"subcategoryId\": offer.subcategoryId,\n \"venue\": {\n \"audioDisabilityCompliant\": False,\n \"mentalDisabilityCompliant\": False,\n \"motorDisabilityCompliant\": False,\n \"visualDisabilityCompliant\": False,\n \"address\": \"1 boulevard Poissonnière\",\n \"bookingEmail\": venue.bookingEmail,\n \"city\": \"Paris\",\n \"comment\": None,\n \"dateCreated\": format_into_utc_date(venue.dateCreated),\n \"dateModifiedAtLastProvider\": format_into_utc_date(venue.dateModifiedAtLastProvider),\n \"departementCode\": \"75\",\n \"fieldsUpdated\": [],\n \"id\": humanize(venue.id),\n \"idAtProviders\": None,\n \"isVirtual\": False,\n \"lastProviderId\": None,\n \"latitude\": 48.87004,\n \"longitude\": 2.3785,\n \"managingOfferer\": {\n \"address\": \"1 boulevard Poissonnière\",\n \"city\": \"Paris\",\n \"dateCreated\": format_into_utc_date(venue.managingOfferer.dateCreated),\n \"dateModifiedAtLastProvider\": format_into_utc_date(\n venue.managingOfferer.dateModifiedAtLastProvider\n ),\n \"id\": humanize(offerer.id),\n \"idAtProviders\": None,\n \"isActive\": True,\n \"isValidated\": True,\n \"lastProviderId\": None,\n \"name\": venue.managingOfferer.name,\n \"postalCode\": \"75000\",\n \"siren\": venue.managingOfferer.siren,\n \"thumbCount\": 0,\n },\n \"managingOffererId\": humanize(offerer.id),\n \"nonHumanizedId\": venue.id,\n \"name\": venue.name,\n \"postalCode\": \"75000\",\n \"publicName\": venue.publicName,\n \"siret\": venue.siret,\n \"thumbCount\": 0,\n \"venueLabelId\": None,\n },\n \"venueId\": humanize(offer.venueId),\n \"status\": \"ACTIVE\",\n \"domains\": [],\n \"interventionArea\": [\"93\", \"94\", \"95\"],\n \"isCancellable\": False,\n \"imageCredit\": None,\n \"imageUrl\": None,\n \"isBookable\": True,\n \"collectiveStock\": {\n \"id\": humanize(duplicate.collectiveStock.id),\n \"isBooked\": False,\n \"isCancellable\": False,\n 
\"beginningDatetime\": format_into_utc_date(offer.collectiveStock.beginningDatetime),\n \"bookingLimitDatetime\": format_into_utc_date(offer.collectiveStock.bookingLimitDatetime),\n \"price\": 100.0,\n \"numberOfTickets\": 25,\n \"educationalPriceDetail\": None,\n \"isEducationalStockEditable\": True,\n },\n \"institution\": None,\n \"isVisibilityEditable\": True,\n \"templateId\": None,\n \"lastBookingStatus\": None,\n \"lastBookingId\": None,\n \"teacher\": None,\n \"isPublicApi\": False,\n }\n\n def test_duplicate_collective_offer_draft_offer(self, client):\n # Given\n offerer = offerers_factories.OffererFactory()\n offerers_factories.UserOffererFactory(offerer=offerer, user__email=\"user@example.com\")\n venue = offerers_factories.VenueFactory(managingOfferer=offerer)\n offer = educational_factories.CollectiveOfferFactory(\n venue=venue,\n validation=offer_mixin.OfferValidationStatus.DRAFT,\n )\n offer_id = offer.id\n educational_factories.CollectiveStockFactory(collectiveOffer=offer)\n\n # When\n\n response = client.with_session_auth(\"user@example.com\").post(f\"/collective/offers/{offer_id}/duplicate\")\n\n assert response.status_code == 403\n assert response.json == {\"validation\": [\"l'offre ne passe pas la validation\"]}\n\n def test_duplicate_collective_offer_offerer_not_validated(self, client):\n # Given\n offerer = offerers_factories.OffererFactory(\n validationStatus=validation_status_mixin.ValidationStatus.REJECTED,\n )\n offerers_factories.UserOffererFactory(offerer=offerer, user__email=\"user@example.com\")\n venue = offerers_factories.VenueFactory(managingOfferer=offerer)\n offer = educational_factories.CollectiveOfferFactory(venue=venue)\n offer_id = offer.id\n educational_factories.CollectiveStockFactory(collectiveOffer=offer)\n\n # When\n\n response = client.with_session_auth(\"user@example.com\").post(f\"/collective/offers/{offer_id}/duplicate\")\n\n assert response.status_code == 403\n assert response.json == {\"offerer\": [\"la structure n'est pas autorisée à dupliquer l'offre\"]}\n\n def test_duplicate_collective_offer_image_not_found(self, client):\n # Given\n offerer = offerers_factories.OffererFactory()\n offerers_factories.UserOffererFactory(offerer=offerer, user__email=\"user@example.com\")\n venue = offerers_factories.VenueFactory(managingOfferer=offerer)\n offer = educational_factories.CollectiveOfferFactory(\n venue=venue,\n imageId=\"00000125999998\",\n imageCredit=\"vision d'horreur selon Hitchcock\",\n imageCrop={\"gnagna\": \"Non\"},\n imageHasOriginal=False,\n )\n\n offer_id = offer.id\n educational_factories.CollectiveStockFactory(collectiveOffer=offer)\n\n with mock.patch(\n \"pcapi.core.educational.api.offer.get_image_from_url\",\n side_effect=CantGetImageFromUrl,\n ):\n response = client.with_session_auth(\"user@example.com\").post(f\"/collective/offers/{offer_id}/duplicate\")\n\n # Then\n # duplicate = educational_models.CollectiveOffer.query.filter_by(id=dehumanize(response.json[\"id\"])).one()\n assert response.status_code == 404\n","repo_name":"mariedestandau/poc-next-pro","sub_path":"api/tests/routes/pro/post_duplicate_collective_offer_and_stock_test.py","file_name":"post_duplicate_collective_offer_and_stock_test.py","file_ext":"py","file_size_in_byte":10360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"43614353778","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtWidgets import QMessageBox\nfrom Selection_tree_window import Ui_Selection_tree\nfrom 
Liquid_fuel_window import Ui_LIQUID_FUEL\nfrom Gaseous_fuel_window import Ui_GASEOUS_FUEL\nimport sys\n\napp = QtWidgets.QApplication(sys.argv)\n\nSelection_tree = QtWidgets.QMainWindow()\nui = Ui_Selection_tree()\nui.setupUi(Selection_tree)\nSelection_tree.show()\ndef open_Liquid():\n global LIQUID_FUEL\n LIQUID_FUEL = QtWidgets.QMainWindow()\n ui = Ui_LIQUID_FUEL()\n ui.setupUi(LIQUID_FUEL)\n Selection_tree.hide()\n LIQUID_FUEL.show()\n def Return_from_L():\n LIQUID_FUEL.hide()\n Selection_tree.show()\n ui.L_btn_back.clicked.connect(Return_from_L)\n\ndef open_Gaseous():\n global GASEOUS_FUEL\n GASEOUS_FUEL = QtWidgets.QMainWindow()\n ui = Ui_GASEOUS_FUEL()\n ui.setupUi(GASEOUS_FUEL)\n Selection_tree.hide()\n GASEOUS_FUEL.show()\n def Return_from_G():\n GASEOUS_FUEL.hide()\n Selection_tree.show()\n ui.G_btn_back.clicked.connect(Return_from_G)\ndef Close():\n Selection_tree.close()\n\nui.btn_To_zhidkoe.clicked.connect(open_Liquid)\nui.btn_To_gazoobraznoe.clicked.connect(open_Gaseous)\nui.btn_cancel.clicked.connect(Close)\nsys.exit(app.exec_())\n","repo_name":"PohjoinenJohtaja/Final_project","sub_path":"Project/Fulfilled.py","file_name":"Fulfilled.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"12595396376","text":"user=int(input(\"enter your number: \"))\ni=0\na=[]\nb=\"\"\nwhile i<user:\n array=int(input(\"enter your number: \"))\n a.append(array)\n j=0\n while j<len(a):\n num=a[j]%10\n j=j+1\n b=b+str(num) \n i=i+1\nif int(b)/10==0:\n print(b,\"is divisible by 10 \",True)\nelse:\n print(b,\"is not divisible by 10 \",False)\n\n\n### or\n\narray=[85,25, 65, 21, 84]\ni=0\nb=\"\"\nwhile i<len(array):\n a=array[i]%10\n b+=str(a) \n i=i+1\nif int(b)/10==0:\n print(b, \"Yes\")\nelse:\n print(b, \"No\")\n","repo_name":"NingthemlaNingshen/Question-Bank","sub_path":"divisiblity.py","file_name":"divisiblity.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"41400459661","text":"\nfrom file_logs.models import UploadFile, AllData\nimport pandas as pd \nfrom celery import shared_task\nfrom django.shortcuts import render, redirect\n\n\ndef read_file(file_path):\n if str(file_path).endswith('.csv'):\n data = pd.read_csv(file_path, low_memory=False)\n elif str(file_path).endswith(('.xlsx', '.xls')):\n data = pd.read_excel(file_path)\n else:\n data = pd.DataFrame([])\n return data\n\n@shared_task\ndef process_file(list_dict):\n\tfor data_dict in list_dict:\n\t\td = AllData(**data_dict)\n\t\td.save()\n\treturn 'done'","repo_name":"Drey-Tee/Drey-Data-Collation-App","sub_path":"file_logs/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"70190288151","text":"from PyQt5.Qt import QLineEdit # For type annotation only, speeds up development in PyCharm.\n\nfrom ViewController import ViewController\n\n\nclass NewCaseController(ViewController.Dialog):\n \"\"\"\n A simple dialog that asks a user to fill in three fields.\n Naively assumes that the user has filled all fields, if they haven't the program will crash on certain actions\n later on during execution.\n \"\"\"\n case_reference_field = None # type: QLineEdit\n lab_reference_field = None # type: QLineEdit\n investigator_field = None # type: QLineEdit\n\n def __init__(self):\n \"\"\"\n Initiates with the given dialog UI 
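Note a real bug in the divisibility script above: both variants test `int(b)/10 == 0`, which in Python 3 is true only when the assembled number is 0; divisibility by 10 needs the modulo operator (equivalently, "does it end in 0"). A corrected sketch:

def last_digit_number(values):
    # Keep only the last digit of each value, in order.
    return int("".join(str(v % 10) for v in values))

n = last_digit_number([85, 25, 65, 21, 84])  # digits 5,5,5,1,4 -> 55514
print(n, "Yes" if n % 10 == 0 else "No")     # 55514 ends in 4 -> "No"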
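In the Celery `process_file` task above, every row dict becomes an individual `save()` and therefore an individual INSERT. For large uploads Django's `bulk_create` batches this into a handful of queries; the sketch below assumes `AllData` needs no per-row signal handling, since `bulk_create` bypasses `Model.save()` and pre/post-save signals.

from celery import shared_task
from file_logs.models import AllData

@shared_task
def process_file_bulk(list_dict, batch_size=500):
    # One INSERT per batch of rows instead of one per row.
    AllData.objects.bulk_create(
        (AllData(**d) for d in list_dict), batch_size=batch_size)
    return "done"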
file.\n \"\"\"\n super().__init__(\"newcase.ui\")\n","repo_name":"bordplate/Digidence","sub_path":"src/Controllers/NewCaseController.py","file_name":"NewCaseController.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"15527157244","text":"\nimport dash\nfrom dash import dcc\nfrom dash import html\nfrom flask import app\nimport plotly.express as px\nimport pandas as pd\nfrom dash.dependencies import Input, Output, State\nimport plotly.graph_objects as go\n\napp = dash.Dash(__name__)\n\n#Fair Market Rent 1 Bed \n\nfmrdata = pd.read_csv('Fair_Market_Rents.csv')\n\nfmrfig = px.scatter(fmrdata, x=\"County\", y=\"One Bed Price\", color=\"State\", height=1000,\ntitle=\"One Bedroom Properties' Fair Market Rent Prices By County and State\")\n\nfmrfig.update_layout = margin=dict(l=0, r=0, t=0, b=0)\n\ndef fair_market_rent(flask_app):\n \n fmr_app = dash.Dash(server=flask_app, name=\"Dashboard\", url_base_pathname='/fmr/')\n\n fmr_app.layout = html.Div(children=[\n\n dcc.Graph(\n id='example-graph',\n figure=fmrfig\n )\n ])\n\n return fmr_app\n\n@app.callback(\n Output(\"graph\", \"figure\"), \n [Input('width', '2000')], \n [State(\"graph\", \"figure\")])\ndef resize_figure(width, fig_json):\n fmrfig = go.Figure(fig_json)\n fmrfig.update_layout(width=int(width))\n\n return fmrfig\n\n#Fair Market Rent 2 Bed \n\nfmrdata2 = pd.read_csv('Fair_Market_Rents.csv')\n\nfmrfig2 = px.scatter(fmrdata, x=\"County\", y=\"Two Bed Price\", color=\"State\", height=1000,\ntitle=\"Two Bedroom Properties' Fair Market Rent Prices By County and State\")\n\nfmrfig2.update_layout = margin=dict(l=0, r=0, t=0, b=0)\n\ndef fair_market_rent2(flask_app):\n \n fmr_app2 = dash.Dash(server=flask_app, url_base_pathname='/fmr2/')\n\n fmr_app2.layout = html.Div(children=[\n\n dcc.Graph(\n id='example-graph',\n figure=fmrfig2\n )\n ])\n\n return fmr_app2\n\n@app.callback(\n Output(\"graph\", \"figure\"), \n [Input('width', '2000')], \n [State(\"graph\", \"figure\")])\ndef resize_figure(width, fig_json):\n fmrfig2 = go.Figure(fig_json)\n fmrfig2.update_layout(width=int(width))\n\n return fmrfig2\n\n#Fair Market Rent 3 Bed \n\nfmrdata3 = pd.read_csv('Fair_Market_Rents.csv')\n\nfmrfig3 = px.scatter(fmrdata, x=\"County\", y=\"Three Bed Price\", color=\"State\", height=1000,\ntitle=\"Three Bedroom Properties' Fair Market Rent Prices By County and State\")\n\nfmrfig3.update_layout = margin=dict(l=0, r=0, t=0, b=0)\n\ndef fair_market_rent3(flask_app):\n \n fmr_app3 = dash.Dash(server=flask_app, url_base_pathname='/fmr3/')\n\n fmr_app3.layout = html.Div(children=[\n\n dcc.Graph(\n id='example-graph',\n figure=fmrfig3\n )\n ])\n\n return fmr_app3\n\n@app.callback(\n Output(\"graph\", \"figure\"), \n [Input('width', '2000')], \n [State(\"graph\", \"figure\")])\ndef resize_figure(width, fig_json):\n fmrfig3 = go.Figure(fig_json)\n fmrfig3.update_layout(width=int(width))\n\n return fmrfig3\n\n#Fair Market Rent 4 Bed \n\nfmrdata4 = pd.read_csv('Fair_Market_Rents.csv')\n\nfmrfig4 = px.scatter(fmrdata, x=\"County\", y=\"Four Bed Price\", color=\"State\", height=1000,\ntitle=\"Four Bedroom Properties' Fair Market Rent Prices By County and State\")\n\nfmrfig4.update_layout = margin=dict(l=0, r=0, t=0, b=0)\n\ndef fair_market_rent4(flask_app):\n \n fmr_app4 = dash.Dash(server=flask_app, url_base_pathname='/fmr4/')\n\n fmr_app4.layout = html.Div(children=[\n\n dcc.Graph(\n id='example-graph',\n figure=fmrfig4\n )\n ])\n\n return 
fmr_app4\n\n@app.callback(\n Output(\"graph\", \"figure\"), \n [Input('width', '2000')], \n [State(\"graph\", \"figure\")])\ndef resize_figure(width, fig_json):\n fmrfig4 = go.Figure(fig_json)\n fmrfig4.update_layout(width=int(width))\n\n return fmrfig4\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n","repo_name":"ethannakamura/RocketREI-Real-Estate-Investing-App","sub_path":"app/dash_application/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3569,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"74432309592","text":"##\n# script per analizzare le immagini full frame e calcolare il gradiente x,y da confrontare con le misure di AMOS\n\nfrom astropy.io import fits as pyfits\nfrom m4.mini_OTT import timehistory as th\nfrom matplotlib import pyplot as plt\nfrom m4.ground import zernike\nimport numpy as np\nfrom m4 import noise\nfrom m4.configuration import config_folder_names as fold_name\nfrom m4.ground import rebinner\nimport os\n\n# definisco la maschera ma\nnx=425*4\nny=425*4\nx = np.linspace(1, nx, nx)\ny = np.linspace(1, ny, ny)\nxv, yv = np.meshgrid(x, y)\nx0=215.5*4;y0=215.5*4;R=(26.5+8)*4;\nmask1=(xv-x0)**2+(yv-y0)**2<R**2\nx0=215.5*4;y0=214.5*4;R=(196.5-10)*4;\nmask2=(xv-x0)**2+(yv-y0)**2>R**2\nma=mask1 | mask2\n\n##\n# calcolo l'average, tolgo zernike, \n# plotto rms della differenza tra i singoli frame e la media\n# plotto l'immagine dell'average \n\ntn=[]\n#tn.append('20230303_095811')\ntn.append('20230303_102203')\n\nidx=[0, 99]\n\nfl = th.fileList(tn[0])\nq0=th.averageFrames(idx[0],idx[1],fl)\n#q0.mask=ma \ncoeff, mat = zernike.zernikeFit(q0, [5,6])\nzernCoef=coeff\nq1=th.removeZernike(q0,[1,2,3,4,7,8])\n\nrr = []\nfor i in range(idx[0],idx[1]):\n qi = th.removeZernike(th.frame(i,fl)-q0,[1,2,3,4,7,8])\n rr.append(qi.std())\n\n\n\nplt.figure(1); plt.clf()\nplt.plot(rr, '-o'); plt.title('RMS-ref'); plt.legend(tn)\n\nplt.figure(2); plt.clf()\nplt.imshow(q1)\nplt.title(tn[0]+' RMS={:.2e}'.format(np.std(q1))+'\\n z5={:.2e}'.format(zernCoef[0])+', z6={:.2e}'.format(zernCoef[1])); \nplt.colorbar() \n\nplt.show()\n\n#\n# calcolo la differenza shiftando l'immagine di N pixel\n# rebinna la differenza\n\n#plt.close('all')\n\ndpix = 1\ndd = q1-np.roll(q1,(dpix,dpix),axis=(0,1))\n\nplt.figure(3);plt.clf();plt.imshow(dd);plt.colorbar();\nplt.title('PAR gradient \\n'+tn[0]+' RMS={:.2e}'.format(np.std(dd))+' shift {:d} pixel'.format(dpix)); \nplt.clim(-8e-9,8e-9)\nplt.show();\n\nK=2\nnew_shape=(dd.shape[0]/K, dd.shape[1]/K )\ndd_b=rebinner.rebin2DArray(dd, new_shape, sample=False)\n\nplt.figure(4);plt.clf();plt.imshow(dd_b);plt.colorbar();\nplt.title('PAR gradient_rebinned, 2pix \\n'+tn[0]+' RMS={:.2e}'.format(np.std(dd_b))); \nplt.clim(-8e-9,8e-9)\nplt.show();\n\n# fits_file_name = os.path.join(dove, name)\n# pyfits.writeto(fits_file_name, masked_ima.data)\n# pyfits.append(fits_file_name, masked_ima.mask.astype(int))\n","repo_name":"ChiaraSelmi/M4","sub_path":"m4/misc/20230303_par_analysis_fullframe_luca.py","file_name":"20230303_par_analysis_fullframe_luca.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"12551425448","text":"\"\"\"\nFunction h(x) guesstimates whether a given song, or set of features x, is genre Y (or not).\n\n1 'Pop_Rock'\n2 'Electronic'\n3 'Rap'\n4 'Jazz'\n5 'Latin'\n6 'RnB'\n7 'International'\n8 'Country'\n9 'Reggae'\n10 'Blues'\n\nThere are several commented out blocks 
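Two real bugs in the Dash module above: `fmrfig.update_layout = margin=dict(l=0, r=0, t=0, b=0)` is a chained assignment that replaces the figure's `update_layout` method with a dict instead of calling it, and `Input('width', '2000')` passes a value where a component property name belongs. A corrected sketch of the intended calls (the component ids are hypothetical):

import plotly.express as px

fig = px.scatter(x=[1, 2, 3], y=[3, 1, 2])
fig.update_layout(margin=dict(l=0, r=0, t=0, b=0))  # call it, don't assign to it

# Input/State take (component_id, component_property):
# @app.callback(Output("graph", "figure"),
#               [Input("width-input", "value")],
#               [State("graph", "figure")])
# def resize_figure(width, fig_json): ...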
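The pixel-shift difference in the PAR-analysis script above, `q1 - np.roll(q1, (dpix, dpix), axis=(0, 1))`, is a wrap-around finite difference along both axes, i.e. each pixel minus its upper-left neighbour. A small self-contained check of what it computes:

import numpy as np

a = np.arange(16, dtype=float).reshape(4, 4)  # a[i, j] = 4*i + j
d = a - np.roll(a, (1, 1), axis=(0, 1))
# Away from the wrapped first row/column, the difference is the sum of
# the row and column steps: 4 + 1 = 5 everywhere.
assert np.all(d[1:, 1:] == 5)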
here,\n- one produces bar charts with predicted labels\n- one produces and plots a confusion matrix\n\nYou can adjust the amount of layers and the amount of nodes in them\nby adjusting the variable N, which you can find in the begigning of main\n(to find main just go Ctrl + F \"# Main #\" )\n\n\"\"\"\nimport itertools\nimport os\nimport csv\nimport matplotlib.pyplot as plt\nimport plot_barchart\nimport numpy as np\n# the neural network itself\nfrom sklearn.neural_network import MLPClassifier as mpl\n# Neural networks are sensitive to feature scaling, so we'll preprocess the data a bit more I guess\nfrom sklearn.preprocessing import StandardScaler as sclr\n# Reporting tools included in sklearn\nfrom sklearn.metrics import classification_report as cr,confusion_matrix as cm, log_loss as ll\nfrom sklearn.model_selection import train_test_split\n\n# Firstly let's define some vars\n# Starting with paths, just for the sake of convenience and readability\n__location__ = os.path.realpath(\n os.path.join(os.getcwd(), os.path.dirname(__file__)))\n\ndata_path = os.path.join(__location__, \"data/\")\ntrain_data_path = data_path + \"train_data.csv\"\ntrain_label_path = data_path + \"train_labels.csv\"\ntest_data_path = data_path + \"test_data.csv\"\ndummy_path = data_path + \"dummy\"\n\n# Then the labels, this is from the project description paper\nlabels = ['Pop_Rock',\n 'Electronic',\n 'Rap',\n 'Jazz',\n 'Latin',\n 'RnB',\n 'International',\n 'Country',\n 'Reggae',\n 'Blues']\n\n# Create Neural Network\n## Allow dumping of weights to file?\n### (this way only train once)\n\n# Reading the csv\n## Write line number and prediction in solution.csv\n\n#First, let's analyze the data a bit\n# We'll draw a bar diagram with the sums of all the labels\n\n# Function for reading the CSV files\ndef readCSV(data_path):\n countingvarlimit = 100000\n data_content = []\n with open(data_path, 'r') as csvfile:\n countingvar = 0\n reader = csv.reader(csvfile, delimiter=',')\n for row in reader:\n data_content.append([float(i) for i in row])\n if countingvar > countingvarlimit:\n break\n countingvar += 1\n# return data_content, data_labels\n return data_content\n\n\ndef plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n\n################################## Main ##################################\n\n# This is how many layers (and perceptrons per layer) we'll have\nN = (100)\n\n# Read data into memory\ntraining_data = readCSV(train_data_path)\ntraining_labels = readCSV(train_label_path)\ntest_data = readCSV(test_data_path)\n\n# Scale the data\nscaler = sclr()\n# Fit only to 
training\nscaler.fit(training_data)\n\n# Create the classifier, first one will have 10 layers\n# Second 10, 10\n# Third 10, 10, 10\n# Fourth 100\n# Fifth 100, 100\n# Sixth 100, 100, 100\n# Seventh 1000\n# Eight 1000, 1000\n# Ninth 1000, 1000, 1000\nclassifier = mpl(N, max_iter=500)\n# Set these, just needed to be this I guess\nclassifier.out_activation_ = \"Softmax\"\nclassifier.n_outputs_ = 10\n\nclassifier.fit(training_data, training_labels)\n\npredictions = classifier.predict(test_data)\n\n#Sample_id,Sample_label\nlinecounter = 1\n# Write predictions to a .csv\nwriter = csv.writer(open(\"solution_accuracy_\"+str(N)+\".csv\", \"wb\"))\nfor i in list(predictions):\n writer.writerow([linecounter,int(i)])\n linecounter += 1\n'''\n# 1st line: Sample_id,Class_1,Class_2,Class_3,Class_4,Class_5,Class_6,Class_7,Class_8,Class_9,Class_10\n# Then do the logloss\nwriter2 = csv.writer(open(\"solution_logloss.csv\", \"wb\"))\nproba = classifier.predict_proba(test_data)\nlinecounter = 1\nfor row in proba:\n #print str(list(row)).strip(\"[]\")\n line = []\n line.append(linecounter)\n for e in list(row):\n line.append(float(e))\n writer2.writerow(line)\n linecounter += 1\n #if linecounter > 10:\n # break\n''' \n\n#np.savetxt('solution.csv', ([ int(x) for x in predictions ]), delimiter=',')\n\n#Enable bar chart here!\n'''\n\n# Plot bar chart\n\nlabel_count = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0}\n# Plot the labels of the training data, this part can be removed later\nfor i in list(predictions):\n label_count[int(i)] = label_count[int(i)] + 1\n#print label_count\n#print list(label_count.values())\nplot_barchart.plot_bar(list(label_count.values()), \"N = \"+str(N)+\", test\")\n\n# Produce confusion matrix\n# Split the training set, that's how we'll get both y_true and y_pred\n#x_true = training_data[0:len(training_data)/2]\n#y_true = training_labels[0:len(training_data)/2]\n#x_the_rest = training_data[len(training_data)/2:len(training_data)]\n#y_the_rest = training_labels[len(training_labels)/2:len(training_labels)]\n''' #Enable bar chart\n\n\n'''\n# Split the data into a training set and a test set\nX_train, X_test, y_train, y_test = train_test_split(training_data, training_labels, random_state=0)\n\n#print y_the_rest\n\n#scaler.fit(x_the_rest)\nclassifier2 = mpl(N, max_iter=500)\nclassifier2.fit(X_train, y_train)\ny_pred = classifier2.predict(X_test)\n\n# Compute confusion matrix\ncnf_matrix = cm(y_test, y_pred)\nnp.set_printoptions(precision=2)\n\n# Plot non-normalized confusion matrix\n#plt.figure()\n#plot_confusion_matrix(cnf_matrix, classes=class_names,\n# title='Confusion matrix, without normalization')\n\n# Plot normalized confusion matrix\nplt.figure()\nplot_confusion_matrix(cnf_matrix, classes=labels, normalize=True,\n title='Normalized confusion matrix')\n\nplt.show()\n'''","repo_name":"kanatanssi/ml_basic_principles","sub_path":"project/rockOrNot.py","file_name":"rockOrNot.py","file_ext":"py","file_size_in_byte":6798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"73375363993","text":"from django import forms\nfrom django.forms import ModelForm\nfrom .models import Post, Author\nfrom django.core import serializers\n\nclass PostForm(forms.Form):\n title = forms.CharField(max_length=200)\n description = forms.CharField(max_length=500, required=False)\n\n content_type = ((\"text/markdown\", \"text/markdown\"),\n (\"text/plain\", \"text/plain\"),\n (\"application/app\", \"application/base64\"),\n (\"image/png\", 
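One pitfall worth flagging in the classifier script above: the `StandardScaler` is fitted on the training data but `transform` is never applied, so the MLP still sees unscaled features (and `csv.writer(open(..., "wb"))` is Python 2 usage; Python 3 wants mode "w" with newline=""). A minimal sketch of the intended scale-then-fit flow on synthetic data:

import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPClassifier

rng = np.random.default_rng(0)
X_train, y_train = rng.normal(size=(80, 5)), rng.integers(0, 2, 80)
X_test = rng.normal(size=(20, 5))

scaler = StandardScaler().fit(X_train)         # fit on training data only
clf = MLPClassifier(hidden_layer_sizes=(100,), max_iter=500)
clf.fit(scaler.transform(X_train), y_train)    # transform before fitting
pred = clf.predict(scaler.transform(X_test))   # and again before predicting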
\"image/png;base64\"),\n (\"image/jpeg\", \"image/jpeg;base64\"),\n (\"HTML\", \"HTML\"),\n )\n contentType = forms.CharField(max_length=20, required=True,\n widget=forms.Select(choices=content_type, attrs={'class':'dropdown-item', 'style':'width:20%; background-color:#ededed;'}))\n text = forms.CharField(required=False, widget=forms.Textarea)\n file = forms.FileField(required=False)\n\n\n categories_list = (\n ('Web', 'Web'),\n ('Tutorial', 'Tutorial'),\n )\n\n categories = forms.CharField(max_length=20, required=True,\n widget=forms.Select(choices=categories_list, attrs={'class':'dropdown-item', 'style':'width:20%; background-color:#ededed;'}))\n\n visible = (\n ('PUBLIC', 'Public'),\n ('FRIENDS', 'Friends'),\n )\n visibility = forms.CharField(max_length=20, required=True,\n widget=forms.Select(choices=visible, attrs={'class':'dropdown-item', 'style':'width:20%; background-color:#ededed;'}))\n unlisted = forms.BooleanField(required=False)\n\n fields = [\n 'title',\n 'source',\n 'origin',\n 'description',\n 'count',\n 'size',\n 'visibility',\n 'unlisted',]\n\nclass contentForm(ModelForm):\n pass\n\nclass PostFormTest(ModelForm):\n class Meta:\n model = Post\n fields = ['title', 'source', 'origin', 'description', 'count', 'size', 'visibility', 'unlisted',]\n # exclude = ('author_id','author', 'type', 'contentType', 'content', 'catergories', 'comments_id', 'comments', 'pulished',)\n\n'''\n# comment form\nclass CommentForm(forms.ModelForm):\n text = forms.CharField(required=False)\n file = forms.FileField(required=False)\n unlisted = forms.BooleanField(required=False)\n \n fields = [\n #'page'\n #'size',\n 'file',\n 'unlisted'\n ]\n'''\n\n# for the future post form\nclass PostCreationForm(forms.ModelForm):\n class Meta:\n model = Post\n fields = [\n 'author_id',\n 'title',\n 'source',\n 'origin',\n 'description',\n 'count',\n 'size',\n # 'comments',\n 'visibility',\n 'unlisted',]\n author = forms.JSONField()\n \n def set_author(self, author):\n data = self.data.copy()\n data['author'] = serializers.serialize('json', Author.objects.filter(auth_id =data[\"email\"]).values())\n self.data = data\n\n def set_content(self, content):\n data = self.data.copy()\n with open(\"dog.jpg\", \"r\") as f:\n content = f\n data['content'] = content.data_base64\n self.data = data\n\n","repo_name":"CMPUT404-Fa21-Organization/CMPUT404-Project-Social-Distribution","sub_path":"Posts/form.py","file_name":"form.py","file_ext":"py","file_size_in_byte":3233,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"9655280082","text":"from nltk.tokenize import sent_tokenize\nimport glob\nimport re\nfrom selenium import webdriver \nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.support.ui import WebDriverWait\nimport itertools\nimport time\n\nfrom multiprocessing.pool import ThreadPool, Pool\nimport threading\n\n\nthreadLocal = threading.local()\n\ndef driver_create():\n driver = getattr(threadLocal, 'driver', None) \n if driver is None: # check if thread has it's driver\n chrome_options = Options()\n chrome_options.add_argument(\"--disable-extensions\")\n chrome_options.add_argument(\"--disable-gpu\") # efficient web driver initization\n chrome_options.add_argument(\"--headless\")\n driver = webdriver.Chrome('chromedriver',options=chrome_options)\n driver.get(\"https://translate.google.com/#view=home&op=translate&sl=en&tl=hy\") \n setattr(threadLocal, 'driver', driver)\n return driver\n\n\ndef trans_get(texts):\n 
driver=driver_create()\n wait = WebDriverWait(driver, 10)\n input=wait.until(lambda driver: driver.find_element_by_xpath('//textarea[@id=\"source\"]')) # get input textarea if it exists\n input.send_keys(texts)\n output=wait.until(lambda driver: driver.find_element_by_xpath('//span[@class=\"tlid-translation translation\"]')) # get output if it exists\n with open('outputneg.txt','a') as f:\n f.write(output.text+'\\n')\n time.sleep(1) # wait to avoiding repetated copy\n input.clear()\n\nmylist = [f for f in glob.glob(\"neg/*.txt\")] # get filenames\n\nfinalist = itertools.chain.from_iterable([sent_tokenize(re.sub('<.*?>', '', open(i).read())) for i in mylist]) # extract all lines from all files\n\n\n\nThreadPool(10).map(trans_get,finalist) # instantiate threaded translators\n","repo_name":"HovhannesManushyan/Armenian_Language_Sentiment_Analysis","sub_path":"Google_translate/translate.py","file_name":"translate.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"2655668651","text":"import pulp_test, json, pulp_auto\nfrom pulp_auto.repo import Repo, Importer, Distributor, create_yum_repo\nfrom pulp_auto.task import Task, GroupTask\nfrom pulp_auto.units import Orphans\nfrom . import ROLES\n\n\ndef setUpModule():\n pass\n\n@pulp_test.requires_any('repos', lambda repo: repo.type == 'rpm')\nclass SimpleRepoCopyTest(pulp_test.PulpTest):\n\n @classmethod\n def setUpClass(cls):\n super(SimpleRepoCopyTest, cls).setUpClass()\n #Destination repo\n feed = None\n dest_repo_name = cls.__name__ + '_copy'\n dest_repo1 = Repo({'id': dest_repo_name})\n dest_repo1.delete(cls.pulp)\n cls.dest_repo1, _, _ = create_yum_repo(cls.pulp, dest_repo_name, feed)\n\n #2nd Destination Repo\n dest_repo_name = cls.__name__ + '_copy1'\n dest_repo2 = Repo({'id': dest_repo_name})\n dest_repo2.delete(cls.pulp)\n cls.dest_repo2, _, _ = create_yum_repo(cls.pulp, dest_repo_name, feed)\n\n # Source repo\n default_repo_config = [repo for repo in ROLES.repos if repo.type == 'rpm'][0]\n source_repo_name = cls.__name__ + '_repo'\n source_repo = Repo({'id': source_repo_name})\n source_repo.delete(cls.pulp)\n cls.source_repo, _, _ = create_yum_repo(cls.pulp, source_repo_name, default_repo_config.feed)\n sync_task = Task.from_response(cls.source_repo.sync(cls.pulp))[0]\n sync_task.wait(cls.pulp)\n\n def test_1_copy_repo_all(self):\n response = self.dest_repo1.copy(self.pulp, self.source_repo.id, data={})\n self.assertPulp(code=202)\n task = Task.from_response(response)\n task.wait(self.pulp)\n\n def test_2_copy_rpm(self):\n response = self.dest_repo2.copy(\n self.pulp,\n self.source_repo.id,\n data={\n 'criteria': {\n 'type_ids': ['rpm']\n },\n }\n )\n self.assertPulp(code=202)\n task = Task.from_response(response)\n task.wait(self.pulp)\n\n def test_3_copy_category(self):\n response = self.dest_repo2.copy(\n self.pulp,\n self.source_repo.id,\n data={\n 'criteria': {\n 'type_ids': ['package_category']\n },\n }\n )\n self.assertPulp(code=202)\n task = Task.from_response(response)\n task.wait(self.pulp)\n\n def test_4_copy_group(self):\n response = self.dest_repo2.copy(\n self.pulp,\n self.source_repo.id,\n data={\n 'criteria': {\n 'type_ids': ['package_group']\n },\n }\n )\n self.assertPulp(code=202)\n task = Task.from_response(response)\n task.wait(self.pulp)\n\n def test_4_copy_distribution(self):\n response = self.dest_repo2.copy(\n self.pulp,\n self.source_repo.id,\n data={\n 'criteria': {\n 'type_ids': ['distribution']\n },\n }\n )\n 
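The `threading.local()` pattern above — one WebDriver per worker thread, created lazily on first use and cached for the thread's lifetime — generalizes to any expensive, non-thread-safe resource. A browser-free sketch of the same mechanism:

import threading
from multiprocessing.pool import ThreadPool

thread_local = threading.local()

def get_resource():
    # Lazily create one resource per thread, as driver_create() does above.
    res = getattr(thread_local, "resource", None)
    if res is None:
        res = {"owner": threading.current_thread().name}
        thread_local.resource = res
    return res

def work(item):
    return (item, get_resource()["owner"])

# Items processed by the same thread report the same owner:
print(ThreadPool(4).map(work, range(8)))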
self.assertPulp(code=202)\n task = Task.from_response(response)\n task.wait(self.pulp)\n\n def test_5_copy_erratum(self):\n response = self.dest_repo2.copy(\n self.pulp,\n self.source_repo.id,\n data={\n 'criteria': {\n 'type_ids': ['erratum']\n },\n }\n )\n self.assertPulp(code=202)\n task = Task.from_response(response)\n task.wait(self.pulp)\n\n def test_6_copy_srpm(self):\n response = self.dest_repo2.copy(\n self.pulp,\n self.source_repo.id,\n data={\n 'criteria': {\n 'type_ids': ['srpm']\n },\n }\n )\n self.assertPulp(code=202)\n task = Task.from_response(response)\n task.wait(self.pulp)\n\n @classmethod\n def tearDownClass(cls):\n for repo_id in ['SimpleRepoCopyTest_repo', 'SimpleRepoCopyTest_copy', 'SimpleRepoCopyTest_copy1']:\n Repo({'id': repo_id}).delete(cls.pulp)\n #orphans also should be deleted in cleanup\n delete_response = Orphans.delete(cls.pulp)\n delete_task = Task.from_response(delete_response)\n delete_task.wait(cls.pulp)\n","repo_name":"alexxa/pulp-automation","sub_path":"tests/test_12_yum_repo_copy.py","file_name":"test_12_yum_repo_copy.py","file_ext":"py","file_size_in_byte":4271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"5"} +{"seq_id":"73203119192","text":"# -*- coding: utf-8 -*-\n\nimport json\nimport os\n\nimport cv2\nimport numpy as np\n\n\nGHOST_POS_W = 0\nGHOST_POS_H = 0\nWINDOW_NAME = \"cap\"\n\n\ndef main():\n config = load_config()\n\n cap = cv2.VideoCapture(config[\"camera\"])\n fps = int(cap.get(cv2.CAP_PROP_FPS))\n cap_w = cap.get(cv2.CAP_PROP_FRAME_WIDTH)\n cap_h = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)\n cv2.namedWindow(WINDOW_NAME)\n\n # 合成する画像を準備\n img_file = os.path.join(config[\"image_path\"], config[\"image_file\"])\n ghost_img = cv2.imread(img_file)\n gray_img = cv2.cvtColor(ghost_img, cv2.COLOR_BGR2GRAY)\n _, mask = cv2.threshold(gray_img, 10, 255, cv2.THRESH_BINARY)\n mask_inv = cv2.bitwise_not(mask)\n\n # 顔検出準備\n cascade_file = os.path.join(config[\"cascade_path\"], config[\"cascade_file\"])\n cascade = cv2.CascadeClassifier(cascade_file)\n # 顔判定の元となるサイズ\n base_w, base_h = cap_w//4, cap_h//4\n # キャリブレーション用フラグ\n calibrating = False\n\n print(f\"camera size: {cap_w}x{cap_h}\")\n print(f\"camera fps: {fps}\")\n\n print(\"press q to quit\")\n print(\"press c to calibrate face size\")\n while 1:\n ret, frame = cap.read()\n if not ret:\n continue\n\n # 顔認識\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n face_rects = cascade.detectMultiScale(\n gray, scaleFactor=1.1, minNeighbors=1, minSize=(1, 1)\n )\n if len(face_rects) > 0:\n # 最大の矩形を顔候補とする\n rect = face_rects[np.argmax([r[2] for r in face_rects])]\n # 基準の大きさより大きいなら顔と判定\n if rect[2] > base_w*0.8 or rect[3] > base_h*0.8:\n if calibrating:\n rect_color = (0, 0, 255)\n else:\n rect_color = (255, 0, 0)\n cv2.rectangle(\n frame, tuple(rect[0:2]), tuple(rect[0:2] + rect[2:4]),\n rect_color, thickness=4\n )\n else:\n # 顔候補はあったが顔と判定されなかった場合: 合成\n appear_ghost(frame, cap_w, cap_h, ghost_img, mask, mask_inv)\n else:\n # 顔候補がなかった場合: 合成\n appear_ghost(frame, cap_w, cap_h, ghost_img, mask, mask_inv)\n\n cv2.imshow(WINDOW_NAME, frame)\n\n key = cv2.waitKey(fps)\n if key & 0xFF == ord(\"q\"):\n break\n elif key & 0xFF == ord(\"c\"):\n if calibrating:\n base_w = rect[2]\n base_h = rect[3]\n calibrating = not calibrating\n\n cap.release()\n cv2.destroyWindow(WINDOW_NAME)\n\n\ndef appear_ghost(frame, w, h, img, mask, mask_inv):\n # 画面上の暗い場所を探索\n # 画像をいい感じに重畳表示\n row, col = img.shape[:2]\n roi = frame[GHOST_POS_W:GHOST_POS_W+row,\n GHOST_POS_H:GHOST_POS_H+col]\n\n 
frame_background = cv2.bitwise_and(roi, roi, mask=mask_inv)\n img_foreground = cv2.bitwise_and(img, img, mask=mask)\n res = cv2.add(frame_background, img_foreground)\n frame[GHOST_POS_W:GHOST_POS_W+row, GHOST_POS_H:GHOST_POS_H+col] = res\n\n\ndef load_config(path=\"./conf\"):\n file = os.path.join(path, \"config.json\")\n with open(file, \"r\") as f:\n data = json.load(f)\n return data\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ymsk-sky/look_back_cv","sub_path":"recognize_looking_back.py","file_name":"recognize_looking_back.py","file_ext":"py","file_size_in_byte":3394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"39252927749","text":"import sys, os, time, socket, signal\nimport fnmatch, errno, threading\nimport serial, select\nimport queue as Queue\nimport imp\nimport traceback\nimport select\nimport shlex\nimport math\nimport Ice\nimport jderobot\nimport multiprocessing\nimport time\n\nfrom MAVProxy.modules.lib import textconsole\nfrom MAVProxy.modules.lib import rline\nfrom MAVProxy.modules.lib import mp_module\nfrom MAVProxy.modules.lib import dumpstacks\nfrom MAVProxy.modules.lib import udp\nfrom MAVProxy.modules.lib import tcp\n\n#import easyiceconfig as EasyIce\n\nfrom Pose3D import Pose3DI\nfrom CMDVel import CMDVelI\nfrom Extra import ExtraI\nfrom pymavlink import quaternion\n\nglobal operation_takeoff\nglobal time_init_operation_takeoff\nglobal time_end_operation_takeoff\nglobal on_air\n\n# adding all this allows pyinstaller to build a working windows executable\n# note that using --hidden-import does not work for these modules\ntry:\n from multiprocessing import freeze_support\n from pymavlink import mavwp, mavutil\n import matplotlib, HTMLParser\n try:\n import readline\n except ImportError:\n import pyreadline as readline\nexcept Exception:\n pass\n\n\nif __name__ == '__main__':\n freeze_support()\n\nclass MPStatus(object):\n '''hold status information about the mavproxy'''\n def __init__(self):\n self.gps\t = None\n self.msgs = {}\n self.msg_count = {}\n self.counters = {'MasterIn' : [], 'MasterOut' : 0, 'FGearIn' : 0, 'FGearOut' : 0, 'Slave' : 0}\n self.setup_mode = opts.setup\n self.mav_error = 0\n self.altitude = 0\n self.last_altitude_announce = 0.0\n self.last_distance_announce = 0.0\n self.exit = False\n self.flightmode = 'MAV'\n self.last_mode_announce = 0\n self.logdir = None\n self.last_heartbeat = 0\n self.last_message = 0\n self.heartbeat_error = False\n self.last_apm_msg = None\n self.last_apm_msg_time = 0\n self.highest_msec = 0\n self.have_gps_lock = False\n self.lost_gps_lock = False\n self.last_gps_lock = 0\n self.watch = None\n self.last_streamrate1 = -1\n self.last_streamrate2 = -1\n self.last_seq = 0\n self.armed = False\n\n def show(self, f, pattern=None):\n '''write status to status.txt'''\n if pattern is None:\n f.write('Counters: ')\n for c in self.counters:\n f.write('%s:%s ' % (c, self.counters[c]))\n f.write('\\n')\n f.write('MAV Errors: %u\\n' % self.mav_error)\n f.write(str(self.gps)+'\\n')\n for m in sorted(self.msgs.keys()):\n if pattern is not None and not fnmatch.fnmatch(str(m).upper(), pattern.upper()):\n continue\n f.write(\"%u: %s\\n\" % (self.msg_count[m], str(self.msgs[m])))\n\n def write(self):\n '''write status to status.txt'''\n f = open('status.txt', mode='w')\n self.show(f)\n f.close()\n\ndef say_text(text, priority='important'):\n '''text output - default function for say()'''\n mpstate.console.writeln(text)\n\ndef say(text, priority='important'):\n 
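The compositing in `appear_ghost` above is the standard OpenCV overlay recipe: cut a sprite-shaped hole in the background ROI with the inverse mask, keep only the sprite pixels with the mask, then add the two. A synthetic, file-free sketch:

import numpy as np
import cv2

frame = np.full((100, 100, 3), 200, np.uint8)       # light background
sprite = np.zeros((40, 40, 3), np.uint8)
cv2.circle(sprite, (20, 20), 15, (0, 0, 255), -1)   # red disc on black

gray = cv2.cvtColor(sprite, cv2.COLOR_BGR2GRAY)
_, mask = cv2.threshold(gray, 10, 255, cv2.THRESH_BINARY)
mask_inv = cv2.bitwise_not(mask)

roi = frame[0:40, 0:40]
bg = cv2.bitwise_and(roi, roi, mask=mask_inv)       # hole where the sprite goes
fg = cv2.bitwise_and(sprite, sprite, mask=mask)     # sprite pixels only
frame[0:40, 0:40] = cv2.add(bg, fg)                 # composite back in place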
'''text and/or speech output'''\n mpstate.functions.say(text, priority)\n\ndef add_input(cmd, immediate=False):\n '''add some command input to be processed'''\n if immediate:\n process_stdin(cmd)\n else:\n mpstate.input_queue.put(cmd)\n\nclass MAVFunctions(object):\n '''core functions available in modules'''\n def __init__(self):\n self.process_stdin = add_input\n self.param_set = param_set\n self.get_mav_param = get_mav_param\n self.say = say_text\n # input handler can be overridden by a module\n self.input_handler = None\n\nclass MPState(object):\n '''holds state of mavproxy'''\n def __init__(self):\n self.udp = udp.UdpServer()\n self.tcp = tcp.TcpServer()\n self.console = textconsole.SimpleConsole(udp = self.udp, tcp = self.tcp)\n self.map = None\n self.map_functions = {}\n self.vehicle_type = None\n self.vehicle_name = None\n from MAVProxy.modules.lib.mp_settings import MPSettings, MPSetting\n self.settings = MPSettings(\n [ MPSetting('link', int, 1, 'Primary Link', tab='Link', range=(0,4), increment=1),\n MPSetting('streamrate', int, 4, 'Stream rate link1', range=(-1,20), increment=1),\n MPSetting('streamrate2', int, 4, 'Stream rate link2', range=(-1,20), increment=1),\n MPSetting('heartbeat', int, 1, 'Heartbeat rate', range=(0,5), increment=1),\n MPSetting('mavfwd', bool, True, 'Allow forwarded control'),\n MPSetting('mavfwd_rate', bool, False, 'Allow forwarded rate control'),\n MPSetting('shownoise', bool, True, 'Show non-MAVLink data'),\n MPSetting('baudrate', int, opts.baudrate, 'baudrate for new links', range=(0,10000000), increment=1),\n MPSetting('rtscts', bool, opts.rtscts, 'enable flow control'),\n MPSetting('select_timeout', float, 0.01, 'select timeout'),\n\n MPSetting('altreadout', int, 10, 'Altitude Readout',\n range=(0,100), increment=1, tab='Announcements'),\n MPSetting('distreadout', int, 200, 'Distance Readout', range=(0,10000), increment=1),\n\n MPSetting('moddebug', int, opts.moddebug, 'Module Debug Level', range=(0,3), increment=1, tab='Debug'),\n MPSetting('compdebug', int, 0, 'Computation Debug Mask', range=(0,3), tab='Debug'),\n MPSetting('flushlogs', bool, False, 'Flush logs on every packet'),\n MPSetting('requireexit', bool, False, 'Require exit command'),\n MPSetting('wpupdates', bool, True, 'Announce waypoint updates'),\n\n MPSetting('basealt', int, 0, 'Base Altitude', range=(0,30000), increment=1, tab='Altitude'),\n MPSetting('wpalt', int, 100, 'Default WP Altitude', range=(0,10000), increment=1),\n MPSetting('rallyalt', int, 90, 'Default Rally Altitude', range=(0,10000), increment=1),\n MPSetting('terrainalt', str, 'Auto', 'Use terrain altitudes', choice=['Auto','True','False']),\n MPSetting('rally_breakalt', int, 40, 'Default Rally Break Altitude', range=(0,10000), increment=1),\n MPSetting('rally_flags', int, 0, 'Default Rally Flags', range=(0,10000), increment=1),\n\n MPSetting('source_system', int, 255, 'MAVLink Source system', range=(0,255), increment=1, tab='MAVLink'),\n MPSetting('source_component', int, 0, 'MAVLink Source component', range=(0,255), increment=1),\n MPSetting('target_system', int, 0, 'MAVLink target system', range=(0,255), increment=1),\n MPSetting('target_component', int, 0, 'MAVLink target component', range=(0,255), increment=1),\n MPSetting('state_basedir', str, None, 'base directory for logs and aircraft directories')\n ])\n\n self.completions = {\n \"script\" : [\"(FILENAME)\"],\n \"set\" : [\"(SETTING)\"],\n \"status\" : [\"(VARIABLE)\"],\n \"module\" : [\"list\",\n \"load (AVAILMODULES)\",\n \"<unload|reload> 
(LOADEDMODULES)\"]\n }\n\n self.status = MPStatus()\n\n # master mavlink device\n self.mav_master = None\n\n # mavlink outputs\n self.mav_outputs = []\n self.sysid_outputs = {}\n\n # SITL output\n self.sitl_output = None\n\n self.mav_param = mavparm.MAVParmDict()\n self.modules = []\n self.public_modules = {}\n self.functions = MAVFunctions()\n self.select_extra = {}\n self.continue_mode = False\n self.aliases = {}\n import platform\n self.system = platform.system()\n\n def module(self, name):\n '''Find a public module (most modules are private)'''\n if name in self.public_modules:\n return self.public_modules[name]\n return None\n\n def master(self):\n '''return the currently chosen mavlink master object'''\n if len(self.mav_master) == 0:\n return None\n if self.settings.link > len(self.mav_master):\n self.settings.link = 1\n # try to use one with no link error\n if not self.mav_master[self.settings.link-1].linkerror:\n return self.mav_master[self.settings.link-1]\n for m in self.mav_master:\n if not m.linkerror:\n return m\n return self.mav_master[self.settings.link-1]\n\n\ndef get_mav_param(param, default=None):\n '''return a EEPROM parameter value'''\n return mpstate.mav_param.get(param, default)\n\ndef param_set(name, value, retries=3):\n '''set a parameter'''\n name = name.upper()\n return mpstate.mav_param.mavset(mpstate.master(), name, value, retries=retries)\n\ndef cmd_script(args):\n '''run a script'''\n if len(args) < 1:\n print(\"usage: script <filename>\")\n return\n\n run_script(args[0])\n\ndef cmd_set(args):\n '''control mavproxy options'''\n mpstate.settings.command(args)\n\ndef cmd_status(args):\n '''show status'''\n if len(args) == 0:\n mpstate.status.show(sys.stdout, pattern=None)\n else:\n for pattern in args:\n mpstate.status.show(sys.stdout, pattern=pattern)\n\ndef cmd_setup(args):\n mpstate.status.setup_mode = True\n mpstate.rl.set_prompt(\"\")\n\n\ndef cmd_reset(args):\n print(\"Resetting master\")\n mpstate.master().reset()\n\ndef cmd_watch(args):\n '''watch a mavlink packet pattern'''\n if len(args) == 0:\n mpstate.status.watch = None\n return\n mpstate.status.watch = args[0]\n print(\"Watching %s\" % mpstate.status.watch)\n\ndef load_module(modname, quiet=False):\n '''load a module'''\n modpaths = ['MAVProxy.modules.mavproxy_%s' % modname, modname]\n for (m,pm) in mpstate.modules:\n if m.name == modname:\n if not quiet:\n print(\"module %s already loaded\" % modname)\n return False\n for modpath in modpaths:\n try:\n m = import_package(modpath)\n imp.reload(m)\n module = m.init(mpstate)\n if isinstance(module, mp_module.MPModule):\n mpstate.modules.append((module, m))\n if not quiet:\n print(\"Loaded module %s\" % (modname,))\n return True\n else:\n ex = \"%s.init did not return a MPModule instance\" % modname\n break\n except ImportError as msg:\n ex = msg\n if mpstate.settings.moddebug > 1:\n import traceback\n print(traceback.format_exc())\n print(\"Failed to load module: %s. 
Use 'set moddebug 3' in the MAVProxy console to enable traceback\" % ex)\n return False\n\ndef unload_module(modname):\n '''unload a module'''\n for (m,pm) in mpstate.modules:\n if m.name == modname:\n if hasattr(m, 'unload'):\n m.unload()\n mpstate.modules.remove((m,pm))\n print(\"Unloaded module %s\" % modname)\n return True\n print(\"Unable to find module %s\" % modname)\n return False\n\ndef cmd_module(args):\n '''module commands'''\n usage = \"usage: module <list|load|reload|unload>\"\n if len(args) < 1:\n print(usage)\n return\n if args[0] == \"list\":\n for (m,pm) in mpstate.modules:\n print(\"%s: %s\" % (m.name, m.description))\n elif args[0] == \"load\":\n if len(args) < 2:\n print(\"usage: module load <name>\")\n return\n load_module(args[1])\n elif args[0] == \"reload\":\n if len(args) < 2:\n print(\"usage: module reload <name>\")\n return\n modname = args[1]\n pmodule = None\n for (m,pm) in mpstate.modules:\n if m.name == modname:\n pmodule = pm\n if pmodule is None:\n print(\"Module %s not loaded\" % modname)\n return\n if unload_module(modname):\n import zipimport\n try:\n reload(pmodule)\n except ImportError:\n clear_zipimport_cache()\n reload(pmodule)\n if load_module(modname, quiet=True):\n print(\"Reloaded module %s\" % modname)\n elif args[0] == \"unload\":\n if len(args) < 2:\n print(\"usage: module unload <name>\")\n return\n modname = os.path.basename(args[1])\n unload_module(modname)\n else:\n print(usage)\n\n\ndef cmd_alias(args):\n '''alias commands'''\n usage = \"usage: alias <add|remove|list>\"\n if len(args) < 1 or args[0] == \"list\":\n if len(args) >= 2:\n wildcard = args[1].upper()\n else:\n wildcard = '*'\n for a in sorted(mpstate.aliases.keys()):\n if fnmatch.fnmatch(a.upper(), wildcard):\n print(\"%-15s : %s\" % (a, mpstate.aliases[a]))\n elif args[0] == \"add\":\n if len(args) < 3:\n print(usage)\n return\n a = args[1]\n mpstate.aliases[a] = ' '.join(args[2:])\n elif args[0] == \"remove\":\n if len(args) != 2:\n print(usage)\n return\n a = args[1]\n if a in mpstate.aliases:\n mpstate.aliases.pop(a)\n else:\n print(\"no alias %s\" % a)\n else:\n print(usage)\n return\n\n\ndef clear_zipimport_cache():\n \"\"\"Clear out cached entries from _zip_directory_cache.\n See http://www.digi.com/wiki/developer/index.php/Error_messages\"\"\"\n import sys, zipimport\n syspath_backup = list(sys.path)\n zipimport._zip_directory_cache.clear()\n\n # load back items onto sys.path\n sys.path = syspath_backup\n # add this too: see https://mail.python.org/pipermail/python-list/2005-May/353229.html\n sys.path_importer_cache.clear()\n\n# http://stackoverflow.com/questions/211100/pythons-import-doesnt-work-as-expected\n# has info on why this is necessary.\n\ndef import_package(name):\n \"\"\"Given a package name like 'foo.bar.quux', imports the package\n and returns the desired module.\"\"\"\n import zipimport\n try:\n mod = __import__(name)\n except ImportError:\n clear_zipimport_cache()\n mod = __import__(name)\n\n components = name.split('.')\n for comp in components[1:]:\n mod = getattr(mod, comp)\n return mod\n\n\ncommand_map = {\n 'script' : (cmd_script, 'run a script of MAVProxy commands'),\n 'setup' : (cmd_setup, 'go into setup mode'),\n 'reset' : (cmd_reset, 'reopen the connection to the MAVLink master'),\n 'status' : (cmd_status, 'show status'),\n 'set' : (cmd_set, 'mavproxy settings'),\n 'watch' : (cmd_watch, 'watch a MAVLink pattern'),\n 'module' : (cmd_module, 'module commands'),\n 'alias' : (cmd_alias, 'command aliases')\n }\n\ndef process_stdin(line):\n '''handle 
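`import_package` below relies on a subtlety of `__import__`: for a dotted name it returns the top-level package, not the leaf, so the function walks `getattr` down the remaining components. A standalone sketch of that idiom with a stdlib module:

def import_package(name):
    # __import__('os.path') returns the 'os' package, so descend one
    # attribute per dotted component to reach the leaf module.
    mod = __import__(name)
    for comp in name.split(".")[1:]:
        mod = getattr(mod, comp)
    return mod

path_mod = import_package("os.path")
print(path_mod.join("a", "b"))  # 'a/b' (or 'a\\b' on Windows)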
commands from user'''\n if line is None:\n sys.exit(0)\n # allow for modules to override input handling\n if mpstate.functions.input_handler is not None:\n mpstate.functions.input_handler(line)\n return\n\n line = line.strip()\n\n if mpstate.status.setup_mode:\n # in setup mode we send strings straight to the master\n if line == '.':\n mpstate.status.setup_mode = False\n mpstate.status.flightmode = \"MAV\"\n mpstate.rl.set_prompt(\"MAV> \")\n return\n if line != '+++':\n line += '\\r'\n for c in line:\n time.sleep(0.01)\n mpstate.master().write(c)\n return\n\n if not line:\n return\n\n args = shlex.split(line)\n cmd = args[0]\n while cmd in mpstate.aliases:\n line = mpstate.aliases[cmd]\n args = shlex.split(line) + args[1:]\n cmd = args[0]\n\n if cmd == 'help':\n k = command_map.keys()\n #k.sort()\n for cmd in k:\n (fn, help) = command_map[cmd]\n print(\"%-15s : %s\" % (cmd, help))\n return\n if cmd == 'exit' and mpstate.settings.requireexit:\n mpstate.status.exit = True\n return\n ######################################################################################################\n if cmd == 'velocity' and len(args) == 4:\n PH_CMDVel = CMDVelI(args[1],args[2],args[3],0,0,0) #1 to avoid indeterminations\n\n\n\n\n\n\n ######################################################################################################\n if not cmd in command_map:\n for (m,pm) in mpstate.modules:\n if hasattr(m, 'unknown_command'):\n try:\n if m.unknown_command(args):\n return\n except Exception as e:\n print(\"ERROR in command: %s\" % str(e))\n print(\"Unknown command '%s'\" % line)\n return\n\n (fn, help) = command_map[cmd]\n try:\n fn(args[1:])\n except Exception as e:\n print(\"ERROR in command %s: %s\" % (args[1:], str(e)))\n if mpstate.settings.moddebug > 1:\n traceback.print_exc()\n\n\ndef process_master(m):\n '''process packets from the MAVLink master'''\n try:\n s = m.recv(16*1024)\n except Exception:\n time.sleep(0.1)\n return\n # prevent a dead serial port from causing the CPU to spin. 
The user hitting enter will\n # cause it to try and reconnect\n if len(s) == 0:\n time.sleep(0.1)\n return\n\n if (mpstate.settings.compdebug & 1) != 0:\n return\n\n if mpstate.logqueue_raw:\n mpstate.logqueue_raw.put(str(s))\n\n if mpstate.status.setup_mode:\n if mpstate.system == 'Windows':\n # strip nsh ansi codes\n s = s.replace(\"\\033[K\",\"\")\n sys.stdout.write(str(s))\n sys.stdout.flush()\n return\n\n if m.first_byte and opts.auto_protocol:\n m.auto_mavlink_version(s)\n msgs = m.mav.parse_buffer(s)\n if msgs:\n for msg in msgs:\n sysid = msg.get_srcSystem()\n if sysid in mpstate.sysid_outputs:\n # the message has been handled by a specialised handler for this system\n continue\n if getattr(m, '_timestamp', None) is None:\n m.post_message(msg)\n if msg.get_type() == \"BAD_DATA\":\n if opts.show_errors:\n mpstate.console.writeln(\"MAV error: %s\" % msg)\n mpstate.status.mav_error += 1\n\n\n\ndef process_mavlink(slave):\n '''process packets from MAVLink slaves, forwarding to the master'''\n try:\n buf = slave.recv()\n except socket.error:\n return\n try:\n if slave.first_byte and opts.auto_protocol:\n slave.auto_mavlink_version(buf)\n msgs = slave.mav.parse_buffer(buf)\n except mavutil.mavlink.MAVError as e:\n mpstate.console.error(\"Bad MAVLink slave message from %s: %s\" % (slave.address, e.message))\n return\n if msgs is None:\n return\n if mpstate.settings.mavfwd and not mpstate.status.setup_mode:\n for m in msgs:\n if mpstate.status.watch is not None:\n if fnmatch.fnmatch(m.get_type().upper(), mpstate.status.watch.upper()):\n mpstate.console.writeln('> '+ str(m))\n mpstate.master().write(m.get_msgbuf())\n mpstate.status.counters['Slave'] += 1\n\n\ndef mkdir_p(dir):\n '''like mkdir -p'''\n if not dir:\n return\n if dir.endswith(\"/\"):\n mkdir_p(dir[:-1])\n return\n if os.path.isdir(dir):\n return\n mkdir_p(os.path.dirname(dir))\n os.mkdir(dir)\n\ndef log_writer():\n '''log writing thread'''\n while True:\n mpstate.logfile_raw.write(mpstate.logqueue_raw.get())\n while not mpstate.logqueue_raw.empty():\n mpstate.logfile_raw.write(mpstate.logqueue_raw.get())\n while not mpstate.logqueue.empty():\n mpstate.logfile.write(mpstate.logqueue.get())\n if mpstate.settings.flushlogs:\n mpstate.logfile.flush()\n mpstate.logfile_raw.flush()\n\n# If state_basedir is NOT set then paths for logs and aircraft\n# directories are relative to mavproxy's cwd\ndef log_paths():\n '''Returns tuple (logdir, telemetry_log_filepath, raw_telemetry_log_filepath)'''\n if opts.aircraft is not None:\n if opts.mission is not None:\n print(opts.mission)\n dirname = \"%s/logs/%s/Mission%s\" % (opts.aircraft, time.strftime(\"%Y-%m-%d\"), opts.mission)\n else:\n dirname = \"%s/logs/%s\" % (opts.aircraft, time.strftime(\"%Y-%m-%d\"))\n # dirname is currently relative. 
Possibly add state_basedir:\n if mpstate.settings.state_basedir is not None:\n dirname = os.path.join(mpstate.settings.state_basedir,dirname)\n mkdir_p(dirname)\n highest = None\n for i in range(1, 10000):\n fdir = os.path.join(dirname, 'flight%u' % i)\n if not os.path.exists(fdir):\n break\n highest = fdir\n if mpstate.continue_mode and highest is not None:\n fdir = highest\n elif os.path.exists(fdir):\n print(\"Flight logs full\")\n sys.exit(1)\n logname = 'flight.tlog'\n logdir = fdir\n else:\n logname = os.path.basename(opts.logfile)\n dir_path = os.path.dirname(opts.logfile)\n if not os.path.isabs(dir_path) and mpstate.settings.state_basedir is not None:\n dir_path = os.path.join(mpstate.settings.state_basedir,dir_path)\n logdir = dir_path\n\n mkdir_p(logdir)\n return (logdir,\n os.path.join(logdir, logname),\n os.path.join(logdir, logname + '.raw'))\n\n\ndef open_telemetry_logs(logpath_telem, logpath_telem_raw):\n '''open log files'''\n if opts.append_log or opts.continue_mode:\n mode = 'a'\n else:\n mode = 'w'\n mpstate.logfile = open(logpath_telem, mode=mode)\n mpstate.logfile_raw = open(logpath_telem_raw, mode=mode)\n print(\"Log Directory: %s\" % mpstate.status.logdir)\n print(\"Telemetry log: %s\" % logpath_telem)\n\n # use a separate thread for writing to the logfile to prevent\n # delays during disk writes (important as delays can be long if camera\n # app is running)\n t = threading.Thread(target=log_writer, name='log_writer')\n t.daemon = True\n t.start()\n\ndef set_stream_rates():\n '''set mavlink stream rates'''\n if (not msg_period.trigger() and\n mpstate.status.last_streamrate1 == mpstate.settings.streamrate and\n mpstate.status.last_streamrate2 == mpstate.settings.streamrate2):\n return\n mpstate.status.last_streamrate1 = mpstate.settings.streamrate\n mpstate.status.last_streamrate2 = mpstate.settings.streamrate2\n for master in mpstate.mav_master:\n if master.linknum == 0:\n rate = mpstate.settings.streamrate\n else:\n rate = mpstate.settings.streamrate2\n if rate != -1:\n master.mav.request_data_stream_send(mpstate.settings.target_system, mpstate.settings.target_component,\n mavutil.mavlink.MAV_DATA_STREAM_ALL,\n rate, 1)\n\ndef check_link_status():\n '''check status of master links'''\n tnow = time.time()\n if mpstate.status.last_message != 0 and tnow > mpstate.status.last_message + 5:\n say(\"no link\")\n mpstate.status.heartbeat_error = True\n for master in mpstate.mav_master:\n if not master.linkerror and (tnow > master.last_message + 5 or master.portdead):\n say(\"link %u down\" % (master.linknum+1))\n master.linkerror = True\n\ndef send_heartbeat(master):\n if master.mavlink10():\n master.mav.heartbeat_send(mavutil.mavlink.MAV_TYPE_GCS, mavutil.mavlink.MAV_AUTOPILOT_INVALID,\n 0, 0, 0)\n else:\n MAV_GROUND = 5\n MAV_AUTOPILOT_NONE = 4\n master.mav.heartbeat_send(MAV_GROUND, MAV_AUTOPILOT_NONE)\n\ndef periodic_tasks():\n '''run periodic checks'''\n if mpstate.status.setup_mode:\n return\n\n if (mpstate.settings.compdebug & 2) != 0:\n return\n\n if mpstate.settings.heartbeat != 0:\n heartbeat_period.frequency = mpstate.settings.heartbeat\n\n if heartbeat_period.trigger() and mpstate.settings.heartbeat != 0:\n mpstate.status.counters['MasterOut'] += 1\n for master in mpstate.mav_master:\n send_heartbeat(master)\n\n if heartbeat_check_period.trigger():\n check_link_status()\n\n set_stream_rates()\n\n # call optional module idle tasks. 
These are called at several hundred Hz\n for (m,pm) in mpstate.modules:\n if hasattr(m, 'idle_task'):\n try:\n m.idle_task()\n except Exception as msg:\n if mpstate.settings.moddebug == 1:\n print(msg)\n elif mpstate.settings.moddebug > 1:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback.print_exception(exc_type, exc_value, exc_traceback,\n limit=2, file=sys.stdout)\n\n # also see if the module should be unloaded:\n if m.needs_unloading:\n unload_module(m.name)\n\ndef main_loop():\n\n if not mpstate.status.setup_mode and not opts.nowait:\n for master in mpstate.mav_master:\n send_heartbeat(master)\n if master.linknum == 0:\n print(\"Waiting for heartbeat from %s\" % master.address)\n master.wait_heartbeat()\n set_stream_rates()\n\n while True:\n if mpstate is None or mpstate.status.exit:\n return\n\n global on_air\n global operation_takeoff\n global time_init_operation_takeoff\n global time_end_operation_takeoff\n\n time_now = int(round(time.time() * 1000))\n\n if operation_takeoff and time_now > time_end_operation_takeoff:\n print(\"Taking off\")\n time_init_operation_takeoff = int(round(time.time() * 1000))\n time_end_operation_takeoff = time_init_operation_takeoff + 7000\n operation_takeoff = False\n on_air = True\n mpstate.input_queue.put(\"takeoff 1\")\n\n if on_air and time_now > time_end_operation_takeoff:\n mpstate.input_queue.put(\"mode guided\")\n print(\"Mode guided on\")\n on_air = False\n\n while not mpstate.input_queue.empty():\n line = mpstate.input_queue.get()\n mpstate.input_count += 1\n cmds = line.split(';')\n if len(cmds) == 1 and cmds[0] == \"\":\n mpstate.empty_input_count += 1\n for c in cmds:\n #print(c)\n process_stdin(c)\n\n for master in mpstate.mav_master:\n if master.fd is None:\n if master.port.inWaiting() > 0:\n process_master(master)\n\n periodic_tasks()\n\n rin = []\n for master in mpstate.mav_master:\n if master.fd is not None and not master.portdead:\n rin.append(master.fd)\n for m in mpstate.mav_outputs:\n rin.append(m.fd)\n for sysid in mpstate.sysid_outputs:\n m = mpstate.sysid_outputs[sysid]\n rin.append(m.fd)\n if rin == []:\n time.sleep(0.0001)\n continue\n\n for fd in mpstate.select_extra:\n rin.append(fd)\n try:\n (rin, win, xin) = select.select(rin, [], [], mpstate.settings.select_timeout)\n except select.error:\n continue\n\n if mpstate is None:\n return\n\n for fd in rin:\n if mpstate is None:\n return\n for master in mpstate.mav_master:\n if fd == master.fd:\n process_master(master)\n if mpstate is None:\n return\n continue\n for m in mpstate.mav_outputs:\n if fd == m.fd:\n process_mavlink(m)\n if mpstate is None:\n return\n continue\n\n for sysid in mpstate.sysid_outputs:\n m = mpstate.sysid_outputs[sysid]\n if fd == m.fd:\n process_mavlink(m)\n if mpstate is None:\n return\n continue\n\n # this allow modules to register their own file descriptors\n # for the main select loop\n if fd in mpstate.select_extra:\n try:\n # call the registered read function\n (fn, args) = mpstate.select_extra[fd]\n fn(args)\n except Exception as msg:\n if mpstate.settings.moddebug == 1:\n print(msg)\n # on an exception, remove it from the select list\n mpstate.select_extra.pop(fd)\n\n ########################## Jorge Cano CODE ##########################\n\n Rollvalue = mpstate.status.msgs['ATTITUDE'].roll #rad\n Pitchvalue = mpstate.status.msgs['ATTITUDE'].pitch #rad\n Yawvalue = mpstate.status.msgs['ATTITUDE'].yaw #rad\n\n # ESTIMATED: fused GPS and accelerometers\n PoseLatLonHei = {}\n PoseLatLonHei['lat'] = 
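main_loop above sequences the takeoff by comparing millisecond timestamps: 5 s after "arm throttle" it queues "takeoff 1", then 7 s later "mode guided". A self-contained sketch of that polling state machine, keeping the original delays and command strings but using a local queue in place of mpstate.input_queue:

    import time
    import queue

    input_queue = queue.Queue()

    def now_ms():
        return int(round(time.time() * 1000))

    # State mirroring the globals shared by main_loop and input_loop;
    # 'arm throttle' would already have been queued by input_loop.
    operation_takeoff = True
    on_air = False
    deadline = now_ms() + 5000           # 5 s after 'arm throttle'

    while operation_takeoff or on_air:
        if operation_takeoff and now_ms() > deadline:
            input_queue.put('takeoff 1')
            deadline = now_ms() + 7000   # 7 s of climb before guided mode
            operation_takeoff, on_air = False, True
        elif on_air and now_ms() > deadline:
            input_queue.put('mode guided')
            on_air = False
        time.sleep(0.05)

    while not input_queue.empty():
        print(input_queue.get())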
math.radians((mpstate.status.msgs['GLOBAL_POSITION_INT'].lat)/1E7) #rad\n PoseLatLonHei['lon'] = math.radians((mpstate.status.msgs['GLOBAL_POSITION_INT'].lon)/1E7) #rad\n PoseLatLonHei['hei'] = (mpstate.status.msgs['GLOBAL_POSITION_INT'].relative_alt)/1000 #meters\n\n PH_quat = quaternion.Quaternion([Rollvalue, Pitchvalue, Yawvalue])\n PH_xyz = global2cartesian(PoseLatLonHei)\n\n #print (PH_quat)\n #print (PH_xyz)\n\n data = jderobot.Pose3DData()\n data.x = PH_xyz['x']\n data.y = PH_xyz['y']\n data.z = PH_xyz['z']\n data.h = 1\n data.q0 = PH_quat.__getitem__(0)\n data.q1 = PH_quat.__getitem__(1)\n data.q2 = PH_quat.__getitem__(2)\n data.q3 = PH_quat.__getitem__(3)\n #print(data)\n PH_Pose3D.setPose3DData(data)\n\n #####################################################################\n\ndef input_loop():\n '''wait for user input'''\n global operation_takeoff\n global time_init_operation_takeoff\n global time_end_operation_takeoff\n\n while mpstate.status.exit != True:\n try:\n if mpstate.status.exit != True:\n if mpstate.udp.bound():\n line = mpstate.udp.readln()\n mpstate.udp.writeln(line)\n elif mpstate.tcp.connected():\n line = mpstate.tcp.readln()\n mpstate.tcp.writeln(line)\n else:\n line = input(mpstate.rl.prompt)\n if line == 'takeoff':\n print(\"Detecto takeoff\")\n operation_takeoff=True\n time_init_operation_takeoff = int(round(time.time() * 1000))\n time_end_operation_takeoff = time_init_operation_takeoff + 5000\n print(time_end_operation_takeoff)\n mpstate.input_queue.put(\"arm throttle\")\n return\n if line == 'land':\n print(\"Orden de aterrizar\")\n on_air = False\n except EOFError:\n mpstate.status.exit = True\n sys.exit(1)\n mpstate.input_queue.put(line)\n\ndef run_script(scriptfile):\n '''run a script file'''\n try:\n f = open(scriptfile, mode='r')\n except Exception:\n return\n mpstate.console.writeln(\"Running script %s\" % scriptfile)\n for line in f:\n line = line.strip()\n if line == \"\" or line.startswith('#'):\n continue\n if line.startswith('@'):\n line = line[1:]\n else:\n mpstate.console.writeln(\"-> %s\" % line)\n process_stdin(line)\n f.close()\n\n########################## Jorge Cano CODE ##########################\n\ndef openPose3DChannel(Pose3D):\n status = 0\n ic = None\n Pose2Tx = Pose3D #Pose3D.getPose3DData()\n #print(Pose3D)\n try:\n ic = Ice.initialize(sys.argv)\n adapter = ic.createObjectAdapterWithEndpoints(\"Pose3DAdapter\", \"default -p 9998\")\n object = Pose2Tx\n adapter.add(object, ic.stringToIdentity(\"Pose3D\"))\n adapter.activate()\n ic.waitForShutdown()\n except:\n traceback.print_exc()\n status = 1\n\n if ic:\n # Clean up\n try:\n ic.destroy()\n except:\n traceback.print_exc()\n status = 1\n\n sys.exit(status)\n\ndef openPose3DChannelWP(Pose3D):\n\n status = 0\n ic = None\n Pose2Rx = Pose3D #Pose3D.getPose3DData()\n try:\n ic = Ice.initialize(sys.argv)\n adapter = ic.createObjectAdapterWithEndpoints(\"Pose3DAdapter\", \"default -p 9994\")\n object = Pose2Rx\n #print (object.getPose3DData())\n adapter.add(object, ic.stringToIdentity(\"Pose3D\"))\n adapter.activate()\n ic.waitForShutdown()\n except:\n traceback.print_exc()\n status = 1\n\n if ic:\n # Clean up\n try:\n ic.destroy()\n except:\n traceback.print_exc()\n status = 1\n\n sys.exit(status)\n\ndef openCMDVelChannel(CMDVel):\n status = 0\n ic = None\n CMDVel2Rx = CMDVel #CMDVel.getCMDVelData()\n try:\n ic = Ice.initialize(sys.argv)\n adapter = ic.createObjectAdapterWithEndpoints(\"CMDVelAdapter\", \"default -p 9997\")\n object = CMDVel2Rx\n print(object)\n adapter.add(object, 
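The attitude/position block above rescales GLOBAL_POSITION_INT fields — lat/lon arrive as degrees multiplied by 1e7 and relative_alt in millimetres — into the radians and metres that the Pose3D payload expects. Just the unit conversion, with literal example values standing in for mpstate's message cache:

    import math

    def position_from_global_int(lat_e7, lon_e7, relative_alt_mm):
        """MAVLink GLOBAL_POSITION_INT scales degrees by 1e7 and uses mm."""
        return {
            'lat': math.radians(lat_e7 / 1e7),   # radians
            'lon': math.radians(lon_e7 / 1e7),   # radians
            'hei': relative_alt_mm / 1000.0,     # metres
        }

    # 40.1912000 deg N, 3.7000000 deg W, 12.5 m above home:
    print(position_from_global_int(401912000, -37000000, 12500))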
ic.stringToIdentity(\"CMDVel\"))\n adapter.activate()\n ic.waitForShutdown()\n except:\n traceback.print_exc()\n status = 1\n\n if ic:\n # Clean up\n try:\n ic.destroy()\n except:\n traceback.print_exc()\n status = 1\n\n sys.exit(status)\n\ndef openExtraChannel(Extra):\n\n status = 0\n ic = None\n Extra2Tx = Extra\n try:\n ic = Ice.initialize(sys.argv)\n adapter = ic.createObjectAdapterWithEndpoints(\"ExtraAdapter\", \"default -p 9995\")\n object = Extra2Tx\n adapter.add(object, ic.stringToIdentity(\"Extra\"))\n adapter.activate()\n ic.waitForShutdown()\n except:\n traceback.print_exc()\n status = 1\n\n if ic:\n # Clean up\n try:\n ic.destroy()\n except:\n traceback.print_exc()\n status = 1\n\n sys.exit(status)\n\nclass NavdataI(jderobot.Navdata):\n def __init__(self):\n pass\n\n def getNavdata(self, current=None):\n data = jderobot.NavdataData()\n return data\n\ndef openNavdataChannel():\n\n status = 0\n ic = None\n Navdata2Tx = NavdataI()\n\n try:\n ic = Ice.initialize(sys.argv)\n adapter = ic.createObjectAdapterWithEndpoints(\"NavdataAdapter\", \"default -p 9996\")\n object = Navdata2Tx\n adapter.add(object, ic.stringToIdentity(\"Navdata\"))\n adapter.activate()\n ic.waitForShutdown()\n except:\n traceback.print_exc()\n status = 1\n\n if ic:\n # Clean up\n try:\n ic.destroy()\n except:\n traceback.print_exc()\n status = 1\n\n sys.exit(status)\n\ndef sendCMDVel2Vehicle(CMDVel,Pose3D):\n while True:\n\n CMDVel2send = CMDVel.getCMDVelData()\n Pose3D2send = Pose3D.getPose3DData()\n #print(Pose3D2send)\n NEDvel = body2NED(CMDVel2send, Pose3D2send) # [x,y,z]\n linearXstring = str(NEDvel[0])\n linearYstring = str(NEDvel[1])\n linearZstring = str(NEDvel[2])\n\n angular = Pose3D2send.q3 + CMDVel.angularZ\n if angular > 1:\n angular = angular - 2\n elif angular < -1:\n angular = angular + 2\n angularZstring = str(angular*180)\n\n velocitystring = 'velocity '+ linearXstring + ' ' + linearYstring + ' ' + linearZstring\n angularString = 'setyaw ' + angularZstring + ' 1 0'\n\n process_stdin(velocitystring) # SET_POSITION_TARGET_LOCAL_NED\n process_stdin(angularString)\n\ndef sendWayPoint2Vehicle(Pose3D):\n\n while True:\n time.sleep(1)\n wayPointPoseXYZ = Pose3D.getPose3DData()\n wayPointXYZ = {}\n wayPointXYZ['x'] = wayPointPoseXYZ.x\n wayPointXYZ['y'] = wayPointPoseXYZ.y\n wayPointXYZ['z'] = wayPointPoseXYZ.z\n wayPointLatLonHei = cartesian2global(wayPointXYZ)\n\n latittude = str(wayPointLatLonHei['lat'])\n longitude = str(wayPointLatLonHei['lon'])\n altittude = str(int(wayPointLatLonHei['hei']))\n\n WPstring = 'guided ' + latittude + ' ' + longitude + ' ' + altittude\n process_stdin(WPstring)\n\n #print wayPoint\n\ndef landDecision(PH_Extra):\n\n global operation_takeoff\n global time_init_operation_takeoff\n global time_end_operation_takeoff\n while True:\n if PH_Extra.landDecision:\n print(\"Landing\")\n process_stdin(\"land\")\n PH_Extra.setLand(False)\n if PH_Extra.takeOffDecision:\n print(\"Takeoff\")\n operation_takeoff=True\n print(time_end_operation_takeoff)\n time_init_operation_takeoff = int(round(time.time() * 1000))\n time_end_operation_takeoff = time_init_operation_takeoff + 5000\n print(\"Arming proppellers\")\n mpstate.input_queue.put(\"arm throttle\")\n PH_Extra.setTakeOff(False)\n\ndef global2cartesian(poseLatLonHei):\n\n wgs84_radius = 6378137 #meters\n wgs84_flattening = 1 - 1 / 298.257223563\n eartPerim = wgs84_radius * 2 * math.pi\n\n earthRadiusLon = wgs84_radius * math.cos(poseLatLonHei['lat'])/wgs84_flattening\n eartPerimLon = earthRadiusLon * 2 * math.pi\n\n poseXYZ = {}\n 
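openPose3DChannel, openPose3DChannelWP, openCMDVelChannel, openExtraChannel and openNavdataChannel differ only in adapter name, port and identity string. One way they could collapse into a single helper — a refactoring sketch, not the project's API, assuming zeroc-ice is installed as the original already requires; the legacy ic.stringToIdentity call is kept from the code above:

    import sys
    import traceback
    import Ice

    def open_ice_channel(servant, adapter_name, port, identity):
        # Serve one servant on 'default -p <port>' until shutdown, then clean up.
        status, ic = 0, None
        try:
            ic = Ice.initialize(sys.argv)
            adapter = ic.createObjectAdapterWithEndpoints(
                adapter_name, 'default -p %d' % port)
            adapter.add(servant, ic.stringToIdentity(identity))
            adapter.activate()
            ic.waitForShutdown()
        except Exception:
            traceback.print_exc()
            status = 1
        finally:
            if ic:
                try:
                    ic.destroy()
                except Exception:
                    traceback.print_exc()
                    status = 1
        sys.exit(status)

    # e.g. open_ice_channel(PH_Pose3D, 'Pose3DAdapter', 9998, 'Pose3D')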
poseXYZ['x'] = poseLatLonHei['lon'] * eartPerimLon / (2*math.pi)\n poseXYZ['y'] = poseLatLonHei['lat'] * eartPerim / (2*math.pi)\n poseXYZ['z'] = poseLatLonHei['hei']\n\n return poseXYZ\n\ndef cartesian2global(poseXYZ):\n\n wgs84_radius = 6378137 # meters\n wgs84_flattening = 1 - 1 / 298.257223563\n eartPerim = wgs84_radius * 2 * math.pi\n referenceLat = 40.1912 ##################### Suposed to be Vehicle lattitude\n\n radLat = math.radians(referenceLat)\n earthRadiusLon = wgs84_radius * math.cos(radLat)/wgs84_flattening\n eartPerimLon = earthRadiusLon * 2 * math.pi\n\n poseLatLonHei = {}\n poseLatLonHei['lat'] = poseXYZ['y'] * 360 / eartPerim\n poseLatLonHei['lon'] = poseXYZ['x'] * 360 / eartPerimLon\n poseLatLonHei['hei'] = poseXYZ['z']\n\n return poseLatLonHei\n\ndef body2NED(CMDVel, Pose3D):\n\n\n #q1 = [0, CMDVel.linearX, CMDVel.linearY, CMDVel.linearZ]\n #q2 = [Pose3D.q0, Pose3D.q1, Pose3D.q2, Pose3D.q3]\n\n #q1 = qNormal(q1)\n #q2 = qNormal(q2)\n\n ##rotation = q2*q1*q2'\n\n #q2inverse = qInverse(q2)\n #qtempotal = qMultiply(q1,q2inverse)\n #q = qMultiply(q2,qtempotal)\n\n #rotatedVector = q[1:len(q)] #obtain [q1,q2,q3]\n\n #return rotatedVector\n\n q0 = Pose3D.q0\n q1 = Pose3D.q1\n q2 = Pose3D.q2\n q3 = Pose3D.q3\n\n # obtain eulers from quaternion TO BE IMPROVED!!!!!!!!!!!\n\n #roll = 1/ math.tan((2*(q1*q2+q0*q3))/(q3*q3+q2*q2-q1*q1-q0*q0))\n #pitch = 1/math.sin(-2*(q0*q2-q1*q3))\n #yaw = 1/ math.tan((2*(q0*q1+q3*q2))/(q3*q3-q2*q2-q1*q1+q0*q0))\n\n # Body velocity (x,y,z)\n\n bvx = CMDVel.linearX\n bvy = CMDVel.linearY\n bvz = CMDVel.linearZ\n\n NEDvel = [0,0,0] #[x,y,z]\n\n #NEDvel[0] = bvx * math.cos(pitch)*math.cos(yaw) + bvy * (math.sin(roll)*math.sin(pitch)*math.cos(yaw) - math.cos(roll)*math.sin(yaw)) + bvz * (math.cos(roll)*math.sin(pitch)*math.cos(yaw) + math.sin(roll)*math.sin(yaw))\n #NEDvel[1] = bvx * math.cos(pitch)*math.sin(yaw) + bvy * (math.sin(roll)*math.sin(pitch)*math.sin(yaw) + math.cos(roll)*math.cos(yaw)) + bvz * (math.cos(roll)*math.sin(pitch)*math.sin(yaw) - math.sin(roll)*math.cos(yaw))\n #NEDvel[2] = -bvx * math.sin(pitch) + bvy * (math.sin(roll)*math.cos(pitch)) + bvz * (math.cos(roll)*math.cos(pitch))\n\n NEDvel[0]=bvx\n NEDvel[1]=bvy\n NEDvel[2]=bvz\n\n return NEDvel\n\ndef qMultiply (q1,q2):\n\n q1 = qNormal(q1)\n q2 = qNormal(q2)\n\n # quaternion1\n w1 = q1[0]\n x1 = q1[1]\n y1 = q1[2]\n z1 = q1[3]\n\n #quaternion2\n w2 = q2[0]\n x2 = q2[1]\n y2 = q2[2]\n z2 = q2[3]\n\n w = w1*w2 - x1*x2 - y1*y2 - z1*z2\n x = w1*x2 + x1*w2 + y1*z2 - z1*y2\n y = w1*y2 + y1*w2 + z1*x2 - x1*z2\n z = w1*z2 + z1*w2 + x1*y2 - y1*x2\n\n q = [w,x,y,z]\n\n q = qNormal(q)\n return q\n\ndef qNormal(q1):\n\n qmodule = math.sqrt(q1[0]*q1[0] + q1[1]*q1[1] + q1[2]*q1[2] + q1[3]*q1[3])\n q = [0,0,0,0]\n\n if (qmodule == 0):\n qmodule = 0.000000000001\n\n q[0] = q1[0] / qmodule\n q[1] = q1[1] / qmodule\n q[2] = q1[2] / qmodule\n q[3] = q1[3] / qmodule\n\n return q\n\ndef qConjugate(q1):\n\n q1 = qNormal(q1)\n q = [0,0,0,0]\n q[0] = q1[0]\n q[1] = -q1[1]\n q[2] = -q1[2]\n q[3] = -q1[3]\n\n q = qNormal(q)\n return q\n\ndef qInverse(q1):\n\n q1 = qNormal(q1)\n qconjugate = qConjugate(q1)\n qmodule = math.sqrt(q1[0] * q1[0] + q1[1] * q1[1] + q1[2] * q1[2] + q1[3] * q1[3])\n\n if (qmodule == 0):\n qmodule = 0.000000000001\n\n q = [0,0,0,0]\n q[0] = qconjugate[0] / qmodule\n q[1] = qconjugate[1] / qmodule\n q[2] = qconjugate[2] / qmodule\n q[3] = qconjugate[3] / qmodule\n\n q = qNormal(q)\n return q\n\n#####################################################################\n\nif __name__ == 
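global2cartesian and cartesian2global above use an equirectangular approximation, and cartesian2global hard-codes 40.1912 as its reference latitude (the comment flags that it is supposed to be the vehicle's latitude), so the pair only round-trips cleanly near that latitude. A compact restatement plus a round-trip check under exactly that assumption:

    import math

    WGS84_R = 6378137.0
    FLATTENING = 1 - 1 / 298.257223563
    REF_LAT = 40.1912  # fixed reference latitude hard-coded in cartesian2global

    def to_xyz(lat_rad, lon_rad, hei):
        perim = WGS84_R * 2 * math.pi
        perim_lon = (WGS84_R * math.cos(lat_rad) / FLATTENING) * 2 * math.pi
        return (lon_rad * perim_lon / (2 * math.pi),
                lat_rad * perim / (2 * math.pi),
                hei)

    def to_global(x, y, z):
        # Note the asymmetry inherited from the original: the input was in
        # radians but the output is degrees (the 'guided lat lon alt'
        # command string wants degrees).
        perim = WGS84_R * 2 * math.pi
        perim_lon = (WGS84_R * math.cos(math.radians(REF_LAT)) / FLATTENING) * 2 * math.pi
        return y * 360 / perim, x * 360 / perim_lon, z

    x, y, z = to_xyz(math.radians(40.1912), math.radians(-3.7), 100.0)
    lat, lon, hei = to_global(x, y, z)
    print(round(lat, 6), round(lon, 6), hei)  # 40.1912 -3.7 100.0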
'__main__':\n from optparse import OptionParser\n parser = OptionParser(\"mavproxy.py [options]\")\n\n parser.add_option(\"--master\", dest=\"master\", action='append',\n metavar=\"DEVICE[,BAUD]\", help=\"MAVLink master port and optional baud rate\",\n default=[])\n parser.add_option(\"--udp\", dest=\"udp\", action='append', help=\"run udp server\")\n parser.add_option(\"--tcp\", dest=\"tcp\", action='append', help=\"run tcp server\")\n parser.add_option(\"--out\", dest=\"output\", action='append',\n metavar=\"DEVICE[,BAUD]\", help=\"MAVLink output port and optional baud rate\",\n default=[])\n parser.add_option(\"--baudrate\", dest=\"baudrate\", type='int',\n help=\"default serial baud rate\", default=57600)\n parser.add_option(\"--sitl\", dest=\"sitl\", default=None, help=\"SITL output port\")\n parser.add_option(\"--streamrate\",dest=\"streamrate\", default=4, type='int',\n help=\"MAVLink stream rate\")\n parser.add_option(\"--source-system\", dest='SOURCE_SYSTEM', type='int',\n default=255, help='MAVLink source system for this GCS')\n parser.add_option(\"--source-component\", dest='SOURCE_COMPONENT', type='int',\n default=0, help='MAVLink source component for this GCS')\n parser.add_option(\"--target-system\", dest='TARGET_SYSTEM', type='int',\n default=0, help='MAVLink target master system')\n parser.add_option(\"--target-component\", dest='TARGET_COMPONENT', type='int',\n default=0, help='MAVLink target master component')\n parser.add_option(\"--logfile\", dest=\"logfile\", help=\"MAVLink master logfile\",\n default='mav.tlog')\n parser.add_option(\"-a\", \"--append-log\", dest=\"append_log\", help=\"Append to log files\",\n action='store_true', default=False)\n parser.add_option(\"--quadcopter\", dest=\"quadcopter\", help=\"use quadcopter controls\",\n action='store_true', default=False)\n parser.add_option(\"--setup\", dest=\"setup\", help=\"start in setup mode\",\n action='store_true', default=False)\n parser.add_option(\"--nodtr\", dest=\"nodtr\", help=\"disable DTR drop on close\",\n action='store_true', default=False)\n parser.add_option(\"--show-errors\", dest=\"show_errors\", help=\"show MAVLink error packets\",\n action='store_true', default=False)\n parser.add_option(\"--speech\", dest=\"speech\", help=\"use text to speach\",\n action='store_true', default=False)\n parser.add_option(\"--aircraft\", dest=\"aircraft\", help=\"aircraft name\", default=None)\n parser.add_option(\"--cmd\", dest=\"cmd\", help=\"initial commands\", default=None, action='append')\n parser.add_option(\"--console\", action='store_true', help=\"use GUI console\")\n parser.add_option(\"--map\", action='store_true', help=\"load map module\")\n parser.add_option(\n '--load-module',\n action='append',\n default=[],\n help='Load the specified module. 
Can be used multiple times, or with a comma separated list')\n parser.add_option(\"--mav09\", action='store_true', default=False, help=\"Use MAVLink protocol 0.9\")\n parser.add_option(\"--auto-protocol\", action='store_true', default=False, help=\"Auto detect MAVLink protocol version\")\n parser.add_option(\"--nowait\", action='store_true', default=False, help=\"don't wait for HEARTBEAT on startup\")\n parser.add_option(\"-c\", \"--continue\", dest='continue_mode', action='store_true', default=False, help=\"continue logs\")\n parser.add_option(\"--dialect\", default=\"ardupilotmega\", help=\"MAVLink dialect\")\n parser.add_option(\"--rtscts\", action='store_true', help=\"enable hardware RTS/CTS flow control\")\n parser.add_option(\"--moddebug\", type=int, help=\"module debug level\", default=0)\n parser.add_option(\"--mission\", dest=\"mission\", help=\"mission name\", default=None)\n parser.add_option(\"--daemon\", action='store_true', help=\"run in daemon mode, do not start interactive shell\")\n parser.add_option(\"--profile\", action='store_true', help=\"run the Yappi python profiler\")\n parser.add_option(\"--state-basedir\", default=None, help=\"base directory for logs and aircraft directories\")\n parser.add_option(\"--version\", action='store_true', help=\"version information\")\n parser.add_option(\"--default-modules\", default=\"log,wp,rally,fence,param,relay,tuneopt,arm,mode,calibration,rc,auxopt,misc,cmdlong,battery,terrain,output\", help='default module list')\n\n (opts, args) = parser.parse_args()\n\n # warn people about ModemManager which interferes badly with APM and Pixhawk\n if os.path.exists(\"/usr/sbin/ModemManager\"):\n print(\"WARNING: You should uninstall ModemManager as it conflicts with APM and Pixhawk\")\n\n if opts.mav09:\n os.environ['MAVLINK09'] = '1'\n from pymavlink import mavutil, mavparm\n mavutil.set_dialect(opts.dialect)\n\n #version information\n if opts.version:\n import pkg_resources\n version = pkg_resources.require(\"mavproxy\")[0].version\n print(\"MAVProxy is a modular ground station using the mavlink protocol\")\n print(\"MAVProxy Version: \" + version)\n sys.exit(1)\n\n # global mavproxy state\n mpstate = MPState()\n mpstate.status.exit = False\n mpstate.command_map = command_map\n mpstate.continue_mode = opts.continue_mode\n # queues for logging\n mpstate.logqueue = Queue.Queue()\n mpstate.logqueue_raw = Queue.Queue()\n\n if opts.udp:\n mpstate.udp.connect(opts.udp[0].split(\":\")[0], int(opts.udp[0].split(\":\")[1]))\n print(\"Connected (UDP) to \" + mpstate.udp.address + \":\" + str(mpstate.udp.port))\n\n if opts.tcp:\n mpstate.tcp.connect(opts.tcp[0].split(\":\")[0], int(opts.tcp[0].split(\":\")[1]))\n print(\"Client (TCP) connected at \" + mpstate.tcp.client[0] + \":\" + str(mpstate.tcp.port))\n\n if opts.speech:\n # start the speech-dispatcher early, so it doesn't inherit any ports from\n # modules/mavutil\n load_module('speech')\n\n if not opts.master:\n serial_list = mavutil.auto_detect_serial(preferred_list=['*FTDI*',\"*Arduino_Mega_2560*\", \"*3D_Robotics*\", \"*USB_to_UART*\", '*PX4*', '*FMU*'])\n print('Auto-detected serial ports are:')\n for port in serial_list:\n print(\"%s\" % port)\n\n # container for status information\n mpstate.settings.target_system = opts.TARGET_SYSTEM\n mpstate.settings.target_component = opts.TARGET_COMPONENT\n\n mpstate.mav_master = []\n\n mpstate.rl = rline.rline(\"MAV> \", mpstate)\n\n def quit_handler(signum = None, frame = None):\n #print 'Signal handler called with signal', signum\n if 
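Several options above take a DEVICE[,BAUD] value (--master, --out), with --baudrate 57600 as the fallback. MAVProxy's link module does its own parsing, so this is only the shape of it — a hypothetical helper splitting such a spec:

    def parse_device_spec(spec, default_baud=57600):
        """Split 'DEVICE[,BAUD]' into (device, baud)."""
        if ',' in spec:
            device, baud = spec.rsplit(',', 1)
            return device, int(baud)
        return spec, default_baud

    print(parse_device_spec('/dev/ttyUSB0,115200'))  # ('/dev/ttyUSB0', 115200)
    print(parse_device_spec('0.0.0.0:14550'))        # falls back to 57600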
mpstate.status.exit:\n print('Clean shutdown impossible, forcing an exit')\n sys.exit(0)\n else:\n mpstate.status.exit = True\n\n # Listen for kill signals to cleanly shutdown modules\n fatalsignals = [signal.SIGTERM]\n try:\n fatalsignals.append(signal.SIGHUP)\n fatalsignals.append(signal.SIGQUIT)\n except Exception:\n pass\n if opts.daemon: # SIGINT breaks readline parsing - if we are interactive, just let things die\n fatalsignals.append(signal.SIGINT)\n\n for sig in fatalsignals:\n signal.signal(sig, quit_handler)\n\n load_module('link', quiet=True)\n\n mpstate.settings.source_system = opts.SOURCE_SYSTEM\n mpstate.settings.source_component = opts.SOURCE_COMPONENT\n\n # open master link\n for mdev in opts.master:\n if not mpstate.module('link').link_add(mdev):\n sys.exit(1)\n\n if not opts.master and len(serial_list) == 1:\n print(\"Connecting to %s\" % serial_list[0])\n mpstate.module('link').link_add(serial_list[0].device)\n elif not opts.master:\n wifi_device = '0.0.0.0:14550'\n mpstate.module('link').link_add(wifi_device)\n\n\n # open any mavlink output ports\n for port in opts.output:\n mpstate.mav_outputs.append(mavutil.mavlink_connection(port, baud=int(opts.baudrate), input=False))\n\n if opts.sitl:\n mpstate.sitl_output = mavutil.mavudp(opts.sitl, input=False)\n\n mpstate.settings.streamrate = opts.streamrate\n mpstate.settings.streamrate2 = opts.streamrate\n\n if opts.state_basedir is not None:\n mpstate.settings.state_basedir = opts.state_basedir\n\n msg_period = mavutil.periodic_event(1.0/15)\n heartbeat_period = mavutil.periodic_event(1)\n heartbeat_check_period = mavutil.periodic_event(0.33)\n\n mpstate.input_queue = Queue.Queue()\n mpstate.input_count = 0\n mpstate.empty_input_count = 0\n if opts.setup:\n mpstate.rl.set_prompt(\"\")\n\n # call this early so that logdir is setup based on --aircraft\n (mpstate.status.logdir, logpath_telem, logpath_telem_raw) = log_paths()\n\n if not opts.setup:\n # some core functionality is in modules\n standard_modules = opts.default_modules.split(',')\n for m in standard_modules:\n load_module(m, quiet=True)\n\n if opts.console:\n process_stdin('module load console')\n\n if opts.map:\n process_stdin('module load map')\n\n for module in opts.load_module:\n modlist = module.split(',')\n for mod in modlist:\n process_stdin('module load %s' % mod)\n\n if 'HOME' in os.environ and not opts.setup:\n start_script = os.path.join(os.environ['HOME'], \".mavinit.scr\")\n if os.path.exists(start_script):\n run_script(start_script)\n if 'LOCALAPPDATA' in os.environ and not opts.setup:\n start_script = os.path.join(os.environ['LOCALAPPDATA'], \"MAVProxy\", \"mavinit.scr\")\n if os.path.exists(start_script):\n run_script(start_script)\n\n if opts.aircraft is not None:\n start_script = os.path.join(opts.aircraft, \"mavinit.scr\")\n if os.path.exists(start_script):\n run_script(start_script)\n else:\n print(\"no script %s\" % start_script)\n\n if opts.cmd is not None:\n for cstr in opts.cmd:\n cmds = cstr.split(';')\n for c in cmds:\n process_stdin(c)\n\n if opts.profile:\n import yappi # We do the import here so that we won't barf if run normally and yappi not available\n yappi.start()\n\n # log all packets from the master, for later replay\n open_telemetry_logs(logpath_telem, logpath_telem_raw)\n\n ########################## Jorge Cano CODE ##########################\n\n PH_Pose3D = Pose3DI(0,0,0,0,0,0,0,0) #1 to avoid indeterminations\n PH_CMDVel = CMDVelI(0,0,0,0,0,0) #1 to avoid indeterminations\n PH_Extra = ExtraI()\n WP_Pose3D = 
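The shutdown wiring above registers one handler for every fatal signal, tolerating platforms (Windows) where SIGHUP/SIGQUIT don't exist, and only adds SIGINT in daemon mode because it interferes with readline when interactive. A minimal standalone version of that registration pattern:

    import signal
    import sys

    def quit_handler(signum=None, frame=None):
        # First signal asks for a clean shutdown; a second one forces exit.
        if quit_handler.exiting:
            print('Clean shutdown impossible, forcing an exit')
            sys.exit(0)
        quit_handler.exiting = True
    quit_handler.exiting = False

    fatalsignals = [signal.SIGTERM]
    for name in ('SIGHUP', 'SIGQUIT'):   # absent on Windows
        if hasattr(signal, name):
            fatalsignals.append(getattr(signal, name))

    for sig in fatalsignals:
        signal.signal(sig, quit_handler)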
Pose3DI(0,0,0,0,0,0,0,0)\n\n #####################################################################\n global operation_takeoff\n global on_air\n global time_init_operation_takeoff\n global time_end_operation_takeoff\n\n operation_takeoff = False\n on_air = False\n time_init_operation_takeoff = 10000000000000000000\n time_end_operation_takeoff = 10000000000000000000\n print(\"Variables a false\")\n # run main loop as a thread\n\n mpstate.status.thread = threading.Thread(target=main_loop, name='main_loop')\n mpstate.status.thread.daemon = True\n mpstate.status.thread.start()\n\n ########################## Jorge Cano CODE ##########################\n\n #Open an ICE TX communication and leave it open in a parallel threat\n\n PoseTheading = threading.Thread(target=openPose3DChannel, args=(PH_Pose3D,), name='Pose_Theading')\n PoseTheading.daemon = True\n PoseTheading.start()\n\n # Open an ICE RX communication and leave it open in a parallel threat\n\n CMDVelTheading = threading.Thread(target=openCMDVelChannel, args=(PH_CMDVel,), name='CMDVel_Theading')\n CMDVelTheading.daemon = True\n CMDVelTheading.start()\n\n # Open an ICE TX communication and leave it open in a parallel threat\n\n CMDVelTheading = threading.Thread(target=openExtraChannel, args=(PH_Extra,), name='Extra_Theading')\n CMDVelTheading.daemon = True\n CMDVelTheading.start()\n\n # Open an ICE channel empty\n\n CMDVelTheading = threading.Thread(target=openNavdataChannel, args=(), name='Navdata_Theading')\n CMDVelTheading.daemon = True\n CMDVelTheading.start()\n\n # # Open an MAVLink TX communication and leave it open in a parallel threat\n #\n PoseTheading = threading.Thread(target=sendCMDVel2Vehicle, args=(PH_CMDVel,PH_Pose3D,), name='TxCMDVel_Theading')\n PoseTheading.daemon = True\n PoseTheading.start()\n\n\n # Open an ICE TX communication and leave it open in a parallel threat\n\n PoseTheading = threading.Thread(target=openPose3DChannelWP, args=(WP_Pose3D,), name='WayPoint_Theading')\n PoseTheading.daemon = True\n PoseTheading.start()\n\n # Open an MAVLink TX communication and leave it open in a parallel threat\n\n PoseTheading = threading.Thread(target=sendWayPoint2Vehicle, args=(WP_Pose3D,), name='WayPoint2Vehicle_Theading')\n PoseTheading.daemon = True\n PoseTheading.start()\n\n # Open an MAVLink TX communication and leave it open in a parallel threat\n\n PoseTheading = threading.Thread(target=landDecision, args=(PH_Extra,), name='LandDecision2Vehicle_Theading')\n PoseTheading.daemon = True\n PoseTheading.start()\n\n\n\n #while True:\n # time.sleep(1)\n # Posejarl = PH_Pose3D.getPose3DData()\n # print (Posejarl)\n\n #####################################################################\n\n # use main program for input. This ensures the terminal cleans\n # up on exit\n while (mpstate.status.exit != True):\n try:\n if opts.daemon:\n time.sleep(0.1)\n else:\n input_loop()\n except KeyboardInterrupt:\n if mpstate.settings.requireexit:\n print(\"Interrupt caught. 
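The startup block above launches eight daemon threads with near-identical boilerplate, reusing the PoseTheading/CMDVelTheading variables for unrelated threads (note the "Theading" spelling, and that each later assignment silently shadows the previous one; the comments also say "threat" where "thread" is meant). A small helper that would make the wiring one line per channel — a refactoring sketch, not the project's API:

    import threading

    def spawn_daemon(target, name, *args):
        """Start target(*args) on a named daemon thread and return it."""
        t = threading.Thread(target=target, args=args, name=name)
        t.daemon = True
        t.start()
        return t

    # Mirroring the original wiring:
    # spawn_daemon(openPose3DChannel, 'Pose_Thread', PH_Pose3D)
    # spawn_daemon(openCMDVelChannel, 'CMDVel_Thread', PH_CMDVel)
    # spawn_daemon(sendCMDVel2Vehicle, 'TxCMDVel_Thread', PH_CMDVel, PH_Pose3D)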
Use 'exit' to quit MAVProxy.\")\n\n #Just lost the map and console, get them back:\n for (m,pm) in mpstate.modules:\n if m.name in [\"map\", \"console\"]:\n if hasattr(m, 'unload'):\n try:\n m.unload()\n except Exception:\n pass\n reload(m)\n m.init(mpstate)\n\n else:\n mpstate.status.exit = True\n sys.exit(1)\n\n if opts.profile:\n yappi.get_func_stats().print_all()\n yappi.get_thread_stats().print_all()\n\n #this loop executes after leaving the above loop and is for cleanup on exit\n for (m,pm) in mpstate.modules:\n if hasattr(m, 'unload'):\n print(\"Unloading module %s\" % m.name)\n m.unload()\n\n sys.exit(1)\n","repo_name":"TheRoboticsClub/colab-gsoc2017-SepehrMohaimanian","sub_path":"src/drivers/MAVLinkServer/MAVProxy/mavproxy.py","file_name":"mavproxy.py","file_ext":"py","file_size_in_byte":56338,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"38880454206","text":"#!/usr/bin/python\nimport argparse\nimport csv\nimport re\nparser = argparse.ArgumentParser() \n\n# Sort a dictionary of word-frequency pairs in order of descending frequency.\ndef sortFreqDict(freqdict):\n aux = [(freqdict[key],key) for key in freqdict]\n aux.sort()\n aux.reverse()\n return aux\n\nwordstring=''\n\nparser.add_argument(\"--file\", \"-f\", type=str, required=True)\nparser.add_argument(\"--file_with_context\", \"-fc\", type=str, required=True)\nparser.add_argument(\"--topnwords\", \"-N\", type=int, required=True)\n\nargs = parser.parse_args()\n\n#remember this works only for lists after all...\nwith open (args.file, \"r\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=None)\n\n for i,line in enumerate(reader):\n if i<=args.topnwords: #find first N words\n #I want the second column which are the unique words for each category\n wordstring += line[1]+\"\\n\"\n\nwordlist = wordstring.split(\"\\n\")\n#print (lines[0])\nwordfreq = []\nfor word in wordlist:\n wordfrequency=0\n with open (args.file_with_context,\"r\") as f2: \n for line in f2:\n if re.search(rf\"\\b{word}\\b\", line, re.IGNORECASE):\n wordfrequency+=1\n wordfreq.append(wordfrequency)\n\ndictionary = dict(list(zip(wordlist,wordfreq)))\nsorteddict = sortFreqDict(dictionary)\n\nfor s in sorteddict:\n #print only if found\n if (s[0] >0): \n print(str(s[0])+\"\\t\"+str(s[1]))","repo_name":"katnastou/BioBERT-based-entity-type-classifier","sub_path":"extra_scripts/find_words_from_lists.py","file_name":"find_words_from_lists.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"72051020951","text":"#!/usr/bin/env python\nfrom __future__ import print_function\nimport argparse\nimport glob\nimport json\nimport sys\nimport numpy as np\nimport os\n\nLEARNED_PARAM_KEY = \"readErrorModel\"\nMIN_DIST_OFF = .00000001\n\ndef parse_args():\n parser = argparse.ArgumentParser(\"Consolidates output learned parameters for chunked marginPhase run\")\n parser.add_argument('--params', '-p', dest='param_glob', default=\"*.json\", type=str,\n help='Matches for the params we\\'re getting values for')\n parser.add_argument('--output', '-o', dest='output_file', default=None, type=str,\n help='Output file. 
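The find_words_from_lists.py record above counts, for each candidate word, how many context lines contain it as a whole word via re.search(rf"\b{word}\b", ...). The same step on in-memory data, hardened with re.escape (the original would mis-handle words containing regex metacharacters) and one precompiled pattern per word instead of recompiling on every line:

    import re

    def whole_word_counts(words, lines):
        """Count lines containing each word as a whole word, case-insensitively."""
        counts = {}
        for word in words:
            pattern = re.compile(r'\b%s\b' % re.escape(word), re.IGNORECASE)
            counts[word] = sum(1 for line in lines if pattern.search(line))
        return counts

    lines = ['Kinase binds ATP', 'the kinase domain', 'no match here']
    print(whole_word_counts(['kinase', 'ATP'], lines))  # {'kinase': 2, 'ATP': 1}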
Default:stdout')\n parser.add_argument('--alphabet_size', '-s', dest='alphabet_size', default=5, type=int,\n help='Size of alphabet for parameters')\n\n return parser.parse_args()\n\n\ndef log(msg, include_stdout_also = False):\n print(msg, file=sys.stderr)\n if include_stdout_also:\n print(msg, file=sys.stdout)\n\n\ndef print_params(param_list, alphabet_size, file=sys.stderr):\n file.write(\" [\\n\")\n for i in range(alphabet_size):\n file.write(\" \" * 8)\n for j in range(alphabet_size):\n idx = i * alphabet_size + j\n file.write(\" %.8f\" % param_list[idx])\n if idx != alphabet_size * alphabet_size - 1: file.write(\",\")\n file.write(\"\\n\")\n file.write(\" ]\\n\")\n\n\ndef main():\n args = parse_args()\n\n in_params = glob.glob(args.param_glob)\n if len(in_params) == 0:\n log(\"No files matching {}\".format(args.param_glob))\n return 1\n else:\n log(\"Analyzing {} files\".format(len(in_params)))\n\n param_size = args.alphabet_size ** 2\n all_param_values = [[] for _ in range(param_size)]\n\n missing_key = 0\n wrong_length = 0\n for in_param in in_params:\n with open(in_param) as input:\n param_str = \"\"\n for line in input:\n param_str += \" \".join(line.strip().split())\n if param_str.endswith(\",}\"):\n param_str = param_str.replace(\",}\", \" }\")\n params = json.loads(param_str)\n if LEARNED_PARAM_KEY not in params:\n if missing_key == 0: log(\"Key '{}' not in {}\".format(LEARNED_PARAM_KEY, in_param))\n missing_key += 1\n continue\n learned_param = params[LEARNED_PARAM_KEY]\n if len(learned_param) != param_size:\n if wrong_length == 0: log(\"Expected size {}x{}, got {} in {}\".format(\n args.alphabet_size,args.alphabet_size,len(learned_param), in_param))\n wrong_length += 1\n continue\n for i in range(param_size):\n all_param_values[i].append(learned_param[i])\n\n if missing_key != 0: log(\"{} files were missing keys\".format(missing_key))\n if wrong_length != 0: log(\"{} files had the wrong length\".format(wrong_length))\n if len(all_param_values[0]) == 0:\n log(\"No valid files matching {}\".format(args.param_glob))\n return 1\n else:\n log(\"Consolidating params from {} files\".format(len(all_param_values[0])))\n\n stddev_params = [np.std(x) for x in all_param_values]\n output_params = [np.mean(x) for x in all_param_values]\n for i in range(args.alphabet_size):\n row_total = sum(output_params[(i*args.alphabet_size):((i+1)*args.alphabet_size)])\n for j in range(args.alphabet_size):\n idx = i*args.alphabet_size + j\n output_params[idx] = output_params[idx] / row_total\n new_row_total = sum(output_params[(i*args.alphabet_size):((i+1)*args.alphabet_size)])\n assert abs(new_row_total-1) < MIN_DIST_OFF\n assert abs(sum(output_params)- args.alphabet_size) < MIN_DIST_OFF\n\n log(\"\\nSTD DEV (beware if these are large):\")\n print_params(stddev_params, args.alphabet_size)\n\n log(\"\\nOutput params:\")\n print_params(output_params, args.alphabet_size, file=(sys.stdout if args.output_file is None else sys.stderr))\n\n if args.output_file is not None:\n with open(args.output_file, 'w') as output:\n print_params(output_params, args.alphabet_size, file=output)\n\n log(\"\\nFin.\")\n\n\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"benedictpaten/marginPhase","sub_path":"toil/src/toil_marginphase/scripts/consolidate_params.py","file_name":"consolidate_params.py","file_ext":"py","file_size_in_byte":4212,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"5"} +{"seq_id":"17603304814","text":"from django.urls import path, 
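After averaging the learned matrices, consolidate_params.py renormalises each alphabet_size x alphabet_size row to sum to 1 and asserts the result within MIN_DIST_OFF. That normalisation step on a flat row-major list, as a standalone sketch:

    import numpy as np

    def normalise_rows(flat, n):
        """Row-normalise a flat n*n row-major list so each row sums to 1."""
        m = np.asarray(flat, dtype=float).reshape(n, n)
        m /= m.sum(axis=1, keepdims=True)
        assert np.allclose(m.sum(axis=1), 1.0)   # mirrors the MIN_DIST_OFF checks
        return m.ravel().tolist()

    print(normalise_rows([1, 3, 2, 2], 2))  # [0.25, 0.75, 0.5, 0.5]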
re_path\nfrom . import views\n\nurlpatterns = [\n\n    path('', views.post_list, name='post_list'),\n\n# Go to a specific post\n    re_path(r'^(?P<year>\\d{4})/(?P<month>\\d{2})/(?P<day>\\d{2})/(?P<post>[-\\w]+)/$',\n        views.post_detail,\n        name='post_detail'),\n]","repo_name":"AlDmt/Django","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"20352113406","text":"import PySimpleGUI as sg\nfrom zip import create_zip\n\n\nlabel1 = sg.Text(\"Add files to compress: \")\ninput1 = sg.Input()\nbutton1 = sg.FilesBrowse(\"Choose\", key='files')\n\nlabel2 = sg.Text(\"Select destination folder: \")\ninput2 = sg.Input()\nbutton2 = sg.FolderBrowse(\"Choose\", key='folders')\n\nbutton3 = sg.Button(\"Compress\")\n\noutput = sg.Text(key=\"output\")\n\nwindow = sg.Window(\"File Compressor\", layout=[[label1, input1, button1],\n                                             [label2, input2, button2 ],\n                                             [button3, output]])\n\nwhile True:\n    events, values = window.read()\n    print(events, values)\n    if events == sg.WIN_CLOSED:\n        break\n    filepaths = values['files'].split(';')\n    folders = values['folders']\n    create_zip(filepaths, folders)\n    window[\"output\"].update(value=\"successfully compressed\")\n\nwindow.close()\n","repo_name":"donfortune/python_file_compressor_GUI","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"40872259425","text":"from base64 import b64encode, b64decode\n\ndef encrypt(plain):\n    key = \"n0k3y\"\n    cipher = \"\"\n    for c, i in enumerate(plain):\n        cipher += chr(ord(i) ^ ord(key[c % 5]))\n\n    print(b64encode(cipher.encode()))\n\n# if __name__ == \"__main__\":\n#     plain = input()\n#     encrypt(plain)\n\ndef decrypt(cipher):\n    cipher = b64decode(cipher.encode()).decode()\n    key = \"n0k3y\"\n    uncipher = \"\"\n    for c, i in enumerate(cipher):\n        uncipher+=chr(ord(i)^ord(key[c%5]))\n\n    return uncipher\n\n\n\nprint(decrypt(\"Ins4AUlcAyZSFQ9eDEgAIUU0V0oNQhJDDTFdWE4=\"))","repo_name":"FlaBBB/Cybers_security","sub_path":"CTF/2023/LKS 2023/Kota (Malang)/Crypto/crypto 3 (solve).py","file_name":"crypto 3 (solve).py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"}
+{"seq_id":"35297756189","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 5 09:15:41 2020\n\n1217. Minimum Cost to Move Chips to The Same Position\n\nfree for odds to move to odds, and evens to move to evens. 
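encrypt and decrypt in the crypto record are the same repeating-key XOR transform, differing only in the direction of the base64 step, so applying the XOR twice recovers the input. A compact round-trip demonstration with the key taken from the file:

    from base64 import b64encode, b64decode

    KEY = 'n0k3y'   # the repeating XOR key from the original

    def xor_key(text):
        return ''.join(chr(ord(ch) ^ ord(KEY[i % len(KEY)]))
                       for i, ch in enumerate(text))

    cipher = b64encode(xor_key('hello world').encode()).decode()
    print(xor_key(b64decode(cipher).decode()))  # hello world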
Cost for an odd to move to any even is 1, and vice versa.\nJust find out if its better to finish on an even or odd spot.\n\n@author: Robert Xu\n\"\"\"\nclass Solution(object):\n def minCostToMoveChips(self, position):\n \"\"\"\n :type position: List[int]\n :rtype: int\n \"\"\"\n if len(position) == 1:\n return 0\n \n odds = evens = 0\n \n for pos in position:\n \n if pos % 2 == 0:\n odds += 1\n \n else:\n evens += 1\n \n return min(odds, evens)\n ","repo_name":"xu-robert/Leetcode-daily-challenges","sub_path":"Nov2020/Nov5.py","file_name":"Nov5.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"43514153965","text":"from flask_sqlalchemy import SQLAlchemy\ndb = SQLAlchemy()\n\nclass Course(db.Model):\n\n # Map this model to a flights table\n __tablename__ = \"courses\"\n\n # Specify the columns/ fields of the model.\n id = db.Column(db.Integer, primary_key=True)\n course_number = db.Column(db.String, nullable=False)\n course_title = db.Column(db.String, nullable=False)\n\n #Specify any relationship fields.\n students = db.relationship(\"RegisteredStudent\", backref=\"courses\", lazy=True)\n\n # specify any utility methods associated with the model.\n def add_student(self, name, grade):\n # Notice that we set the foreign key for the passenger class.\n new_student = RegisteredStudent(name=name, grade = grade, course_id=self.id)\n db.session.add(new_student)\n db.session.commit()\n \n\nclass RegisteredStudent(db.Model):\n\n __tablename__ = \"registered_student\"\n \n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String, nullable=False)\n grade = db.Column(db.String, nullable=False)\n\n # Notice, this field serves as a foreighKey.\n course_id = db.Column(db.Integer, db.ForeignKey('courses.id'), nullable=False)\n","repo_name":"gracelamalva/CUS1166","sub_path":"CUS1166_Lab5/playground/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"16738945947","text":"#!/usr/bin/env python\n# coding: utf-8\n\nfrom wikidataintegrator import wdi_core, wdi_login, wdi_config\nfrom getpass import getpass\nimport pandas as pd\nimport os\n\n#wbstack = os.environ[\"WBSTACK\"]\nwikibase = \"https://luxsaf-demo.micel.io/\"\n#wikibase = \"https://{}.wiki.opencura.com/\".format(wbstack)\napi = \"https://luxsaf-demo.micel.io/w/api.php\"\nsparql = \"https://luxsaf-demo.micel.io/query/sparql\"\nentityUri = wikibase.replace(\"https:\", \"http:\")+\"entity/\"\nWBUSER = os.environ[\"MW_ADMIN_NAME\"]\nWBPASS = os.environ[\"MW_ADMIN_PASS\"]\nlogin = wdi_login.WDLogin(WBUSER, WBPASS, mediawiki_api_url=api)\nlocalEntityEngine = wdi_core.WDItemEngine.wikibase_item_engine_factory(api,sparql)\n\nmodel_def = pd.read_excel(\"../../DM_SAF/DM_SAF_vers.1.0.3_andra.xls\", header=1)\n\ndef createProperty(login=login, wdprop=None, lulabel=\"\", enlabel=\"\", frlabel=\"\", delabel=\"\", description=\"\", property_datatype=\"\"):\n if wdprop== None:\n s = []\n else:\n s = [wdi_core.WDUrl(wdprop, prop_nr=\"P1\")]\n localEntityEngine = wdi_core.WDItemEngine.wikibase_item_engine_factory(api,sparql)\n item = localEntityEngine(data=s, core_props=set())\n if lulabel != \"\":\n item.set_label(lulabel, lang=\"lb\")\n item.set_label(enlabel, lang=\"en\")\n item.set_label(delabel, lang=\"de\")\n item.set_label(frlabel, lang=\"fr\")\n item.set_description(description, lang=\"en\")\n 
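In the chips solution above the counter names are swapped — a chip at an even position increments odds, and vice versa — which the final min() happens to mask, and the len == 1 early return is redundant. A corrected version of the same parity-counting idea:

    def min_cost_to_move_chips(position):
        # Moving by 2 is free, so only parity matters: finish on the cheaper
        # parity and pay 1 per chip stranded on the other parity.
        evens = sum(1 for p in position if p % 2 == 0)
        odds = len(position) - evens
        return min(evens, odds)

    print(min_cost_to_move_chips([2, 2, 2, 3, 3]))  # 2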
print(item.write(login, entity_type=\"property\", property_datatype=property_datatype))\n\n# instance of\ncreateProperty(login, lulabel=\"ass eng\",\n enlabel=\"instance of\",\n frlabel=\"instance de\",\n delabel=\"ist ein(e)\",\n property_datatype=\"wikibase-item\")\n\n# subclass of\n##\ncreateProperty(login, lulabel=\"Ënnerklass vu(n)\",\n enlabel=\"subclass of\",\n frlabel=\"sous-classe de\",\n delabel=\"Unterklasse von\",\n property_datatype=\"wikibase-item\")\n# skos:exact match\ncreateProperty(login, lulabel=\"genauen Match\",\n enlabel=\"exact match\",\n frlabel=\"correspondance exacte\",\n delabel=\"exakte Übereinstimmung\",\n description=\"mapping\",\n property_datatype=\"url\")\n#domain\ncreateProperty(login, lulabel=\"domain\",\n enlabel=\"domain\",\n frlabel=\"domaine\",\n delabel=\"domain\",\n property_datatype=\"wikibase-item\")\n#range\ncreateProperty(login, lulabel=\"reechwäit\",\n enlabel=\"range\",\n frlabel=\"intervalle\",\n delabel=\"reichweite\",\n property_datatype=\"wikibase-item\")\n\n#property\ncreateProperty(login, enlabel=\"property\",\n property_datatype=\"wikibase-item\")\n#subPropertyOf\ncreateProperty(login, lulabel=\"Ënnerbesëtz vun\",\n enlabel=\"subproperty of\",\n frlabel=\"sous-propriété de\",\n delabel=\"untereigenschaft von\",\n property_datatype=\"wikibase-item\")\n#inverseOf\ncreateProperty(login, lulabel=\"invers vun\",\n enlabel=\"inverse of\",\n frlabel=\"inverse de\",\n delabel=\"invers von\",\n property_datatype=\"wikibase-item\")\n\nfor index, row in model_def.iterrows():\n if row[\"Data type\"].strip() in wdi_config.property_value_types.keys():\n print(row[\"Data type\"])\n try:\n createProperty(login, enlabel=row[\"English\"], frlabel=row[\"français\"], delabel=row[\"Deutsch\"], description=\"Lux SAF Property\", property_datatype=row[\"Data type\"].strip())\n except:\n print(\"Error with \", row[\"English\"])\n else:\n print(\"Error\", row[\"Data type\"])\n\n## Items\n# class item\nitem = localEntityEngine(new_item=True, core_props=set())\nitem.set_label(\"Class\", lang=\"en\")\nitem.set_aliases([\"Owl:Class\"], lang=\"en\")\nprint(item.write(login))\n\n# property item\nitem = localEntityEngine(new_item=True, core_props=set())\nitem.set_label(\"Property\", lang=\"en\")\nitem.set_aliases([\"owl:ObjectProperty\"], lang=\"en\")\nprint(item.write(login))\n\nCL4 = pd.read_excel(\"../../DM_SAF/DM_SAF_vers.1.0.3_andra.xls\", sheet_name=\"CL4 GENDER\")\nfor index, row in CL4.iterrows():\n print(row[\"Label (English)\"])\n item = localEntityEngine(new_item=True, core_props=set())\n item.set_label(row[\"Label (English)\"], lang=\"en\")\n item.set_label(row[\"Label (German)\"], lang=\"de\")\n item.set_label(row[\"Label (French)\"], lang=\"fr\")\n print(item.write(login))\n\nCL5 = pd.read_excel(\"../../DM_SAF/DM_SAF_vers.1.0.3_andra.xls\", sheet_name=\"CL5 STATUS\")\nfor index, row in CL5.iterrows():\n print(row[\"Label (English)\"])\n item = localEntityEngine(new_item=True)\n item.set_label(row[\"Label (English)\"], lang=\"en\")\n item.set_label(row[\"Label (German)\"], lang=\"de\")\n item.set_label(row[\"Label (French)\"], lang=\"fr\")\n print(item.write(login))\n\nCL3 = pd.read_excel(\"../../DM_SAF/DM_SAF_vers.1.0.3_andra.xls\", sheet_name=\"CL3 Name Format\")\nfor index, row in CL3.iterrows():\n item = localEntityEngine(new_item=True, core_props=set())\n item.set_label(row[\"Cataloging specs\"])\n print(item.write(login))\n\nCL8 = pd.read_excel(\"../../DM_SAF/DM_SAF_vers.1.0.3_andra.xls\", sheet_name=\"CL8 INTERNAL IDENTIFIER\")\nfor index, row in 
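The property-import loop above strips each sheet's Data type cell and checks it against wdi_config.property_value_types before calling createProperty. The same guard on a toy DataFrame — VALID_TYPES here is an illustrative subset standing in for the real wdi_config mapping:

    import pandas as pd

    VALID_TYPES = {'string', 'url', 'wikibase-item', 'external-id'}

    df = pd.DataFrame({'English': ['has part', 'website'],
                       'Data type': ['wikibase-item ', 'geo-shape?']})
    for _, row in df.iterrows():
        dtype = row['Data type'].strip()      # sheet cells carry stray spaces
        if dtype in VALID_TYPES:
            print('create', row['English'], 'as', dtype)
        else:
            print('skip', row['English'], '- unknown type', dtype)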
CL8.iterrows():\n print(row[\"Label \"])\n createProperty(login, lulabel=row[\"Label \"].strip(), \n enlabel=row[\"Label \"].strip(),\n frlabel=row[\"Label \"].strip(),\n delabel=row[\"Label \"].strip(),\n property_datatype=\"external-id\")\n\n#ARK\ncreateProperty(login, lulabel=\"ARK\", \n enlabel=\"ARK\",\n frlabel=\"ARK\",\n delabel=\"ARK\",\n property_datatype=\"url\")\n\n\nperson_item = localEntityEngine(new_item=True, core_props=set())\nperson_item.set_label(\"E21 Person\", lang=\"en\")\nprint(person_item.write(login))\n\n\n\n\n","repo_name":"tentwentyfour/SAF-Lux","sub_path":"import_scripts/property_builder/SAF-DEV-scripts/create_properties_wbstack.py","file_name":"create_properties_wbstack.py","file_ext":"py","file_size_in_byte":6062,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"26169526427","text":"import sys\nimport time as t\nimport consolemanager\nimport begin\nimport random\n\n# Text format block things\nbig = \"--<======================>--\"\nsmall = \"-<===============>-\"\n\nconsole_info = \"\"\nPADDINGNONE = consolemanager.Rectangle(0, 0, 1, 0)\nPADDINGMIDDLE = consolemanager.Rectangle(0, 23, 1, 2)\nPADDINGWIPEART = consolemanager.Rectangle(0, 0, 1, 18)\n\n\ndef printSlow(\n fstr, waitTime=0, nextLine=True, typeSpeed=0.02, PADDINGAREA=PADDINGMIDDLE\n):\n \"Function to Type out Printed strings\"\n global console_info\n console = begin.console\n console_info = console.get_console_info()\n for char in fstr:\n print(char, end=\"\", flush=True)\n t.sleep(typeSpeed)\n t.sleep(waitTime)\n if nextLine == True:\n scroll_text_up(PADDINGAREA, 1, 0, PADDINGAREA)\n console.set_cursor_pos(0, console_info.window_rectangle.bottom - 3)\n return \"\"\n\n\ndef scroll_text_up(\n rectangle: consolemanager.Rectangle, clear_rows=1, waitTime=0, padding=PADDINGNONE\n):\n \"Function to move text up\"\n console = begin.console\n ci = console.get_console_info()\n for row in range(\n rectangle.top + 1,\n ci.window_rectangle.bottom + 1 - rectangle.bottom - clear_rows,\n ):\n for clear_row in range(clear_rows):\n console.clear_line_until(\n ci.window_rectangle.right - rectangle.right - rectangle.left,\n row - 1 + clear_row,\n x_start=rectangle.left,\n )\n line = console.read_console_line(row + clear_row)[\n rectangle.left : -rectangle.right\n ]\n console.clear_line_until(\n console_info.window_rectangle.right - padding.right, row\n )\n console.set_cursor_pos(rectangle.left, row - 1 + clear_row)\n print(line, end=\"\", flush=True)\n t.sleep(waitTime)\n console.set_cursor_pos(0, console_info.window_rectangle.bottom - 3)\n\n\ndef clear_screen(padding=PADDINGMIDDLE, waitTime=0.018):\n \"Function to clear the screen WITH padding\"\n console = begin.console\n for row in range(console.get_console_info().window_rectangle.bottom - padding.top):\n scroll_text_up(padding, 1, 0, padding)\n t.sleep(waitTime)\n console.set_cursor_pos(0, console_info.window_rectangle.bottom - 3)\n\n\ndef confirm(keep=0, padding=PADDINGMIDDLE):\n \"Function when I want user to press 'Enter' as confirmation.\"\n console = begin.console\n input(printSlow(\"Press Enter to continue...\"))\n printSlow(\"\")\n if keep == 0:\n for row in range(\n console.get_console_info().window_rectangle.bottom - padding.top\n ):\n scroll_text_up(padding)\n t.sleep(0.015)\n console.set_cursor_pos(0, console_info.window_rectangle.bottom - 3)\n\n\ndef qAnswer(question, safeZone=False, PADDINGAREA=PADDINGMIDDLE):\n \"Question function to utilize all over and to check for command usage. 
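printSlow above produces the typewriter effect by emitting one character at a time with flush=True and a per-character sleep, before all the console scrolling bookkeeping. The core effect in isolation:

    import sys
    import time

    def type_out(text, char_delay=0.02, end='\n'):
        """Print text one character at a time, flushing after each."""
        for ch in text:
            sys.stdout.write(ch)
            sys.stdout.flush()
            time.sleep(char_delay)
        sys.stdout.write(end)

    type_out('You encounter a Goblin!')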
Returns the Inputed Answer.\"\n console = begin.console\n scroll_text_up(PADDINGAREA, 1, 0, PADDINGAREA)\n console.set_cursor_pos(0, console_info.window_rectangle.bottom - 3)\n printSlow(f\"{question}\", 0, False, 0.03, PADDINGAREA)\n console.set_cursor_pos(0, console_info.window_rectangle.bottom - 1)\n answer = input(printSlow(\"> \", 0, False, 0.03, PADDINGAREA))\n scroll_text_up(PADDINGAREA, 1, 0, PADDINGAREA)\n console.clear_line(console_info.window_rectangle.bottom - 1, 2)\n console.set_cursor_pos(0, console_info.window_rectangle.bottom - 3)\n if answer.lower() == \"!stats\" and safeZone == True:\n clear_screen()\n printSlow(big)\n printSlow(\n \"Remember you can view a list of commands in any safezone with !commands\",\n 0.5,\n )\n playerStats()\n clear_screen()\n elif answer.lower() == \"!statsmeaning\" and safeZone == True:\n clear_screen()\n printSlow(big)\n printSlow(\n \"Remember you can view a list of commands in any safezone with !commands\",\n 0.5,\n )\n printSlow(big)\n statMeaning()\n elif answer.lower() == \"!commands\" and safeZone == True:\n clear_screen()\n commands()\n else:\n scroll_text_up(PADDINGAREA, 1, 0, PADDINGAREA)\n console.set_cursor_pos(0, console_info.window_rectangle.bottom - 3)\n return answer\n\n\ndef spacing(x1, x2, str1, str2):\n \"Function to calculate where to place a string in between two points\"\n return (x1 + ((x2 - x1 + 1) // 2)) - (len(str1) - 1) - ((len(str2) - 1) // 2) + 2\n\n\ndef monsterFight(monster, mainChar):\n \"Monster encounter function\"\n console = begin.console\n printSlow(f\"You encounter a {monster.name}!\", 0, True, 0.02, PADDINGMIDDLE)\n\n begin.class_art()\n setHP(mainChar.health, True, mainChar, mainChar.healthPoolmax, True)\n setMana(mainChar.mana, True, mainChar, mainChar.manaPoolmax, True)\n setStamina(mainChar.stamina, True, mainChar, mainChar.staminaPoolmax, True)\n setArmor(mainChar.armor, True, mainChar, mainChar.armorPoolmax, True)\n\n begin.monster_art(monster)\n setEnemyHP(monster, monster.health, True)\n setEnemyMana(monster, True)\n setEnemyStamina(monster, True)\n setEnemyArmor(monster, monster.armor, True)\n\n console.set_cursor_pos(0, 16)\n printSlow((\"█\" * console_info.window_rectangle.right), 0, False, 0.002)\n\n choice = qAnswer(\n \"What would you like to do? [1. Attack] [2. 
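qAnswer doubles as a command dispatcher, intercepting !stats, !statsmeaning and !commands when safeZone is true; note that it calls playerStats() with no argument even though playerStats is defined to take mainChar, so the !stats branch would raise a TypeError as written ("Inputed" in the docstring should also read "inputted"). A stripped-down sketch of that dispatch pattern that re-asks after running a command:

    def ask(question, safe_zone=False, commands=None):
        """Return the player's answer, or run a !command in a safe zone."""
        commands = commands or {}
        answer = input(question + '\n> ')
        handler = commands.get(answer.lower())
        if safe_zone and handler:
            handler()
            return ask(question, safe_zone, commands)   # prompt again
        return answer

    # ask('Go north or south?', True, {'!stats': lambda: print('STR 5  AGI 3')})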
Try to Escape!]\",\n False,\n PADDINGMIDDLE,\n )\n while monster.health > 0:\n enemyCritChance = monster.agility + 5\n enemyDodgeChance = monster.agility + 5\n heroCritChance = mainChar.agility + 10\n heroDodgeChance = mainChar.agility + 10\n\n if choice == \"1\":\n roll = random.randint(1, 100)\n if roll > enemyDodgeChance:\n\n roll = random.randint(1, 100)\n if roll > heroCritChance:\n playerAttackMultiplier = 1\n\n elif roll <= heroCritChance:\n playerAttackMultiplier = 2\n\n localMonsterhealth = monster.health - (\n 3 + (mainChar.strength * playerAttackMultiplier - monster.armor)\n )\n\n if localMonsterhealth > 0:\n\n monster.health = monster.health - (\n 3 + (mainChar.strength * playerAttackMultiplier - monster.armor)\n )\n if playerAttackMultiplier == 1:\n printSlow(\n f\"You strike the {monster.name} with your sword, dealing {3 + (mainChar.strength * playerAttackMultiplier - monster.armor)} damage!\",\n 0,\n True,\n 0.02,\n PADDINGMIDDLE,\n )\n printSlow(\"\", 0, True, 0.02, PADDINGMIDDLE)\n\n elif playerAttackMultiplier == 2:\n printSlow(\n f\"You crit the {monster.name} with your sword for {3 + (mainChar.strength * playerAttackMultiplier - monster.armor)} damage; breaking one armor point!\",\n 0,\n True,\n 0.02,\n PADDINGMIDDLE,\n )\n printSlow(\"\", 0, True, 0.02, PADDINGMIDDLE)\n if monster.armor > 0:\n monster.armor = monster.armor - 1\n\n else:\n\n monster.health = 0\n printSlow(\n f\"You decapitate the {monster.name}. It drops to the ground in front of you.\",\n 0,\n True,\n 0.02,\n PADDINGMIDDLE,\n )\n break\n\n else:\n printSlow(\n f\"You swing your sword and miss the {monster.name}\",\n 0,\n True,\n 0.02,\n PADDINGMIDDLE,\n )\n printSlow(\"\", 0, True, 0.02, PADDINGMIDDLE)\n\n elif choice == \"2\":\n\n printSlow(\n \"- Escaping isn't possible right now, try again later! -\",\n 0,\n True,\n 0.02,\n PADDINGMIDDLE,\n )\n printSlow(\"\", 0, True, 0.02, PADDINGMIDDLE)\n\n else:\n\n printSlow(\n \"Theres no time for other matters, you must fight or run!\",\n 0,\n True,\n 0.02,\n PADDINGMIDDLE,\n )\n printSlow(\"\", 0, True, 0.02, PADDINGMIDDLE)\n\n if monster.health > 0:\n\n roll = random.randint(1, 100)\n if roll > heroDodgeChance:\n\n roll = random.randint(1, 100)\n if roll > enemyCritChance:\n enemyAttackMultiplier = 1\n\n elif roll <= enemyCritChance:\n enemyAttackMultiplier = 1.5\n\n localHerohealth = mainChar.health - (\n int(monster.strength * enemyAttackMultiplier - mainChar.armor)\n )\n\n if localHerohealth > 0:\n\n mainChar.health = mainChar.health - (\n int(monster.strength * enemyAttackMultiplier - mainChar.armor)\n )\n if enemyAttackMultiplier == 1:\n printSlow(\n f\"The {monster.name} attacks you, dealing {monster.strength * enemyAttackMultiplier - mainChar.armor} damage!\",\n 0,\n True,\n 0.02,\n PADDINGMIDDLE,\n )\n\n elif enemyAttackMultiplier == 1.5:\n printSlow(\n f\"The {monster.name} crits you for {int(monster.strength * enemyAttackMultiplier - mainChar.armor)} damage; breaking one armor point.\",\n 0,\n True,\n 0.02,\n PADDINGMIDDLE,\n )\n if mainChar.armor > 0:\n mainChar.armor = mainChar.armor - 1\n\n else:\n\n mainChar.health = 0\n printSlow(f\"You were defeated by: {monster.name}.\")\n t.sleep(1)\n printSlow(f\"Thank you very much for playing ARX!\")\n t.sleep(1)\n exit()\n\n elif roll <= heroDodgeChance:\n\n printSlow(\n f\"The {monster.name} tried to attack you and misses\",\n 0,\n True,\n 0.02,\n PADDINGMIDDLE,\n )\n\n choice = qAnswer(\n \"Now what? [1. Attack] [2. 
Try to Escape!]\", False, PADDINGMIDDLE\n )\n\n clear_screen()\n console.set_cursor_pos(0, 21)\n print(\" \" * console_info.window_rectangle.right)\n console.set_cursor_pos(0, console_info.window_rectangle.bottom - 3)\n clear_screen(PADDINGWIPEART)\n printSlow(f\"Wow, you defeated the {monster.name}\")\n printSlow(\"\")\n\n\n# -------------------------\n# Dialogue Functions Below:\n# -------------------------\n\n\ndef playerStats(mainChar):\n \"Shows player their stats\"\n console = begin.console\n printSlow(\n \"Lets look at your characters stats! (They are dependent on what class you pick!)\",\n 0.5,\n )\n printSlow(f\"► Your Strength is: {mainChar.strength}\")\n printSlow(f\"► Your Agility is: {mainChar.agility}\")\n printSlow(f\"► Your Intelligence is: {mainChar.intelligence}\")\n printSlow(f\"► Your Charisma is: {mainChar.charisma}\")\n confirm(1)\n\n\ndef statMeaning():\n \"Shows player what each player stat does\"\n console = begin.console\n printSlow(big)\n printSlow(\"Stats in Arx\")\n printSlow(small)\n printSlow(\"Strength:\")\n printSlow(\"Primary benefit(s): Weapon Damage\")\n printSlow(\"Additional benefit(s): Increases Inventory\")\n printSlow(\"\")\n printSlow(\"Agility:\")\n printSlow(\"Primary benefit(s): Increased Stamina pool\")\n printSlow(\"Additional benefit(s): Crit chance and Dodge chance\")\n printSlow(\"\")\n printSlow(\"Intelligence:\")\n printSlow(\"Primary benfit(s): Increased Mana pool\")\n printSlow(\"Additional benefit(s): Spell Damage and Heal Amount\")\n printSlow(\"\")\n printSlow(\"Charisma:\")\n printSlow(\"Primary benefit(s): Trade prices\")\n printSlow(\"Additional benefit(s): Chance to prevent negative effects\")\n printSlow(big)\n confirm()\n\n\n# ------------------------------------------\n# Stat Bar Setting and Icon Functions below:\n# ------------------------------------------\n\n\ndef setHP(current_hp, showStats, mainChar, max_hp=100, initialize=False):\n \"Sets Player Instance's Health and Updates On-Screen Display for it.\"\n console = begin.console\n mainChar._health = current_hp\n\n console.set_cursor_pos(40, 17)\n printSlow(\"Hero's Stats:\", 0, False)\n console.set_cursor_pos(35, 18)\n\n if showStats == True and initialize == False:\n\n console.set_text_color(\"bright white\", \"black\")\n print(f\"[Health {current_hp: >3}/{max_hp}:\".ljust(14), end=\"\", flush=True)\n health_percentage_current = int(((current_hp / max_hp) * 100) // 10)\n\n console.set_text_color(\"bright white\", \"light red\")\n print(\" \" * health_percentage_current, end=\"\", flush=True)\n\n console.set_text_color(\"bright white\", \"red\")\n print(\" \" * (10 - health_percentage_current), end=\"\", flush=True)\n\n console.set_text_color(\"bright white\", \"black\")\n printSlow(\"]\", 0, False)\n console.set_default_text_color()\n elif showStats == True and initialize == True:\n\n console.set_text_color(\"bright white\", \"black\")\n printSlow(f\"[Health {current_hp: >3}/{max_hp}:\".ljust(14), 0, False)\n health_percentage_current = int(((current_hp / max_hp) * 100) // 10)\n\n console.set_text_color(\"bright white\", \"light red\")\n printSlow(\" \" * health_percentage_current, 0, False)\n\n console.set_text_color(\"bright white\", \"red\")\n printSlow(\" \" * (10 - health_percentage_current), 0, False)\n\n console.set_text_color(\"bright white\", \"black\")\n printSlow(\"]\", 0, False)\n console.set_default_text_color()\n console.set_cursor_pos(0, console_info.window_rectangle.bottom - 3)\n\n\ndef setMana(current_mana, showStats, mainChar, max_mana=None, 
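Both attack branches in the fight loop roll 1-100 against agility-derived dodge and crit chances, then compute damage (the hero's formula adds a flat 3 on top of strength * multiplier - armor) and special-case the killing blow. A consolidated sketch of one strike, with the percent-roll convention from above (roll <= chance succeeds) and the health floor clamped at zero — a simplification of the kill/non-kill branching, not the game's exact flow:

    import random

    def percent_roll(chance):
        """True with probability chance/100, via a 1..100 roll."""
        return random.randint(1, 100) <= chance

    def hero_strike(target_hp, strength, target_armor, crit_chance):
        crit = percent_roll(crit_chance)
        damage = max(3 + (strength * (2 if crit else 1) - target_armor), 0)
        return max(target_hp - damage, 0), damage, crit

    print(hero_strike(target_hp=20, strength=6, target_armor=1, crit_chance=15))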
initialize=False):\n \"Sets Player Instance's Mana and Updates On-Screen Display for it.\"\n console = begin.console\n mainChar._mana = current_mana\n\n console.set_cursor_pos(35, 19)\n\n if showStats == True and initialize == False:\n\n if max_mana is None:\n\n max_mana = mainChar.manaPoolMax\n\n console.set_text_color(\"bright white\", \"black\")\n print(\n f\"[Mana {current_mana: >2}/{max_mana}:\".ljust(14), end=\"\", flush=True\n )\n mana_percent_current = int(((current_mana / max_mana) * 100) // 10)\n\n console.set_text_color(\"bright white\", \"light aqua\")\n print(\" \" * mana_percent_current, end=\"\", flush=True)\n\n console.set_text_color(\"bright white\", \"aqua\")\n print(\" \" * (10 - mana_percent_current), end=\"\", flush=True)\n\n console.set_text_color(\"bright white\", \"black\")\n printSlow(\"]\", 0, False)\n console.set_default_text_color()\n\n elif showStats == True and initialize == True:\n\n if max_mana is None:\n\n max_mana = mainChar.manaPoolmax\n\n console.set_text_color(\"bright white\", \"black\")\n printSlow(f\"[Mana {current_mana: >2}/{max_mana}:\".ljust(14), 0, False)\n mana_percent_current = int(((current_mana / max_mana) * 100) // 10)\n\n console.set_text_color(\"bright white\", \"light aqua\")\n printSlow(\" \" * mana_percent_current, 0, False)\n\n console.set_text_color(\"bright white\", \"aqua\")\n printSlow(\" \" * (10 - mana_percent_current), 0, False)\n\n console.set_text_color(\"bright white\", \"black\")\n printSlow(\"]\", 0, False)\n console.set_default_text_color()\n console.set_cursor_pos(0, console_info.window_rectangle.bottom - 3)\n\n\ndef setStamina(\n current_stamina, showStats, mainChar, max_stamina=None, initialize=False\n):\n \"Sets Player Instance's Stamina and Updates On-Screen Display for it.\"\n console = begin.console\n mainChar._stamina = current_stamina\n console.set_cursor_pos(35, 20)\n\n if showStats == True and initialize == False:\n\n if max_stamina is None:\n\n max_stamina = mainChar.staminaPoolmax\n\n console.set_text_color(\"bright white\", \"black\")\n print(\n f\"[Stamina {current_stamina: >2}/{max_stamina}:\".ljust(14),\n end=\"\",\n flush=True,\n )\n stamina_percent_current = int(((current_stamina / max_stamina) * 100) // 10)\n\n console.set_text_color(\"bright white\", \"light green\")\n print(\" \" * stamina_percent_current, end=\"\", flush=True)\n\n console.set_text_color(\"bright white\", \"green\")\n print(\" \" * (10 - stamina_percent_current), end=\"\", flush=True)\n\n console.set_text_color(\"bright white\", \"black\")\n printSlow(\"]\", 0, False)\n console.set_default_text_color()\n\n elif showStats == True and initialize == True:\n\n if max_stamina is None:\n\n max_stamina = mainChar.staminaPoolmax\n\n console.set_text_color(\"bright white\", \"black\")\n printSlow(\n f\"[Stamina {current_stamina: >2}/{max_stamina}:\".ljust(14), 0, False\n )\n stamina_percent_current = int(((current_stamina / max_stamina) * 100) // 10)\n\n console.set_text_color(\"bright white\", \"light green\")\n printSlow(\" \" * stamina_percent_current, 0, False)\n\n console.set_text_color(\"bright white\", \"green\")\n printSlow(\" \" * (10 - stamina_percent_current), 0, False)\n\n console.set_text_color(\"bright white\", \"black\")\n printSlow(\"]\", 0, False)\n console.set_default_text_color()\n console.set_cursor_pos(0, console_info.window_rectangle.bottom - 3)\n\n\ndef setArmor(current_armor, showStats, mainChar, max_armor=10, initialize=False):\n \"Sets Player Instance's Armor and Updates On-Screen Display for it.\"\n console = 
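setMana above reads mainChar.manaPoolMax in its first branch but mainChar.manaPoolmax in the second; only one spelling can exist on the class, so one of the two branches will raise AttributeError when hit. Beyond that, every set* function repeats the same bar arithmetic: a ljust(14) header and a fill of percentage // 10 cells. That arithmetic as one pure helper (names and the '#'/'.' glyphs are illustrative; the original paints the bar with background colours instead):

    def stat_bar(label, current, maximum, width=10):
        """Build '[Label cur/max: ####......]' with one cell per 10%."""
        filled = int((current / maximum) * 100) // (100 // width)
        head = ('[%s %3d/%d:' % (label, current, maximum)).ljust(14)
        return head + '#' * filled + '.' * (width - filled) + ']'

    print(stat_bar('Health', 37, 100))   # [Health  37/100:###.......]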
begin.console\n mainChar._armor = current_armor\n console.set_cursor_pos(35, 21)\n\n if showStats == True and initialize == False:\n\n console.set_text_color(\"bright white\", \"black\")\n print(\n f\"[Armor {current_armor: >2}/{max_armor}:\".ljust(14), end=\"\", flush=True\n )\n armor_percentage_current = int(((current_armor / max_armor) * 100) // 10)\n\n console.set_text_color(\"bright white\", \"light yellow\")\n print(\" \" * armor_percentage_current, end=\"\", flush=True)\n\n console.set_text_color(\"bright white\", \"yellow\")\n print(\" \" * (10 - armor_percentage_current), end=\"\", flush=True)\n\n console.set_text_color(\"bright white\", \"black\")\n printSlow(\"]\", 0, False)\n console.set_default_text_color()\n\n elif showStats == True and initialize == True:\n \n console.set_text_color(\"bright white\", \"black\")\n printSlow(f\"[Armor {current_armor: >2}/{max_armor}:\".ljust(14), 0, False)\n armor_percentage_current = int(((current_armor / max_armor) * 100) // 10)\n\n console.set_text_color(\"bright white\", \"light yellow\")\n printSlow(\" \" * armor_percentage_current, 0, False)\n\n console.set_text_color(\"bright white\", \"yellow\")\n printSlow(\" \" * (10 - armor_percentage_current), 0, False)\n\n console.set_text_color(\"bright white\", \"black\")\n printSlow(\"]\", 0, False)\n console.set_default_text_color()\n console.set_cursor_pos(0, console_info.window_rectangle.bottom - 3)\n\n\n# ---------------------------\n# ENEMIES SET FUNCTION BELOW:\n# ---------------------------\n\n\ndef setEnemyHP(monster, max_hp, initialize=False):\n \"Sets Enemy Instance's Health and Updates On-Screen Display for it.\"\n console = begin.console\n # TODO Set this to center monster.name relative to the len()\n console.set_cursor_pos(95, 17)\n printSlow(f\"{monster.name}'s Stats:\", 0, False, 0.02, PADDINGMIDDLE)\n console.set_cursor_pos(90, 18)\n\n if initialize == False:\n\n console.set_text_color(\"bright white\", \"black\")\n print(f\"[Health {monster.health: >3}/{max_hp}:\".ljust(14), end=\"\", flush=True)\n health_percentage_current = int(((monster.health / max_hp) * 100) // 10)\n\n console.set_text_color(\"bright white\", \"light red\")\n print(\" \" * health_percentage_current, end=\"\", flush=True)\n\n console.set_text_color(\"bright white\", \"red\")\n print(\" \" * (10 - health_percentage_current), end=\"\", flush=True)\n\n console.set_text_color(\"bright white\", \"black\")\n printSlow(\"]\", 0, False)\n console.set_default_text_color()\n else:\n\n console.set_text_color(\"bright white\", \"black\")\n printSlow(\n f\"[Health {monster.health: >3}/{max_hp}:\".ljust(14),\n 0,\n False,\n 0.02,\n PADDINGMIDDLE,\n )\n health_percentage_current = int(((monster.health / max_hp) * 100) // 10)\n\n console.set_text_color(\"bright white\", \"light red\")\n printSlow(\" \" * health_percentage_current,\n 0, False, 0.02, PADDINGMIDDLE)\n\n console.set_text_color(\"bright white\", \"red\")\n printSlow(\" \" * (10 - health_percentage_current),\n 0, False, 0.02, PADDINGMIDDLE)\n\n console.set_text_color(\"bright white\", \"black\")\n printSlow(\"]\", 0, False, 0.02, PADDINGMIDDLE)\n console.set_default_text_color()\n console.set_cursor_pos(0, console_info.window_rectangle.bottom - 3)\n\n\ndef setEnemyMana(monster, initialize=False):\n \"Sets Enemy Instance's Mana and Updates On-Screen Display for it.\"\n console = begin.console\n console.set_cursor_pos(90, 19)\n\n if len(str(monster.mana)) == 2:\n spaces = \" \"\n elif len(str(monster.mana)) < len(str(monster.manaPoolmax)):\n spaces = \" \"\n 
else:\n spaces = \" \"\n\n if initialize == False:\n\n console.set_text_color(\"bright white\", \"black\")\n print(\n f\"[Mana{spaces}{monster.mana: >2}/{monster.manaPoolmax}:\".ljust(14),\n end=\"\",\n flush=True,\n )\n mana_percent_current = int(((monster.mana / monster.manaPoolmax) * 100) // 10)\n\n console.set_text_color(\"bright white\", \"light aqua\")\n print(\" \" * mana_percent_current, end=\"\", flush=True)\n\n console.set_text_color(\"bright white\", \"aqua\")\n print(\" \" * (10 - mana_percent_current), end=\"\", flush=True)\n\n console.set_text_color(\"bright white\", \"black\")\n printSlow(\"]\", 0, False, 0.02, PADDINGMIDDLE)\n console.set_default_text_color()\n else:\n\n console.set_text_color(\"bright white\", \"black\")\n printSlow(\n f\"[Mana{spaces}{monster.mana: >2}/{monster.manaPoolmax}:\".ljust(14),\n 0,\n False,\n 0.02,\n PADDINGMIDDLE,\n )\n mana_percent_current = int(((monster.mana / monster.manaPoolmax) * 100) // 10)\n\n console.set_text_color(\"bright white\", \"light aqua\")\n printSlow(\" \" * mana_percent_current, 0, False, 0.02, PADDINGMIDDLE)\n\n console.set_text_color(\"bright white\", \"aqua\")\n printSlow(\" \" * (10 - mana_percent_current),\n 0, False, 0.02, PADDINGMIDDLE)\n\n console.set_text_color(\"bright white\", \"black\")\n printSlow(\"]\", 0, False, 0.02, PADDINGMIDDLE)\n console.set_default_text_color()\n console.set_cursor_pos(0, console_info.window_rectangle.bottom - 3)\n\n\ndef setEnemyStamina(monster, initialize=False):\n \"Sets Enemy Instance's Stamina and Updates On-Screen Display for it.\"\n console = begin.console\n console.set_cursor_pos(90, 20)\n\n if initialize == False:\n\n console.set_text_color(\"bright white\", \"black\")\n print(\n f\"[Stamina {monster.stamina: >2}/{monster.staminaPoolmax}:\".ljust(14),\n end=\"\",\n flush=True,\n )\n stamina_percent_current = int(\n ((monster.stamina / monster.staminaPoolmax) * 100) // 10\n )\n\n console.set_text_color(\"bright white\", \"light green\")\n print(\" \" * stamina_percent_current, end=\"\", flush=True)\n\n console.set_text_color(\"bright white\", \"green\")\n print(\" \" * (10 - stamina_percent_current), end=\"\", flush=True)\n\n console.set_text_color(\"bright white\", \"black\")\n printSlow(\"]\", 0, False, 0.02, PADDINGMIDDLE)\n console.set_default_text_color()\n else:\n\n console.set_text_color(\"bright white\", \"black\")\n printSlow(\n f\"[Stamina {monster.stamina: >2}/{monster.staminaPoolmax}:\".ljust(14),\n 0,\n False,\n 0.02,\n PADDINGMIDDLE,\n )\n stamina_percent_current = int(\n ((monster.stamina / monster.staminaPoolmax) * 100) // 10\n )\n\n console.set_text_color(\"bright white\", \"light green\")\n printSlow(\" \" * stamina_percent_current, 0, False, 0.02, PADDINGMIDDLE)\n\n console.set_text_color(\"bright white\", \"green\")\n printSlow(\" \" * (10 - stamina_percent_current),\n 0, False, 0.02, PADDINGMIDDLE)\n\n console.set_text_color(\"bright white\", \"black\")\n printSlow(\"]\", 0, False, 0.02, PADDINGMIDDLE)\n console.set_default_text_color()\n console.set_cursor_pos(0, console_info.window_rectangle.bottom - 3)\n\n\ndef setEnemyArmor(monster, max_armor, initialize=False):\n \"Sets Enemy Instance's Armor and Updates On-Screen Display for it.\"\n console = begin.console\n console.set_cursor_pos(90, 21)\n\n if initialize == False:\n\n console.set_text_color(\"bright white\", \"black\")\n print(\n f\"[Armor {monster.armor: >2}/{max_armor}:\".ljust(14), end=\"\", flush=True\n )\n armor_percentage_current = int(((monster.armor / max_armor) * 100) // 10)\n\n 
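# Aside: every player- and enemy-side bar function in this file repeats the
# same fill computation seen just above -- int(((current / maximum) * 100) // 10)
# maps a resource onto ten bar cells, which are then printed in a light colour
# for the filled part and a dark colour for the rest. A standalone sketch of
# that shared step (render_bar is a hypothetical helper, not defined in Arx):
#
#     def render_bar(current, maximum, cells=10):
#         # Clamp so negative or overfull values cannot break the bar width.
#         filled = max(0, min(cells, int((current / maximum) * cells)))
#         return "#" * filled + "-" * (cells - filled)
#
#     render_bar(75, 100)   # -> '#######---' (7 of 10 cells filled)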
console.set_text_color(\"bright white\", \"light yellow\")\n print(\" \" * armor_percentage_current, end=\"\", flush=True)\n\n console.set_text_color(\"bright white\", \"yellow\")\n print(\" \" * (10 - armor_percentage_current), end=\"\", flush=True)\n\n console.set_text_color(\"bright white\", \"black\")\n printSlow(\"]\", 0, False, 0.02, PADDINGMIDDLE)\n console.set_default_text_color()\n else:\n\n console.set_text_color(\"bright white\", \"black\")\n printSlow(\n f\"[Armor {monster.armor: >2}/{max_armor}:\".ljust(14),\n 0,\n False,\n 0.02,\n PADDINGMIDDLE,\n )\n armor_percentage_current = int(((monster.armor / max_armor) * 100) // 10)\n\n console.set_text_color(\"bright white\", \"light yellow\")\n printSlow(\" \" * armor_percentage_current,\n 0, False, 0.02, PADDINGMIDDLE)\n\n console.set_text_color(\"bright white\", \"yellow\")\n printSlow(\" \" * (10 - armor_percentage_current),\n 0, False, 0.02, PADDINGMIDDLE)\n\n console.set_text_color(\"bright white\", \"black\")\n printSlow(\"]\", 0, False, 0.02, PADDINGMIDDLE)\n console.set_default_text_color()\n console.set_cursor_pos(0, console_info.window_rectangle.bottom - 3)\n\n\ndef clearEnemyStats():\n pass\n\n\n# console = begin.console\n# console.set_cursor_pos(60, 0)\n# print(\" \")\n# console.set_cursor_pos(55, 1)\n# print(\" \")\n# console.set_cursor_pos(55, 2)\n# print(\" \")\n# console.set_cursor_pos(55, 3)\n# print(\" \")\n# console.set_cursor_pos(55, 4)\n# print(\" \")\n# console.set_cursor_pos(0, console_info.window_rectangle.bottom - 3)\n","repo_name":"VladDoesCode/Arx","sub_path":"Code/universalFunctions.py","file_name":"universalFunctions.py","file_ext":"py","file_size_in_byte":28083,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"5"} +{"seq_id":"8193117224","text":"from django.db import models\n\n# Create your models here.\n\nclass BookInfo(models.Model):\n\n # 自动会我们创建主键, 主键的字段名为 id\n #书籍名\n name = models.CharField(max_length=20)\n pub_date = models.DateField(null=True)\n readcount = models.IntegerField(default=0)\n commentcount = models.IntegerField(default=0)\n is_delete = models.BooleanField(default=False)\n\n #字段名不能是 python, mysql 的关键字\n # 字段名不能有 两个 __\n\n # 字段名 , 字段类型 ,字段 选项\n class Meta:\n #元选项\n #用于改变数据库的相关信息\n #修改表名字\n db_table = 'bookinfo'\n\n def __str__(self):\n return self.name\n\n# 准备人物列表信息的模型类\nclass PeopleInfo(models.Model):\n GENDER_CHOICES = (\n (0, 'male'),\n (1, 'female')\n )\n name = models.CharField(max_length=20, verbose_name='名称')\n gender = models.SmallIntegerField(choices=GENDER_CHOICES, default=0, verbose_name='性别')\n description = models.CharField(max_length=200, null=True, verbose_name='描述信息')\n book = models.ForeignKey(BookInfo, on_delete=models.CASCADE, verbose_name='图书') # 外键\n is_delete = models.BooleanField(default=False, verbose_name='逻辑删除')\n\n class Meta:\n db_table = 'peopleinfo'\n verbose_name = '人物信息'\n\n def __str__(self):\n return self.name\n","repo_name":"songaiwen/Django2","sub_path":"bookmanager02/book/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"42936430734","text":"import numpy as np\nimport rootfinding as rf\nimport matplotlib.pyplot as plt\n\n\ndef f(x):\n return np.exp(x) - x - 2\n\n\nxLO = 0\nxHI = 3\n\nx1, _ = rf.bisection(f, xLO, xHI)\nprint(\"Bisection solution in interval [{:.2f}, {:.2f}] is {:.4f}\".format(xLO, xHI, x1))\n\nprint(\"Check solution: f({:.4f}) = {:.4f}\".format(x1, 
f(x1)))\n\nxLO = -3\nxHI = 0\nx2, _ = rf.bisection(f, xLO, xHI)\nprint(\"Bisection solution in interval [{:.2f}, {:.2f}] is {:.4f}\".format(xLO, xHI, x2))\nprint(\"Check solution: f({:.4f}) = {:.4f}\".format(x2, f(x2)))\n\nx = np.linspace(-3, 3, num=1001)\nplt.plot(x, f(x))\nplt.grid(\"on\")\nplt.show()\n","repo_name":"stevenweller/ENGG1003-public","sub_path":"lab_3/Q1.py","file_name":"Q1.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"915537233","text":"import pandas as pd\nimport numpy as np\nfrom scipy import stats\nimport locale\nimport pypyodbc\nimport math\nlocale.setlocale(locale.LC_ALL, '')\n\nfilepath = '../DatiVibrazioni/Dati_Lcf.xlsx'\n#filepath = '../DatiVibrazioni/test.xlsx'\n\ndef main():\n\n connection = pypyodbc.connect('Driver={SQL Server};'\n 'Server=localhost;'\n 'Database=marco_db;'\n 'uid=marco;pwd=CiaoCiao91')\n cursor = connection.cursor()\n SQLCommand = (\"INSERT INTO VW_RAM_APP_VIBRA \"\n \"(tipologia_controllo, impianto, apparecchiatura, strumento, risoluzione_temporale, timestamp, value, \"\n \"value_min, allerta, allerta_blocco) \"\n \"VALUES (?,?,?,?,?,?,?,?,?,?)\")\n\n print(\"leggo il file \"+filepath)\n\n monitoraggio_vibrazioni = pd.read_excel(filepath)\n monitoraggio_vibrazioni['TIMESTAMP'] = pd.to_datetime(monitoraggio_vibrazioni['TIMESTAMP'], format=\"%d-%b-%y %H:%M\")\n\n number_of_rows = len(monitoraggio_vibrazioni.index)\n\n #scorri righe\n for i in range(0, number_of_rows):\n if i%1000 == 0:\n print(str(i)+\"/\"+str(number_of_rows))\n\n row = monitoraggio_vibrazioni.iloc[i]\n\n tipologia_controllo = row['TIPOLOGIA_CONTROLLO']\n impianto = row['IMPIANTO']\n apparecchiatura = row['APPARECCHIATURA']\n strumento = row['STRUMENTO']\n risoluzione_temporale = row['RISOLUZIONE_TEMPORALE']\n timestamp = row['TIMESTAMP']\n value = row['VALUE']\n if math.isnan(value):\n value = None\n value_min = row['VARIANZA']\n if math.isnan(value_min):\n value_min = None\n allerta = row['ALLERTA']\n if str(allerta) == 'nan':\n allerta = None\n\n allerta_blocco = row['ALLARME_BLOCCO']\n if math.isnan(allerta_blocco):\n allerta_blocco = None\n\n values = [tipologia_controllo, impianto, apparecchiatura, strumento, risoluzione_temporale,\n timestamp, value, value_min, allerta, allerta_blocco]\n #print(SQLCommand)\n #print(values)\n\n cursor.execute(SQLCommand,values)\n\n connection.commit()\n connection.close()\n\nif __name__ == '__main__':\n main()","repo_name":"MarcoCompagnoni/python","sub_path":"SQLServer/populateSQLServer.py","file_name":"populateSQLServer.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"3728631633","text":"# 각 하나의 집합 만들기\ndef make_set(x):\n p[x] = x\n\n# 대표자 찾기\ndef find_set(x):\n if p[x] == x : return x\n else: return find_set(p[x])\n\n# 두 집합 합치기(여기서 대표자는 x)\ndef union(x,y):\n px = find_set(x)\n py = find_set(y)\n if rank[px] > rank[py]:\n p[py] = px\n else:\n p[px] = py\n if rank[px] == rank[py]:\n rank[py] += 1\n # print(p)\n\nimport sys\nsys.stdin = open(\"Union_그룹나누기.txt\")\n\nfor tc in range(int(input())):\n N, M = map(int, input().split())\n group = list(map(int, input().split()))\n p = [0 for _ in range(N+1)]\n rank = [0 for _ in range(N+1)]\n\n for i in range(1, N+1):\n make_set(i)\n\n for i in range(M):\n x, y = group[2*i], group[2*i+1]\n union(x, y)\n\n # p를 구할 때 뒤에서 부모가 바뀔 경우, 그 앞쪽의 부모도 바꿔줘야 하기에, p_set을 다시 한 번 더 구해야 함. 
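# (The comment above says: because a later union can change a node's root,
# parent entries recorded earlier may be stale, so find_set has to be re-run
# over every node below to rebuild p_set.) The usual fix is path compression
# inside find_set itself; a sketch of that variant, reusing this snippet's
# global parent table p:
#
#     def find_set(x):
#         if p[x] != x:
#             p[x] = find_set(p[x])   # point x straight at its root
#         return p[x]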
\n # print(set(p))\n p_set = set()\n for i in range(1, N+1):\n p_set.add(find_set(i))\n # print(p_set)\n print(\"#{} {}\".format(tc+1, len(p_set)))","repo_name":"yeonjudkwl/Algorithm","sub_path":"swea/Union_그룹나누기.py","file_name":"Union_그룹나누기.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"38092800596","text":"from itertools import combinations\nfrom rouge import Rouge\nimport numpy as np\n\ndef bestSummary(sent_list, ref_summary, c = 3):\n rouge = Rouge()\n best_summary = ' '.join(sent_list[0:3]);\n\n best_rouge = rouge.get_scores(best_summary, ref_summary)[0]['rouge-1']['f'];\n best_ind = [0,1,2];\n\n counter = 0;\n\n #essentially, if the article is too long, we only probe the first 20 sentences.\n combination_bound = min(9, len(sent_list));\n\n for c in range(2,5):\n sent_inds = list(range(combination_bound))\n for combo in combinations(sent_inds , c):\n summ_prop = ' '.join([sent_list[i] for i in combo]);\n scores = rouge.get_scores(summ_prop, ref_summary)[0];\n # print(scores)\n r1f = scores['rouge-1']['f']\n if(r1f > best_rouge):\n best_rouge = r1f;\n best_summary = summ_prop\n best_ind = combo;\n counter+=1;\n # if(counter>200):\n # break;\n\n return best_summary, best_rouge, best_ind;\n\n## what if we search the document for maximal sentences based on rouge...then combine the top 3.\ndef bestSummary_individual(sent_list, ref_summary, cmax = 10):\n rouge = Rouge()\n ind_rouge = [];\n for sent in sent_list:\n scores = rouge.get_scores(sent, ref_summary)[0];\n ind_rouge.append(scores['rouge-1']['f'])\n ind_rouge = np.array(ind_rouge)\n\n best_summary = '';\n best_rouge = 0; best_inds = [];\n lower_bound = 3;\n if(len(sent_list) < lower_bound):\n lower_bound = len(sent_list);\n if(cmax > len(sent_list)):\n cmax = len(sent_list);\n for summary_length in range(lower_bound,cmax+1):\n ind = np.argpartition(ind_rouge, -1*summary_length)[-1*summary_length:]\n #print(ind)\n summary_prop = ' '.join([sent_list[i] for i in ind]);\n scores = rouge.get_scores(summary_prop, ref_summary)[0];\n rf1 = scores['rouge-1']['f'];\n if(rf1 > best_rouge):\n best_rouge = rf1;\n best_summary = summary_prop\n best_inds = ind;\n return best_summary, best_rouge, best_inds;\n\ndef bestSummary_iterative(sent_list, ref_summary, cmax = 10, max_probe_length = 50, max_summary_length =3):\n '''\n iteratively tries the best sentences.\n :param sent_list:\n :param ref_summary:\n :param cmax:\n :return:\n '''\n rouge = Rouge()\n\n best_summary = '';\n best_rouge = 0; best_inds = [];\n\n #termination condition: if we can't improve the score adding in the ith sentence\n cur_best = [];\n for i in range(1, cmax+1):\n best_ind = -1;\n best_rouge = 0; ## need to get three\n for j in range(0, min(max_probe_length, len(sent_list))):\n if(j in best_inds): continue; #no duplicates\n new_sum = ' '.join(cur_best+[sent_list[j]]);\n score = rouge.get_scores(new_sum, ref_summary)[0]['rouge-1']['f'];\n if(score>best_rouge):\n best_rouge = score;\n best_ind = j;\n if(best_ind!= -1):\n cur_best.append(sent_list[best_ind]);\n best_inds.append(best_ind);\n if(len(best_inds) == max_summary_length):\n break;\n # else:\n # break;\n #best_summary = ' '.join(cur_best);\n return cur_best, best_rouge, 
best_inds;\n","repo_name":"yijunj/Stanford-CS224N-Summarization","sub_path":"artifical_summary/sentence_combination.py","file_name":"sentence_combination.py","file_ext":"py","file_size_in_byte":3325,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"72623605272","text":"n = int(input())\n\nans = -1\ncnt_5, rest_5 = divmod(n, 5)\nif rest_5 == 3:\n ans = cnt_5 + 1\n # print(cnt_5 + 1)\nelif rest_5 == 0:\n ans = cnt_5\n # print(cnt_5)\nelse:\n if cnt_5 > 0:\n temp = rest_5 + 5\n else:\n temp = n\n while cnt_5 > 0:\n cnt_5 -= 1\n cnt_3, rest_3 = divmod(temp, 3)\n if rest_3 == 0:\n ans = cnt_5 + cnt_3\n break\n temp += 5\n else:\n ans = -1\nprint(ans)","repo_name":"Leeyounwoo/Algorithm","sub_path":"BAEKJOON/단계별로 풀기/08. 기본 수학 1/s8.py","file_name":"s8.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"34616250164","text":"import click\nfrom typing import List\nfrom src.exporter import CeleryMetricsExporter\n\n@click.command(context_settings={\"auto_envvar_prefix\": \"CELERY_EXPORTER\"})\n\n@click.option(\n \"--broker-url\", required=True, help=\"The url to the broker, e.g redis://1.2.3.4\"\n)\n@click.option(\n \"--port\",\n type=int,\n default=9540,\n show_default=True,\n help=\"The port the exporter will listen on\",\n)\n@click.option(\n \"-q\",\n \"--queue-names\",\n multiple=True,\n required=True,\n help=\"The queues to track\",\n)\n@click.option(\n \"-r\",\n \"--refresh\",\n type=int,\n default=30,\n show_default=True,\n help=\"How often to refresh metrics in seconds\",\n)\n\n\ndef cli(broker_url: str, port: int, queue_names: List[str], refresh: int):\n ctx = click.get_current_context()\n params = ctx.params\n broker_url = params[\"broker_url\"]\n port = params[\"port\"]\n queue_names = params[\"queue_names\"]\n metrics_refresh = params[\"refresh\"]\n exporter = CeleryMetricsExporter(broker_url, queue_names, port, metrics_refresh)\n exporter.run()\n\n\n","repo_name":"AllenInstitute/CeleryMetricsExporter","sub_path":"src/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"9675255930","text":"from math import sin, cos, tan, radians\n\nangulo = float(input('Digite um ângulo: '))\nseno = sin(radians(angulo))\ncosseno = cos(radians(angulo))\ntangente = tan(radians(angulo))\n\nprint(f'O seno do ângulo {angulo} é: {seno:.2f}')\nprint(f'O cosseno do ângulo {angulo} é: {cosseno:.2f}')\nprint(f'A tangente do ângulo {angulo} é: {tangente:.2f}')\n\n#RADIANS serve pra converter de graus pra radianos, porque as funções seno, cosseno e tangente recebem valores em radianos\n","repo_name":"leonardo-lopes-br/estudos_gerais","sub_path":"python/CursoemVideo/exercicios/ex018-sen-cos-tan.py","file_name":"ex018-sen-cos-tan.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"9609480235","text":"from local.imports import *\nfrom local.test import *\nfrom local.core import *\nfrom local.data.pipeline import *\nfrom local.data.external import *\nfrom local.notebook.showdoc import show_doc\nfrom local.data.core import show_titled_image, get_image_files, GrandparentSplitter, ImageItem, parent_label\nfrom PIL import Image\n\n# ## Categorize -\n# export\nclass Categorize(Transform):\n \"\"\"\n \"Reversible transform of category string to 
`vocab` id\"\n\n purpose:\n - how to do category transformation for labels?\n - of course, need to create a subclass of Transform\n - it has its own __init__, encodes, decodes, setups\n 1. __init__(self, vocab, train_attr, subset_idx, mask, is_tuple):\n 1.0 inherit and introduce unique attributes for `Categorize`\n 1.1 `self.vocab`, `self.train_attr`, `self.subset_idx`\n 1.2 `self.o2i` is derived from `self.vocab`\n 2. setups(self, dsrc):\n 2.1 during the setup period, prepare self.vocab, self.o2i from `dsrc` with `self.train_attr` or `self.subset_idx`\n 3. encodes(self, o):\n 3.1 from unique cateory to idx, return idx\n 3.2 or just return the category\n 4. decodes(self, o):\n 4.1 return self.vocab[o]\n \"\"\"\n order,assoc=1,Item\n def __init__(self, vocab=None, train_attr=\"train\", subset_idx=None, mask=None, is_tuple=None):\n super().__init__(mask=mask,is_tuple=is_tuple)\n self.vocab,self.train_attr,self.subset_idx = vocab,train_attr,subset_idx\n self.o2i = None if vocab is None else {v:k for k,v in enumerate(vocab)}\n\n def setups(self, dsrc):\n if not dsrc: return\n if self.subset_idx is not None: dsrc = dsrc.subset(self.subset_idx)\n elif self.train_attr: dsrc = getattr(dsrc,self.train_attr)\n self.vocab,self.o2i = uniqueify(dsrc, sort=True, bidir=True)\n\n def encodes(self, o): return self.o2i[o] if self.o2i else o\n def decodes(self, o): return self.vocab[o]\n# ### End-to-end dataset example with MNIST\npath = untar_data(URLs.MNIST_TINY)\n(path/'train').ls()\nitems = get_image_files(path);items\nsub = items[1,2,3, -3,-2,-1].mapped(parent_label); sub[0]\nlabeltfm = Categorize(subset_idx=[0,1,2,3,4])\ntfmlist = TfmdList(sub, Pipeline([labeltfm]));tfmlist\n# use TfmOver.piped([]) is a must here\ntfmlist = TfmdList(sub, TfmOver.piped([labeltfm]));\ntfmlist\ntfmlist[0] # this is encoding from 3, 7 to 0 and 1\ntfmlist.decode_at(1)\ntfmlist.show_at(0)\nlabeltfm.vocab\nlabeltfm.o2i\n\n########### complex example\n# split data\nsplitter = GrandparentSplitter()\nsplits = splitter(items)\ntrain,valid = (items[i] for i in splits)\ntrain,valid\n# prepare tfms\ntimg = Transform(Image.open, # encodes\n assoc=ImageItem(cmap=\"Greys\", figsize=(1,1)))# assoc=Item no more\ntimg2tensor = Transform(compose(array,tensor))# this tfm is array+tensor\ntfms = [[timg,timg2tensor,partial(torch.unsqueeze,dim=0)],# group tfms 1 for x\n [parent_label, Categorize(subset_idx=splits[0])]] # group tfms 2 for y\n\n\n# important! how to understand TfmOver.piped and TfmdList(items, tfm)\ntfm = TfmOver.piped(tfms)\n# this func created `a`, the duplicate Transform, so that when an item go through this tfm, it returns two copies\n# a = [functools.partial(<function replicate at 0x1258e4bf8>, match=(#2) [(#3) [<function open at 0x122cf8048>,<function compose.<locals>._inner at 0x125820378>,functools.partial(<built-in method unsqueeze of type object at 0x113876950>, dim=0)], (#2) [<function parent_label at 0x1259180d0>,<class '__main__.Categorize'>]]),\n# then this func turn `tfms` into `TfmOver`, i.e., make 2 pipelines from `tfms`\n# b = TfmOver((#2) [[],[]])]}\n# finally wrap a, b into a single pipeline, so that they linked in order\n\n# so that when `TfmdList(items, tfm)`, it is to bind items to tfm and set up tfm ready to use, so that datasets[0] can actually apply tfm to items[0]\n# 0. to setup all pipelines and Transforms, will do the following\n# 1. set up a, so when `items[0]` it outputs two copies of items[0]\n# 2. 
start to set up b => set up b[0] => TfmOver.setup to Transform.setup to Pipeline.setups to Pipeline.add to loop through and set up each tfm inside b[0], at the end, `items[0]` can do transforms from image to tensor => set up b[1] => TfmOver.setup to Transform.setup to Pipeline.setups to Pipeline.add to loop through and set up each tfm inside b[1], at the end, `items[1]` can do transforms from image to label, meanwhile, `Categorize.setups` get `self.vocab` and `self.o2i` ready based on `items` and `subset_idx`\n# 3. apply each pipeline to each copy\ndatasets = TfmdList(items, tfm)\n# both methods above just to get tfms ready, no application of tfms yet\n\ndatasets\n\n# only in the following method\n# it will zip two copies of items[0] with two pipelines from b\n# TfmOver.__call__ will do this zip work\ndatasets[0]\n# NB: `DataSource` is an easier way to handle this common case\ntrain_ds,valid_ds = map(datasets.subset, splits)\n\nx,y = train_ds[3]\nxd,yd = train_ds.decode_at(3)\ntest_eq(parent_label(train[3]),yd)\ntest_eq(array(Image.open(train[3])),xd[0])\n\ntrain_ds.show_at(3);\n","repo_name":"EmbraceLife/fastai_treasures","sub_path":"my_workstation/my-v2/data.core.Categorize.py","file_name":"data.core.Categorize.py","file_ext":"py","file_size_in_byte":5039,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"5"} +{"seq_id":"1730776710","text":"import unittest\n\nfrom mock import MagicMock\n\nfrom steinlib.exceptions import SteinlibParsingException\nfrom steinlib.instance import SteinlibInstance\nfrom steinlib.parser import RootEofParser\n\n\nclass TestRootEofParser(unittest.TestCase):\n graph = SteinlibInstance()\n\n def test_valid_eof(self):\n upper_case_eof = 'EOF'\n result = RootEofParser.matches(upper_case_eof,\n TestRootEofParser.graph)\n self.assertIsNotNone(result)\n\n def test_case_insensitive_eof(self):\n mixed_case_eof = 'eOf'\n result = RootEofParser.matches(mixed_case_eof,\n TestRootEofParser.graph)\n self.assertIsNotNone(result)\n\n def test_invalid_eof(self):\n invalid_eof = 'begin'\n with self.assertRaises(SteinlibParsingException):\n RootEofParser.matches(invalid_eof,\n TestRootEofParser.graph)\n\n def test_right_eof_callback_is_executed(self):\n mock_graph = MagicMock()\n valid_eof = 'EOF'\n _ = RootEofParser.matches(valid_eof, mock_graph)\n mock_graph.eof.assert_called_with('EOF', ())\n","repo_name":"leandron/steinlib","sub_path":"test/test_eof_parser.py","file_name":"test_eof_parser.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"5"} +{"seq_id":"20303123222","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n__author__ = \"ipetrash\"\n\n\ndef solution(roman):\n roman_by_arabic = {\n \"I\": 1,\n \"V\": 5,\n \"X\": 10,\n \"L\": 50,\n \"C\": 100,\n \"D\": 500,\n \"M\": 1000,\n }\n\n total_number = 0\n last_num = -1\n\n for c in reversed(roman):\n current_number = roman_by_arabic[c]\n\n if current_number >= last_num:\n total_number += current_number\n else:\n total_number -= current_number\n\n last_num = current_number\n\n return total_number\n\n\nif __name__ == \"__main__\":\n assert solution(\"XXI\") == 21\n assert solution(\"XIX\") == 19\n assert solution(\"MDCLXVI\") == 1666\n","repo_name":"gil9red/SimplePyScripts","sub_path":"roman_numerals_decoder.py","file_name":"roman_numerals_decoder.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":141,"dataset":"github-code","pt":"5"} 
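The roman-numeral decoder that closes the record above scans the string right to left: a numeral at least as large as the one before it is added, and a smaller one is subtracted, which is exactly what makes subtractive pairs such as IV and XC come out right. A few extra spot-checks of that rule (hypothetical additions reusing the same solution function):

    for roman, arabic in [("IV", 4), ("IX", 9), ("XL", 40), ("MCMXCIV", 1994)]:
        assert solution(roman) == arabic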
+{"seq_id":"16876652333","text":"import numpy as np\r\nfrom sklearn import manifold\r\nimport matplotlib.pyplot as plt\r\n\r\n#距離行列の読み込み\r\ndatum = np.loadtxt(\"mds_clu.csv\",delimiter=\",\",usecols=range(1,174))\r\n\r\nmds = manifold.MDS(n_components=2, dissimilarity=\"precomputed\", random_state=6)\r\n\r\nprint(datum)\r\n\r\n\r\npos = mds.fit_transform(datum)\r\n\r\nlabels = np.genfromtxt(\"mds_clu.csv\",delimiter=\",\",usecols=0,dtype=str)\r\n\r\nprint(labels)\r\n\r\n\r\n#図の詳細設定\r\nplt.figure()\r\n \r\nangle = 120.\r\ntheta = (angle/180.) * np.pi\r\n \r\nrotMatrix = np.array([[np.cos(theta), -np.sin(theta)], \r\n [np.sin(theta), np.cos(theta)]])\r\nrevMatrix = np.array([[-1, 0], [0, 1]])\r\n \r\nfixed_pos = rotMatrix.dot(revMatrix.dot(pos.T)).T\r\nplt.scatter(fixed_pos[:, 0], fixed_pos[:, 1], marker = 'o')\r\n \r\nfor label, x, y in zip(labels, fixed_pos[:, 0], fixed_pos[:, 1]):\r\n plt.annotate(\r\n label,\r\n xy = (x, y), xytext = (60, 10),\r\n textcoords = 'offset points', ha = 'right', va = 'bottom',\r\n # bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0.5),\r\n #arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0')\r\n )\r\n \r\nplt.show()\r\n","repo_name":"keisuke-ito/Python","sub_path":"nlp/mds_test.py","file_name":"mds_test.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"33393050580","text":"import json\nimport logging\nimport numpy as np\n\nfrom scipy.sparse._csr import csr_matrix\nfrom sklearn.cluster import KMeans\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import Normalizer\n\n\nclass TextClusterer:\n def __init__(self, n_clusters: int = 3) -> None:\n \"\"\"Initializes TfidfVectorizer, KMeans and LSA dimensionality reducers\n\n Args:\n n_clusters (int, optional): Number of clusters. 
Defaults to 3.\n \"\"\"\n self.vectorizer = TfidfVectorizer(stop_words=\"english\")\n self.kmeans = KMeans(n_clusters=n_clusters, max_iter=100, random_state=0)\n self.lsa = make_pipeline(TruncatedSVD(n_components=100), Normalizer(copy=False))\n\n def generate_kmeans(self, tf_idf_vectors: np.ndarray) -> np.ndarray:\n \"\"\"Generates k-means clustering\n\n Args:\n tf_idf_vectors (ndarray): Array of vectorized and dimension-reduced tf-idf vectors\n\n Returns:\n ndarray: Array of clustered tf-idf-vectors\n \"\"\"\n return self.kmeans.fit_transform(tf_idf_vectors)\n\n def vectorize_text(self, raw_data: dict) -> csr_matrix:\n \"\"\"Converts raw text to tf_idf vectors\n\n Args:\n raw_data (dict): Raw text\n\n Returns:\n csr_matrix: tf-idf vectors\n \"\"\"\n return self.vectorizer.fit_transform([raw_data[url] for url in raw_data])\n\n def reduce_dimensions(self, vectorized_text: csr_matrix) -> np.ndarray:\n \"\"\"Reduces dimensionality of vectorized text\n\n Args:\n vectorized_text (csr_matrix): vectorized text\n\n Returns:\n ndarray: reduced-dimension vectorized text\n \"\"\"\n return self.lsa.fit_transform(vectorized_text)\n\n def generate_stats(self, filename: str) -> None:\n \"\"\"Outputs statistics of cluster centroids and top 20 terms in centroids\"\"\"\n output_json = self._generate_stats_json(20)\n print(output_json)\n self.save_to_output_file(filename, output_json)\n\n def classify_documents_by_cluster(self, raw_text: dict) -> dict:\n \"\"\"Generates dictionary of documents with cluster label and raw text for AFINN analysis\n\n Args:\n raw_text (dict): raw_text\n\n Returns:\n dict: raw_text with cluster label\n \"\"\"\n output = {}\n doc_cluster_labels = self.kmeans.labels_\n\n for i, key in enumerate(raw_text):\n output[key] = {\"cluster\": int(doc_cluster_labels[i]), \"raw\": raw_text[key]}\n return output\n\n def save_to_output_file(self, filename: str, data: dict) -> None:\n \"\"\"Saves output json to file\n\n Args:\n filename (str): filename\n \"\"\"\n with open(filename, \"w\", encoding=\"utf-8\") as file:\n output = json.dumps(data)\n file.write(output)\n logging.info(\n f\"{'Cluster stats' if 'kmeans' in filename else 'Document clusters'} written to {filename}.\"\n )\n\n def get_raw_text(self, filename: str) -> dict:\n \"\"\"Get raw text from file\n\n Args:\n filename (str): filename\n\n Returns:\n dict: dictionary of raw text by URL\n \"\"\"\n data = {}\n try:\n with open(filename, \"r\") as file:\n data = json.load(file)\n except FileNotFoundError:\n logging.error(f\"Invalid filename: {filename}\")\n return data\n\n def _generate_stats_json(self, k: int = 10) -> dict:\n \"\"\"Generates output dictionary in JSON format\n\n Args:\n k (int): Top k terms. 
Default is 10.\n\n Returns:\n dict: Cluster n mapped to top k terms.\n \"\"\"\n num_top_terms = k\n output_json = {}\n\n ordered_centroids = self._generate_ordered_centroids()\n terms = self.vectorizer.get_feature_names_out()\n\n for i in range(self.kmeans.n_clusters):\n output_json[f\"cluster_{i}\"] = [\n terms[j] for j in ordered_centroids[i, :num_top_terms]\n ]\n return output_json\n\n def _generate_ordered_centroids(self) -> np.ndarray:\n \"\"\"Generate ordered cluster centroids\n\n Returns:\n np.ndarray: Sorted cluster centroids\n \"\"\"\n original_space_centroids = self.lsa[0].inverse_transform(\n self.kmeans.cluster_centers_\n )\n return original_space_centroids.argsort()[:, ::-1]\n","repo_name":"john-s-lin/nonsense-sentiment-scraper","sub_path":"src/clustering/clustering.py","file_name":"clustering.py","file_ext":"py","file_size_in_byte":4509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"39152173709","text":"#%%\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport sys\r\nimport pickle\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.ensemble import ExtraTreesClassifier\r\nfrom sklearn.metrics import accuracy_score\r\npath = 'C:/Users/jsh/Desktop/game'\r\nsys.path.append(path)\r\n#%%\r\ntrain_data = pd.read_csv(path+'/total/train0910_pca.csv')\r\ntest_data = pd.read_csv(path+'/total/test0910_pca.csv')\r\n#%%\r\nfrom sklearn.model_selection import StratifiedShuffleSplit\r\n\r\nsplit = StratifiedShuffleSplit(n_splits=1, test_size = 0.2, random_state=123)\r\nfor train_index, valid_index in split.split(train_data, train_data[\"label\"]):\r\n train,valid = train_data.loc[train_index], train_data.loc[valid_index]\r\n feats = [f for f in train_data.columns if f not in ['acc_id','label']]\r\n train_x=train[feats]\r\n train_y=train['label']\r\n valid_x=valid[feats]\r\n valid_y=valid['label']\r\n#%%\r\npaste_clf = RandomForestClassifier(n_estimators = 200,\r\n random_state=42,\r\n n_jobs = -1,\r\n verbose=10,\r\n bootstrap=False)\r\npaste_clf.fit(train_x,train_y)\r\npickle.dump(paste_clf, open(\"rf.pickle\", \"wb\"))\r\n#%%\r\npred_valid_label = paste_clf.predict(valid_x)\r\nprint(paste_clf.__class__.__name__, accuracy_score(valid_y,pred_valid_label))\r\n#%%\r\nbag_clf = RandomForestClassifier(n_estimators = 300,\r\n random_state=42,\r\n n_jobs = -1,\r\n verbose=10,\r\n bootstrap=True,\r\n oob_score=True,\r\n )\r\nbag_clf.fit(train_x,train_y)\r\npickle.dump(bag_clf, open(\"bag_rf.pickle\", \"wb\"))\r\n#%%\r\npred_valid_label = bag_clf.predict(valid_x)\r\nprint(bag_clf.__class__.__name__, accuracy_score(valid_y,pred_valid_label))\r\n\r\n#%%\r\npaste_clf = ExtraTreesClassifier(n_estimators = 200,\r\n random_state=42,\r\n n_jobs = -1,\r\n verbose=10,\r\n bootstrap=False,\r\n oob_score=False\r\n )\r\npaste_clf.fit(train_x,train_y)\r\npickle.dump(paste_clf, open(\"extra.pickle\", \"wb\"))\r\n#%%\r\npred_valid_label = paste_clf.predict(valid_x)\r\nprint(paste_clf.__class__.__name__, accuracy_score(valid_y,pred_valid_label))\r\n\r\n#%%\r\nbag_clf = ExtraTreesClassifier(n_estimators = 400,\r\n random_state=42,\r\n n_jobs = -1,\r\n verbose=10,\r\n bootstrap=True,\r\n oob_score=True\r\n )\r\nbag_clf.fit(train_x,train_y)\r\npickle.dump(bag_clf, open(\"bag_extra.pickle\", \"wb\"))\r\n#%%\r\npred_valid_label = bag_clf.predict(valid_x)\r\nprint(bag_clf.__class__.__name__, accuracy_score(valid_y,pred_valid_label))\r\n\r\n#%%\r\nclf2 = pickle.load(open(\"rf.pickle\", 
\"rb\"))\r\npred_valid_label=list(clf2.predict(valid_x))\r\nprint('Accuracy_score %.6f' % accuracy_score(valid_y, pred_valid_label))\r\n\r\nfeats = [f for f in train_data.columns if f not in ['acc_id','label']]\r\nimportance_data = pd.DataFrame()\r\nimportance_data[\"feature\"] = feats\r\nimportance_data[\"importance\"] = clf2.feature_importances_\r\n\r\n#%%\r\nfrom sklearn.metrics import confusion_matrix\r\n#2month, month, retained, week 순 입니다.\r\ncm=confusion_matrix(valid_y,pred_valid_label)\r\nplt.matshow(cm,cmap=plt.cm.gray)\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"mmtos/bigcon","sub_path":"game/3. RF_last.py","file_name":"3. RF_last.py","file_ext":"py","file_size_in_byte":3600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"25689439248","text":"import math\nimport random\nimport sys\nimport time\n\nimport pygame\n\nfrom . import GRID_SIZE, SCREEN_HEIGHT, SCREEN_WIDTH, Layer, Point, font\nfrom .enemy import Enemy\nfrom .exit import Exit\nfrom .floor import Floor\nfrom .food import Food\nfrom .player import Player\nfrom .wall import OuterWall, Wall\n\n\nclass DayTransition(object):\n\n def __init__(self, day, dead=False):\n self.day = day\n self.dead = dead\n self.start = time.time() * 1000\n\n def draw(self, surface):\n if self.dead:\n text = text = font.render(\"You starved on day {}\".format(self.day), True, (255, 255, 255))\n else:\n text = font.render(\"Day {}\".format(self.day), True, (255, 255, 255))\n surface.blit(text, (SCREEN_WIDTH / 2 - text.get_width() / 2, SCREEN_HEIGHT / 2 - text.get_height() / 2))\n\n\nclass Gui(object):\n layer = Layer.GUI\n\n def __init__(self, position):\n self.position = position\n self.action = \"\"\n self.reset_action = False\n\n def update(self):\n if self.action and self.reset_action:\n self.action = \"\"\n self.reset_action = False\n elif self.action:\n self.reset_action = True\n\n def draw(self, surface):\n self.text = font.render(\"{} Food: {}\".format(self.action, scene.player.food), True, (255, 255, 255))\n surface.blit(self.text, (self.position.x - self.text.get_width() / 2,\n self.position.y))\n\n\nclass Scene(object):\n wall_count = (5, 9)\n food_count = (1, 5)\n\n def __init__(self, level):\n self.level = level\n self.player = None\n self.gui = None\n self.transition = None\n self.transition_time = None\n self.reset(level)\n\n def add(self, game_object):\n self.objects[game_object.layer].append(game_object)\n\n def remove(self, game_object):\n self.objects[game_object.layer].remove(game_object)\n\n def reset(self, level, dead=False):\n self.level = level\n self.transition = DayTransition(level, dead)\n\n def post_transition(self):\n width, height = GRID_SIZE\n self.objects = {layer: [] for layer in Layer}\n\n if self.player:\n self.player.position = Point(1, height - 2)\n else:\n self.player = Player(Point(1, height - 2))\n\n self.add(self.player)\n\n # Place outer walls and floor\n for x in xrange(width):\n for y in xrange(height):\n if x == 0 or y == 0 or x == width - 1 or y == height - 1:\n self.add(OuterWall.create_random(Point(x, y)))\n else:\n self.add(Floor.create_random(Point(x, y)))\n\n grid_positions = [Point(x, y) for x in xrange(2, width - 2) for y in xrange(2, height - 2)]\n random.shuffle(grid_positions)\n\n # Place inside walls\n wall_count = random.randint(*self.wall_count)\n\n for _ in xrange(wall_count):\n self.add(Wall.create_random(grid_positions.pop()))\n\n # Place enemies\n enemy_count = int(math.log(self.level + 2, 2))\n\n for _ in 
xrange(enemy_count):\n self.add(Enemy.create_random(grid_positions.pop()))\n\n # Place food\n food_count = random.randint(*self.food_count)\n\n for _ in xrange(food_count):\n self.add(Food.create_random(grid_positions.pop()))\n\n # Place exit\n self.add(Exit(Point(width - 2, 1)))\n\n # Display GUI\n self.gui = Gui(Point(SCREEN_WIDTH / 2, SCREEN_HEIGHT - 50))\n self.add(self.gui)\n\n def draw(self, surface):\n if self.transition and (time.time() * 1000 - self.transition.start > 1000 or self.level < 2):\n self.transition.draw(surface)\n return\n\n for layer in Layer:\n for game_object in self.objects[layer]:\n game_object.draw(surface)\n\n def update_game_objects(self):\n for layer in Layer:\n for game_object in self.objects[layer]:\n game_object.update()\n\n def update(self):\n events = pygame.event.get()\n\n if self.transition and time.time() * 1000 - self.transition.start > 4000:\n self.transition = None\n if self.player and self.player.dead:\n sys.exit()\n self.post_transition()\n if self.transition:\n return\n\n for event in events:\n if event.type == pygame.QUIT:\n sys.exit()\n if event.type == pygame.KEYDOWN:\n move = None\n if event.key == pygame.K_LEFT:\n move = Point(-1, 0)\n if event.key == pygame.K_RIGHT:\n move = Point(1, 0)\n if event.key == pygame.K_UP:\n move = Point(0, -1)\n if event.key == pygame.K_DOWN:\n move = Point(0, 1)\n\n if move:\n scene.player.move(move)\n scene.update_game_objects()\n\nscene = Scene(1)\n","repo_name":"volnt/crawler","sub_path":"src/scene.py","file_name":"scene.py","file_ext":"py","file_size_in_byte":5001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"13587401030","text":"'''\nThis function is to add new person to exis csv file (if not then create new)\n\nStructure: \n Id, Image, Label\n\n'''\n\nimport pandas as pd\nimport os\n\nDATA_PATH = 'DataFace'\nCSV_PATH = 'CSV_LIST'\nPATH = 'CSV_LIST/CSV_List.csv'\n\ndef toCsvFile():\n '''\n Export new .csv and return tuple (dataFrame, newNames)\n\n '''\n\n if not os.path.isdir(CSV_PATH):\n os.makedirs(CSV_PATH)\n df = pd.DataFrame({'Name':[],'Image':[], 'ID':[]})\n count = 0\n else:\n df = pd.read_csv(PATH)\n count = df['ID'].values.max() + 1\n\n Names = (list)(df['Name'])\n ID = (list)(df['ID'])\n Images = (list)(df['Image'])\n newNames = []\n\n for name in os.listdir(DATA_PATH):\n if name not in Names:\n for image in os.listdir(os.path.join(DATA_PATH, name)):\n Names.append(name)\n Images.append(image)\n ID.append(count)\n count += 1\n newNames.append(name)\n\n df = pd.DataFrame({'Name':Names,'Image':Images, 'ID':ID})\n df.to_csv(PATH, index=False)\n\n return df, newNames\n\n\n","repo_name":"gnvml/STTBK","sub_path":"utils/listCSV.py","file_name":"listCSV.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"43336829169","text":"# You are given an array-like data structure Listy which lacks a size method. It does, however,\n# have an elementAt(i) method that returns the element at index i in O(1) time. If i is beyond the\n# bounds of the data structure, it returns -1. (For this reason, the data structure only supports\n# positive integers). Given a Listy which contains sorted, positive integers, find the index at\n# which an element x occurs. 
If x occurs multiple times, you may return any index.\n\n# Use a list, but don't use the len() method.\n# Time: O(log^2(n))\ndef binary_search(listy, x):\n if listy[0] == -1:\n return None\n lo = 0\n hi = find_size(listy)\n while lo <= hi:\n mid = (lo + hi) // 2\n if x < listy[mid]:\n hi = mid-1\n elif x > listy[mid]:\n lo = mid+1\n else:\n return mid\n return None\n\ndef find_size(listy, i=0, delta=1):\n if listy[i+delta] == -1 and listy[i+delta-1] != -1:\n return i+delta-1\n elif listy[i+delta] != -1:\n return find_size(listy, i, 2*delta)\n else:\n return find_size(listy, i+delta//2, 1)\n\nif __name__ == \"__main__\":\n listy = [0,1,2,3,4,5,6,7,-1]\n print(find_size(listy))\n print(binary_search(listy, 7))\n\n\ndef binary_search(listy, x):\n i = 1\n while listy[i] != -1 and listy[i] < x:\n i *= 2\n return _binary_search(listy, x, i // 2, i)\n\ndef _binary_search(listy, x, lo, hi):\n while lo <= hi:\n mid = (lo + hi) // 2\n if x < listy[mid] or listy[mid] == -1:\n hi = mid-1\n elif x > listy[mid]:\n lo = mid+1\n else:\n return mid\n return None\n\n\nif __name__ == \"__main__\":\n listy = [0,1,2,3,4,5,6,7,-1]\n print(binary_search(listy, 7))\n\n\n\n\n\n","repo_name":"bmpasini/CtCI-6th-Edition","sub_path":"chap-10/10.4.py","file_name":"10.4.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"5"} +{"seq_id":"33356765166","text":"# 문제링크\n# https://programmers.co.kr/learn/courses/30/lessons/12930\n\n# 실패한 풀이..\n# def solution(s):\n# answer = []\n# words = s.split(' ')\n \n# for word in words:\n# word_1 = ''\n# for i in range(len(word)):\n# print(word)\n# if i % 2 == 0:\n# word_1 += word[i].upper()\n# else:\n# word_1 += word[i]\n# answer.append(word_1)\n# return ' '.join(answer)\n\n# 정답 풀이\ndef solution(s):\n answer = '' # 정답을 담아줄 문자열\n cnt = 0 # 공백마다 단어를 구분하고, 각 단어의 인덱스를 표시해줄 변수\n for idx in range(len(s)): # 입력받은 전체 문자열 순회\n if s[idx] != ' ': # 공백이 아니고,\n if cnt % 2 == 0: # 단어의 인덱스가 0,2,4 ... 짝수번째라면\n answer += s[idx].upper() # 대문자로 변환해서 정답 문자열에 더해준다.\n cnt += 1 # 공백이 아니므로 단어의 인덱스 +1\n elif cnt % 2 ==1: # 단어의 인덱스가 1,3,5.. 
홀수번째라면\n answer += s[idx].lower() # 소문자로 변환해서 정답 문자열에 더해준다.\n cnt += 1 # 마찬가지로 단어의 인덱스 +1\n else: # 공백이라면,\n answer += ' ' # 공백 그대로 정답 문자열에 더해주고\n cnt = 0 # 새로운 단어가 시작되므로, 단어의 인덱스를 0으로 초기화\n \n return answer # 위에서 대소문자 변환을 거친 정답 문자열 리턴\n\n# print(solution(\" T Ry HeLLO WOrld \"))\n\n","repo_name":"StrummingDown/Programmers","sub_path":"Lv1/MakeStrangeWord.py","file_name":"MakeStrangeWord.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"2637764010","text":"\nimport jwt\nfrom os import getenv\nfrom jwt import exceptions\nfrom werkzeug.exceptions import HTTPException\n\n\ndef validate_token(headers, output=False):\n token = validate_headers(headers)\n try:\n if output:\n return jwt.decode(token, key=getenv(\"SECRET\"), algorithms=[\"HS256\"])\n jwt.decode(token, key=getenv(\"SECRET\"), algorithms=[\"HS256\"])\n except exceptions.DecodeError:\n raise HTTPException(\"Invalid token.\")\n except exceptions.ExpiredSignatureError:\n raise HTTPException(\"You are not authorized to perform this operation-token\")\n\n\ndef validate_headers(headers):\n if \"token\" not in headers:\n raise HTTPException(\"You are not authorized to perform this operation-authorization\")\n return headers['token']\n","repo_name":"estiven199/api-users","sub_path":"utils/funtions.py","file_name":"funtions.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"25067861135","text":"import os\nimport argparse\nimport numpy as np\nimport pandas as pd\nimport yaml\nimport random\nimport warnings\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nfrom kaggle.api.kaggle_api_extended import KaggleApi\n\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import KFold\nfrom sklearn.metrics import mean_squared_error\n\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom sklearn.svm import SVR\nfrom sklearn.ensemble import RandomForestRegressor\nimport xgboost as xgb\nimport optuna\n\n# 自作モジュール\nfrom preprocessing import preprocessing\nfrom models import WeightAverageEnsembleRegressor\n\ndef objective_wrapper(args, X_train, y_train):\n \"\"\"\n objective に trial 以外の引数を指定可能にするためのラッパーメソッド\n \"\"\"\n def objective(trial):\n #--------------------------------------------\n # ベイズ最適化でのチューニングパイパーパラメーター\n #--------------------------------------------\n params = {\n 'weights1': trial.suggest_discrete_uniform('weights1', 0.00, 0.50, 0.10),\n 'weights2': trial.suggest_discrete_uniform('weights2', 0.00, 0.50, 0.10),\n 'weights3': trial.suggest_discrete_uniform('weights3', 0.00, 1.00, 0.10),\n 'weights4': trial.suggest_discrete_uniform('weights4', 0.50, 1.00, 0.10), \n }\n\n # モデルのパラメータの読み込み\n with open( args.params_file ) as f:\n xgboost_params = yaml.safe_load(f)\n xgboost_model_params = xgboost_params[\"model\"][\"model_params\"]\n xgboost_train_params = xgboost_params[\"model\"][\"train_params\"]\n if( args.debug ):\n print( \"xgboost_params :\\n\", xgboost_params )\n\n #--------------------------------------------\n # k-fold CV での評価\n #--------------------------------------------\n y_preds_train = np.zeros((len(y_train),))\n\n # k-hold cross validation で、学習用データセットを学習用と検証用に分割したもので評価\n # StratifiedKFold は連続値では無効なので、通常の k-fold を使用\n kf = KFold(n_splits=args.n_splits_gs, shuffle=True, random_state=args.seed)\n for fold_id, (train_index, valid_index) in 
enumerate(kf.split(X_train)):\n #--------------------\n # データセットの分割\n #--------------------\n X_train_fold, X_valid_fold = X_train.iloc[train_index], X_train.iloc[valid_index]\n y_train_fold, y_valid_fold = y_train.iloc[train_index], y_train.iloc[valid_index]\n\n #--------------------\n # モデルの定義\n #--------------------\n knn = KNeighborsRegressor( n_neighbors = 3, p = 2, metric = 'minkowski', n_jobs = -1 )\n svr = SVR( kernel = 'rbf', C = 0.1 )\n random_forest = RandomForestRegressor( criterion = \"mse\", bootstrap = True, n_estimators = 1001, oob_score = True, n_jobs = -1, random_state = args.seed )\n\n xgboost = xgb.XGBRegressor(\n booster = xgboost_model_params['booster'],\n objective = xgboost_model_params['objective'],\n learning_rate = xgboost_model_params['learning_rate'],\n n_estimators = xgboost_model_params['n_estimators'],\n max_depth = xgboost_model_params['max_depth'],\n min_child_weight = xgboost_model_params['min_child_weight'],\n subsample = xgboost_model_params['subsample'],\n colsample_bytree = xgboost_model_params['colsample_bytree'],\n gamma = xgboost_model_params['gamma'],\n alpha = xgboost_model_params['alpha'],\n reg_lambda = xgboost_model_params['reg_lambda'],\n random_state = xgboost_model_params['random_state'] \n )\n\n ensemble = WeightAverageEnsembleRegressor(\n regressors = [ knn, svr, random_forest, xgboost ],\n weights = [ params[\"weights1\"], params[\"weights2\"], params[\"weights3\"], params[\"weights4\"] ],\n )\n\n #--------------------\n # モデルの学習処理\n #--------------------\n ensemble.fit(X_train_fold, y_train_fold)\n\n #--------------------\n # モデルの推論処理\n #--------------------\n y_preds_train[valid_index] = ensemble.predict(X_valid_fold)\n\n if( args.target_norm ):\n rmse = np.sqrt( mean_squared_error( np.exp(y_train), np.exp(y_preds_train) ) ) \n else:\n rmse = np.sqrt( mean_squared_error(y_train, y_preds_train) )\n\n return rmse\n\n return objective\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--exper_name\", default=\"ensemble_average_gs\", help=\"実験名\")\n parser.add_argument(\"--dataset_dir\", type=str, default=\"datasets\")\n parser.add_argument(\"--results_dir\", type=str, default=\"results\")\n parser.add_argument(\"--submit_file\", type=str, default=\"submission.csv\")\n parser.add_argument(\"--competition_id\", type=str, default=\"house-prices-advanced-regression-techniques\")\n parser.add_argument(\"--params_file\", type=str, default=\"parames/xgboost_regressor_default.yml\")\n parser.add_argument(\"--n_splits\", type=int, default=4, help=\"CV での学習用データセットの分割数\")\n parser.add_argument(\"--n_splits_gs\", type=int, default=2, help=\"ハイパーパラメーターチューニング時の CV での学習用データセットの分割数\")\n parser.add_argument(\"--n_trials\", type=int, default=50, help=\"Optuna での試行回数\")\n parser.add_argument('--target_norm', action='store_true')\n parser.add_argument(\"--seed\", type=int, default=71)\n parser.add_argument('--submit', action='store_true')\n parser.add_argument('--debug', action='store_true')\n args = parser.parse_args()\n if( args.debug ):\n for key, value in vars(args).items():\n print('%s: %s' % (str(key), str(value)))\n\n if not os.path.isdir(args.results_dir):\n os.mkdir(args.results_dir)\n if not os.path.isdir( os.path.join(args.results_dir, args.exper_name) ):\n os.mkdir(os.path.join(args.results_dir, args.exper_name))\n\n # 警告非表示\n warnings.simplefilter('ignore', DeprecationWarning)\n\n # seed 値の固定\n np.random.seed(args.seed)\n random.seed(args.seed)\n\n #================================\n # データセットの読み込み\n 
#================================\n df_train = pd.read_csv( os.path.join(args.dataset_dir, \"train.csv\" ) )\n df_test = pd.read_csv( os.path.join(args.dataset_dir, \"test.csv\" ) )\n df_submission = pd.read_csv( os.path.join(args.dataset_dir, \"sample_submission.csv\" ) )\n if( args.debug ):\n print( \"df_train.head() : \\n\", df_train.head() )\n print( \"df_test.head() : \\n\", df_test.head() )\n print( \"df_submission.head() : \\n\", df_submission.head() )\n\n #================================\n # 前処理\n #================================ \n df_train, df_test = preprocessing( args, df_train, df_test )\n\n # 前処理後のデータセットを外部ファイルに保存\n df_train.to_csv( os.path.join(args.results_dir, args.exper_name, \"train_preprocessed.csv\"), index=True)\n df_test.to_csv( os.path.join(args.results_dir, args.exper_name, \"test_preprocessed.csv\"), index=True)\n if( args.debug ):\n print( \"df_train.head() : \\n\", df_train.head() )\n print( \"df_test.head() : \\n\", df_test.head() )\n\n #===========================================\n # k-fold CV による処理\n #===========================================\n # 学習用データセットとテスト用データセットの設定\n X_train = df_train.drop('SalePrice', axis = 1)\n X_test = df_test\n y_train = df_train['SalePrice']\n y_preds_train = np.zeros((len(y_train),))\n if( args.debug ):\n print( \"len(X_train) : \", len(X_train) )\n print( \"len(y_train) : \", len(y_train) )\n print( \"len(y_preds_train) : \", len(y_preds_train) )\n #print( \"X_train.head() : \\n\", X_train.head() )\n #print( \"X_test.head() : \\n\", X_test.head() )\n #print( \"y_train.head() : \\n\", y_train.head() )\n\n #==============================\n # Optuna によるハイパーパラメーターのチューニング\n #==============================\n study = optuna.create_study(direction=\"minimize\")\n study.optimize(objective_wrapper(args, X_train,y_train), n_trials=args.n_trials)\n print('best params : ', study.best_params)\n #print('best best_trial : ', study.best_trial)\n\n #================================\n # 最良モデルでの学習 & 推論\n #================================\n # モデルのパラメータの読み込み\n with open( args.params_file ) as f:\n params_xgboost = yaml.safe_load(f)\n model_params_xgboost = params_xgboost[\"model\"][\"model_params\"]\n model_train_params_xgboost = params_xgboost[\"model\"][\"train_params\"]\n if( args.debug ):\n print( \"params_xgboost :\\n\", params_xgboost )\n\n # k-hold cross validation で、学習用データセットを学習用と検証用に分割したもので評価\n # StratifiedKFold は連続値では無効なので、通常の k-fold を使用\n kf = KFold(n_splits=args.n_splits, shuffle=True, random_state=args.seed)\n\n y_preds_test = []\n for fold_id, (train_index, valid_index) in enumerate(kf.split(X_train)):\n #--------------------\n # データセットの分割\n #--------------------\n X_train_fold, X_valid_fold = X_train.iloc[train_index], X_train.iloc[valid_index]\n y_train_fold, y_valid_fold = y_train.iloc[train_index], y_train.iloc[valid_index]\n\n #--------------------\n # 回帰モデル定義\n #--------------------\n knn = KNeighborsRegressor( n_neighbors = 3, p = 2, metric = 'minkowski', n_jobs = -1 )\n svr = SVR( kernel = 'rbf', C = 0.1 )\n random_forest = RandomForestRegressor( criterion = \"mse\", bootstrap = True, n_estimators = 1001, oob_score = True, n_jobs = -1, random_state = args.seed )\n\n xgboost = xgb.XGBRegressor(\n booster = model_params_xgboost['booster'],\n objective = model_params_xgboost['objective'],\n learning_rate = model_params_xgboost['learning_rate'],\n n_estimators = model_params_xgboost['n_estimators'],\n max_depth = model_params_xgboost['max_depth'],\n min_child_weight = model_params_xgboost['min_child_weight'],\n subsample = 
model_params_xgboost['subsample'],\n colsample_bytree = model_params_xgboost['colsample_bytree'],\n gamma = model_params_xgboost['gamma'],\n alpha = model_params_xgboost['alpha'],\n reg_lambda = model_params_xgboost['reg_lambda'],\n random_state = model_params_xgboost['random_state'] \n )\n\n ensemble = WeightAverageEnsembleRegressor(\n regressors = [ knn, svr, random_forest, xgboost ],\n weights = [study.best_params[\"weights1\"], study.best_params[\"weights2\"], study.best_params[\"weights3\"], study.best_params[\"weights4\"] ],\n )\n\n #--------------------\n # モデルの学習処理\n #--------------------\n ensemble.fit(X_train, y_train)\n\n #--------------------\n # モデルの推論処理\n #--------------------\n y_pred_test = ensemble.predict(X_test)\n y_preds_test.append(y_pred_test)\n\n y_preds_train[valid_index] = ensemble.predict(X_valid_fold)\n #print( \"[{}] len(y_pred_fold) : {}\".format(fold_id, len(y_preds_train)) )\n \n # 正解データとの平均2乗平方根誤差で評価\n if( args.target_norm ):\n rmse = np.sqrt( mean_squared_error( np.exp(y_train), np.exp(y_preds_train) ) ) \n else:\n rmse = np.sqrt( mean_squared_error(y_train, y_preds_train) )\n\n print( \"RMSE [k-fold CV train-valid] : {:0.5f}\".format(rmse) )\n\n #================================\n # 可視化処理\n #================================\n # 回帰対象\n sns.distplot(df_train['SalePrice'] )\n sns.distplot(y_preds_train)\n if( args.target_norm ):\n plt.savefig( os.path.join(args.results_dir, args.exper_name, \"SalePrice_w_norm.png\"), dpi = 300, bbox_inches = 'tight' )\n else:\n plt.savefig( os.path.join(args.results_dir, args.exper_name, \"SalePrice_wo_norm.png\"), dpi = 300, bbox_inches = 'tight' )\n\n #================================\n # Kaggle API での submit\n #================================\n # 提出用データに値を設定\n y_sub = sum(y_preds_test) / len(y_preds_test)\n if( args.target_norm ):\n df_submission['SalePrice'] = list(map(float, np.exp(y_sub)))\n else:\n df_submission['SalePrice'] = list(map(float, y_sub))\n\n df_submission.to_csv( os.path.join(args.results_dir, args.exper_name, args.submit_file), index=False)\n\n if( args.submit ):\n # Kaggle-API で submit\n api = KaggleApi()\n api.authenticate()\n api.competition_submit( os.path.join(args.results_dir, args.exper_name, args.submit_file), args.exper_name, args.competition_id)\n os.system('kaggle competitions submissions -c {}'.format(args.competition_id) )","repo_name":"Yagami360/kaggle_exercises","sub_path":"house-prices-advanced-regression-techniques/ensemble_average_gs.py","file_name":"ensemble_average_gs.py","file_ext":"py","file_size_in_byte":13502,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"9855044863","text":"import bdpy\nimport numpy as np\n\n\n# Data settings\nvoxel_data = np.array([np.arange(10),\n np.arange(10) + 10,\n np.arange(10) + 20,\n np.arange(10) + 30,\n np.arange(10) + 40,\n np.arange(10) + 50])\nroi_v1 = [1, 1, 1, 1, 1, 0, 0, 0, 0, 0]\nroi_v2 = [0, 0, 0, 0, 0, 1, 1, 0, 1, 0]\nroi_v3 = [0, 0, 0, 0, 0, 0, 0, 1, 0, 1]\nruns = np.array([1, 1, 1, 2, 2, 2]).T\nstimulus_name = np.array([2, 1, 3, 1, 3, 2]).T\nvmap = {\n 'stimulus_name': {\n 1: 'stimulus_01',\n 2: 'stimulus_02',\n 3: 'stimulus_03',\n }\n}\n\nbdata = bdpy.BData()\nbdata.add(voxel_data, 'VoxelData')\nbdata.add_metadata('ROI_V1', roi_v1, description='ROI V1', where='VoxelData')\nbdata.add_metadata('ROI_V2', roi_v2, description='ROI V2', where='VoxelData')\nbdata.add_metadata('ROI_V3', roi_v3, description='ROI V3', where='VoxelData')\nbdata.add(runs, 'Run')\nbdata.add(stimulus_name, 
'stimulus_name')\nbdata.add_vmap('stimulus_name', vmap['stimulus_name'])\n\nbdata.save('test.h5')\n","repo_name":"KamitaniLab/BrainDecoder.jl","sub_path":"test/data/make_testdata.py","file_name":"make_testdata.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"9315293667","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .pyscripts.ita import convert_to_gds\nimport json\n\n\n\ndef home_page(request):\n return render(request,\"index.html\")\n\ndef mobile_page(request):\n return render(request,\"mobile.html\")\n\ndef show_process(request):\n if request.method == \"GET\":\n input_text = request.GET.get(\"the_get\").split('\\n')\n input_type = request.GET.get(\"input_type\")\n gds_format = convert_to_gds(input_text,input_type)\n if gds_format:\n response_data = {\n \"output_text\": gds_format\n }\n else:\n response_data = {\n \"output_text\":\"Cannot convert\"\n }\n print(response_data)\n return HttpResponse(\n json.dumps(response_data),\n content_type = \"application/json\"\n )\n else:\n return HttpResponse(\n json.dumps({\"output_text\":\"nothing to see\"}),\n content_type = \"application/json\"\n )\n","repo_name":"Kama-lab/GDSToolBox","sub_path":"toolbox/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"39010991751","text":"\"\"\"\nExample:\nConsider the problem\n0.5 * (x1^2 + 2x2^2 + 3x3^2) + 15x1 + 8x2 + 80x3 -> min (1)\nsubject to\nx1 + 2x2 + 3x3 <= 150 (2)\n8x1 + 15x2 + 80x3 <= 800 (3)\nx2 - x3 = 25.5 (4)\n\"\"\"\n\nimport unittest\nimport pdb\n\nimport numpy as np\nimport cvxopt\nfrom cvxopt import matrix\nfrom numpy import diag, inf\nfrom openopt import QP\n\nclass TestCvxopt(unittest.TestCase):\n def test_cvxopt(self):\n Q = matrix([[1.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 3.0]])\n p = matrix([15.0, 8.0, 80.0])\n G = matrix([[1.0, 2.0, 3.0], [8.0, 15.0, 80.0], [0, 0, 0]])\n h = matrix([150.0, 800.0, 0], (3, 1))\n A = matrix([0.0, 1.0, -1.0], (1,3))\n b = matrix(25.5)\n\n sol = cvxopt.solvers.qp(Q, p, G, h, A, b)\n\n p = QP(diag([1, 2, 3]),\n [15, 8, 80],\n A = np.matrix(\"1 2 3; 8 15 80\"),\n b = [150, 800],\n Aeq = [0, 1, -1],\n beq = 25.5)\n r = p._solve('cvxopt_qp', iprint = 0)\n f_opt, x_opt = r.ff, r.xf\n\n np.testing.assert_almost_equal(f_opt, sol['primal objective'], decimal=5)\n np.testing.assert_almost_equal(x_opt, np.squeeze(sol['x']), decimal=5)\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"megacell/block-simplex-least-squares","sub_path":"tests/fast/test_cvxopt.py","file_name":"test_cvxopt.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"8061982231","text":"import os\nimport yaml\nimport numpy as np\nimport time, timeit\n\ndef timer(func):\n def wrapper(*args, **kwargs):\n t0 = timeit.default_timer()\n result = func(*args, **kwargs)\n elapsed = timeit.default_timer() - t0\n # name = func.__name__\n # arg_str = ', '.join(repr(arg) for arg in args)\n # print('[%0.8fs] %s(%s) -> %r' % (elapsed, name, arg_str, result))\n print('predict time: %0.8fs' % elapsed)\n return result\n return wrapper\n\n\ndef read_yaml(filename):\n with open(filename, 'r') as f:\n data = yaml.load(f, Loader=yaml.SafeLoader) # explicit Loader: bare yaml.load() is deprecated and unsafe\n return data\n\ndef save_yaml(filename, data):\n with open(filename, 'w') as f:\n yaml.dump(data, 
f)\n\ndef interp2d(corners, r, c):\n if not isinstance(corners, np.ndarray):\n corners = np.array(corners)\n assert corners.shape[0] == 4\n\n x1 = corners[0]\n x2 = corners[1]\n x3 = corners[2]\n x4 = corners[3]\n\n y1 = x1 + (r/9.)*(x3-x1)\n y2 = x2 + (r/9.)*(x4-x2)\n z = y1 + (c/8.)*(y2-y1)\n\n return z.tolist()\n\ndef order_points(pts):\n if not isinstance(pts, np.ndarray):\n pts = np.array(pts, dtype=np.float32)\n\n polygon = np.zeros((4, 2), dtype=np.float32)\n\n # the top-left point will have the smallest sum, whereas\n # the bottom-right point will have the largest sum\n s = pts.sum(axis=1)\n polygon[0] = pts[np.argmin(s)]\n polygon[2] = pts[np.argmax(s)]\n\n # now, compute the difference between the points, the\n # top-right point will have the smallest difference,\n # whereas the bottom-left will have the largest difference\n diff = np.diff(pts, axis=1)\n polygon[1] = pts[np.argmin(diff)]\n polygon[3] = pts[np.argmax(diff)]\n\n # return the ordered coordinates\n return polygon.tolist()\n\ndef rc_to_code(r, c):\n code_str = 'abcdefghi'\n code = '{}{}'.format(code_str[c], r)\n return code\n\ndef round_int(x):\n return int(round(x))\n\ndef kill():\n os._exit(0)","repo_name":"caomxin/chessbot","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"35986987894","text":"import librosa\nimport librosa.display\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport sklearn\n\n\ngan1, sr = librosa.load(\n 'c:/nmb/nmb_data/checkpoints2/5000.wav'\n)\n\ngan2, sr = librosa.load(\n 'c:/nmb/nmb_data/checkpoints2/10000.wav'\n)\n\ngan3, sr = librosa.load(\n 'c:/nmb/nmb_data/checkpoints2/20000.wav'\n)\n\ngan4, sr = librosa.load(\n 'c:/nmb/nmb_data/checkpoints2/30000.wav'\n)\n\ngan1 = np.fft.fft(gan1)\ngan1 = abs(gan1)\ngan1_r = np.fft.fftfreq(len(gan1), d = 1.0)\n\ngan2 = np.fft.fft(gan2)\ngan2 = abs(gan2)\ngan2_r = np.fft.fftfreq(len(gan2), d = 1.0)\n\ngan3 = np.fft.fft(gan3)\ngan3 = abs(gan3)\ngan3_r = np.fft.fftfreq(len(gan3), d = 1.0)\n\ngan4 = np.fft.fft(gan4)\ngan4 = abs(gan4)\ngan4_r = np.fft.fftfreq(len(gan4), d = 1.0)\n\n# fig = plt.figure(figsize = (16, 6))\n\n# ax1 = fig.add_subplot(2, 2, 1)\n# ax2 = fig.add_subplot(2, 2, 2)\n# ax3 = fig.add_subplot(2, 2, 3)\n# ax4 = fig.add_subplot(2, 2, 4)\n\n# ax1.set(title = '5000')\n# ax2.set(title = '10000')\n# ax3.set(title = '20000')\n# ax4.set(title = '30000')\n\n# librosa.display.waveplot(gan1, sr, ax = ax1)\n# librosa.display.waveplot(gan2, sr, ax = ax2)\n# librosa.display.waveplot(gan3, sr, ax = ax3)\n# librosa.display.waveplot(gan4, sr, ax = ax4)\n\n# ax1.plot(gan1_r, gan1)\n# ax2.plot(gan2_r, gan2)\n# ax3.plot(gan3_r, gan3)\n# ax4.plot(gan4_r, gan4)\n\nplt.plot(gan4_r, gan4)\nplt.xlim(0, 0.5)\nplt.ylim(0, 100)\n\n# fig.tight_layout()\n# plt.xlim(0, 5)\n# plt.ylim(0, 100)\nplt.show()\n\n######################### csv visualization ############################\n\n'''\ndf = pd.read_csv(\n 'c:/nmb/nmb_data/loss_2.csv'\n)\n\nprint(df.info())\nprint(df)\n\nfig = plt.figure(figsize = (16, 6))\nax1 = fig.add_subplot(2, 1, 1)\nax2 = fig.add_subplot(2, 1, 2)\n\ndf_vis_d = df['d_loss'].plot()\ndf_vis_g = df['g_loss'].plot()\nax1 = df_vis_d.get_figure()\nax2 = df_vis_g.get_figure()\n\nplt.legend(loc = 'best')\nplt.show()\n'''\n\n# df = pd.read_csv(\n# 'c:/nmb/nmb_data/loss_2.csv'\n# )\n\n# fig = plt.figure(figsize=(16, 6))\n\n# # plt.title('generator_loss')\n\n# df1 = df.loc[:5001, :]\n# 
df2 = df.loc[:10001, :]\n# df3 = df.loc[:20001, :]\n# df4 = df.loc[:30001, :]\n\n# df1 = df1['d_loss']\n# df2 = df2['d_loss']\n# df3 = df3['d_loss']\n# df4 = df4['d_loss']\n\n# ax1 = fig.add_subplot(2, 2, 1)\n# ax2 = fig.add_subplot(2, 2, 2)\n# ax3 = fig.add_subplot(2, 2, 3)\n# ax4 = fig.add_subplot(2, 2, 4)\n\n# ax1.plot(df1, color = 'orange')\n# ax2.plot(df2, color = 'orange')\n# ax3.plot(df3, color = 'orange')\n# ax4.plot(df4, color = 'orange')\n\n# ax1.set_title('5000')\n# ax2.set_title('10000')\n# ax3.set_title('20000')\n# ax4.set_title('30000')\n\n# fig.tight_layout()\n# plt.legend(loc = 'best')\n# plt.show()\n\n\n'''\nd_loss = df['d_loss']\ng_loss = df['g_loss']\n\nax1 = plt.subplot(2, 1, 1)\nax2 = plt.subplot(2, 1, 2)\n\nax1.plot(d_loss, color = 'b')\nplt.title('d_loss')\nax2.plot(g_loss, color = 'orange')\nplt.title('g_loss')\n\nfig.tight_layout()\nplt.show()\n'''","repo_name":"zashin-AI/juhyeong","sub_path":"visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":2876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"70051063831","text":"\"\"\"Extract the most recurrent tokens of the template text\"\"\"\n\nimport json\nimport more_itertools\nimport mwxml\nimport datetime\nfrom typing import Iterable, Iterator, Mapping, Optional\nfrom backports.datetime_fromisoformat import MonkeyPatch\n# nltk\nfrom .. import extractors, user_warnings_en, user_warnings_it, user_warnings_es, user_warnings_ca, utils\nimport math\nimport random\n\n# Polyfiller for retrocompatibiliy with Python3.5\nMonkeyPatch.patch_fromisoformat()\n\n# MAX REVISIONS\nMAX_REVISION_CACHE = 100\n\n# REVISION STORAGE\nREVISION_STORAGE = list()\n\n# time interval in seconds\ntime_interval_in_seconds = {\n '1 day': 86400,\n '1 week': 604800\n}\n\n# user warnings templates\nuser_warnings_templates = set(\n user_warnings_en.block_templates_indefinitely_blocked_templates + \\\n user_warnings_en.block_templates + \\\n user_warnings_en.arbitration_enforcement_templates_1RR_related_templates + \\\n user_warnings_en.arbitration_enforcement_templates_pages_with_discretionary_sanctions_editnotice + \\\n user_warnings_en.arbitration_enforcement_templates + \\\n user_warnings_en.csd_warning_templates + \\\n user_warnings_en.community_authorised_general_sanctions_templates + \\\n user_warnings_en.community_authorised_general_sanctions_templates_standardized + \\\n user_warnings_en.community_authorised_general_sanctions_templates_obsolete + \\\n user_warnings_en.non_english_welcome + \\\n user_warnings_en.non_english + \\\n user_warnings_en.test_templates + \\\n user_warnings_en.standardized_templates + \\\n user_warnings_en.user_warnings_templates + \\\n\n user_warnings_it.avviso_utenti_anonimi + \\\n user_warnings_it.benvenuto + \\\n user_warnings_it.benvenuto_progetti + \\\n user_warnings_it.avviso_copyright + \\\n user_warnings_it.avviso_invito_progetti + \\\n user_warnings_it.vandalismo + \\\n\n user_warnings_es.bienvenida + \\\n user_warnings_es.permission_grant_notification_templates + \\\n user_warnings_es.user_warnings + \\\n\n user_warnings_ca.benvinguda + \\\n user_warnings_ca.Avisos_de_discussio + \\\n user_warnings_ca.plantilles_d_avisos_d_edicio_generics + \\\n user_warnings_ca.plantilles_d_avisos_d_edicio + \\\n user_warnings_ca.plantilles_d_avisos_d_idioma + \\\n user_warnings_ca.plantilles_d_avisos \n)\n\n# REVISION AND PAGE CLASSES\nclass Revision:\n \"\"\"Class which represent a revision of the template page\"\"\"\n def __init__(self, 
id: str, user: mwxml.Revision.User, timestamp: str, template_info: extractors.user_warnings_template_words.UserWarningTf):\n self.id = id # revision id\n self.user = user # revision user\n self.timestamp = timestamp # revision timestamp\n self.template_info = template_info # template information about the words stemmed and without stopwords and occurences\n self.words_to_search = list() # list of the k words which characterizes the the template the most (k = template_info.total_number_words / 2)\n\n def to_dict(self) -> str:\n \"\"\"Converts the object instance into a dictionary\"\"\"\n obj = dict()\n obj['id'] = self.id\n user_id = ''\n user_name = ''\n if self.user:\n user_id = self.user.id\n user_name = self.user.text\n obj['user_id'] = user_id\n obj['user_name'] = user_name\n obj['timestamp'] = self.timestamp\n obj['template_info'] = self.template_info.to_dict()\n obj['words_to_search'] = self.words_to_search\n return obj\n\n def __repr__(self):\n return 'date: {}'.format(self.timestamp)\n\n def __lt__(self, other):\n return datetime.datetime.fromisoformat(self.timestamp.replace('Z', '+00:00')) < datetime.datetime.fromisoformat(other.timestamp.replace('Z', '+00:00'))\n\n\nclass Page:\n \"\"\"Class which represent a page containing a list of revisions\"\"\"\n def __init__(self, id: str, namespace: str, title: str, revisions: Iterator[Revision], tfidf: Mapping, idf: Mapping, occurences_in_corpus: Mapping):\n self.id = id # page id\n self.namespace = namespace # page namespace\n self.title = title # page title\n self.revisions = revisions # list of revisions\n self.tfidf=tfidf # tf-idf metrics\n self.occurences_in_corpus = occurences_in_corpus # stemmed word occurences in corups (1 if the word appear in a corpus 0 othewise)\n self.idf = idf # idf metric in corpus\n\n def to_dict(self) -> Mapping:\n \"\"\"Converts the object instance into a dictionary\"\"\"\n obj = dict()\n obj['id'] = self.id\n obj['namespace'] = self.namespace\n obj['title'] = self.title\n obj['revisions'] = list()\n for rev in self.revisions:\n obj['revisions'].append(rev.to_dict())\n obj['tf-idf'] = self.tfidf \n obj['occurences_in_corupus'] = self.occurences_in_corpus\n obj['idf'] = self.idf\n return obj\n\ndef extract_revisions(\n mw_page: mwxml.Page,\n stats: Mapping,\n only_last_revision: bool,\n language: str,\n stemmer: bool) -> Iterator[Revision]:\n \n \"\"\"Extracts the history of a user_warning_template within a template page -> most important keywords.\"\"\"\n revisions = more_itertools.peekable(mw_page)\n\n # Newest revisions, useful only if the only_last_revision flag is set equal to true\n newest_revision = None\n\n for mw_revision in revisions:\n utils.dot()\n\n # check if it's last revision\n is_last_revision = not utils.has_next(revisions)\n\n # remove html comments\n text = utils.remove_comments(mw_revision.text or '')\n\n # extract the template text and other info\n template_info = extractors.user_warnings_template_words.userwarnings_words_extractor(text, language, stemmer)\n\n # Build the revision\n rev = Revision(\n id=mw_revision.id,\n user=mw_revision.user,\n timestamp=mw_revision.timestamp.to_json(),\n template_info=template_info,\n )\n\n # Check the oldest revisions possible\n if not newest_revision:\n newest_revision = rev\n else:\n newest_date = datetime.datetime.fromisoformat(newest_revision.timestamp.replace('Z', '+00:00'))\n current_date = datetime.datetime.fromisoformat(mw_revision.timestamp.to_json().replace('Z', '+00:00'))\n # change the revision if the current one is newer\n if 
newest_date < current_date:\n newest_revision = rev\n\n # Update stats\n stats['performance']['revisions_analyzed'] += 1\n\n # requested only the last revision\n if only_last_revision:\n if is_last_revision:\n yield newest_revision\n else:\n yield rev\n\ndef extract_pages(\n dump: Iterable[mwxml.Page],\n stats: Mapping,\n only_last_revision: bool,\n set_interval: Optional[str],\n esclude_template_repetition: bool,\n language: str,\n stemmer: bool,\n minimum_word_length: int) -> Iterator[Page]:\n \"\"\"Extract the templates from an user page.\"\"\"\n\n counter = 1\n\n # Loop on all the pages in the dump, one at a time\n for mw_page in dump:\n utils.log(\"Processing\", mw_page.title)\n \n # Skip non-template, according to https://en.wikipedia.org/wiki/Wikipedia:Namespace\n if mw_page.namespace != 10:\n utils.log('Skipped (namespace != 10)')\n continue\n\n # flag which tells if the revision can be stored\n store_flag = False\n\n # those revision can replace / be stored in the revision_storage\n if not mw_page.title.lower() in user_warnings_templates:\n store_flag = True\n else:\n counter += 1\n\n revisions_generator = extract_revisions(\n mw_page,\n stats=stats,\n only_last_revision=only_last_revision,\n language=language,\n stemmer=stemmer\n )\n\n revisions_list = list(revisions_generator)\n # sort the revision list by date\n revisions_list.sort()\n # filtered revision list\n filtered_revisions_list = list()\n\n # reference revisions\n reference_rev = None\n\n # take the first reference revision and insert it\n if revisions_list:\n reference_rev = revisions_list[0]\n filtered_revisions_list.append(reference_rev)\n\n # partition time by time interval specified by set_interval\n if set_interval or esclude_template_repetition:\n for elem in revisions_list:\n # ge the last inserted and current time interval\n last_inserted_time = datetime.datetime.fromisoformat(reference_rev.timestamp.replace('Z', '+00:00'))\n current_time = datetime.datetime.fromisoformat(elem.timestamp.replace('Z', '+00:00'))\n condition = True\n if set_interval:\n # condition for the time interval\n condition = condition and (current_time - last_inserted_time).total_seconds() < time_interval_in_seconds[set_interval]\n if esclude_template_repetition:\n # condition for the different regexp\n condition = condition and reference_rev.template_info.template_text != elem.template_info.template_text\n if condition:\n filtered_revisions_list[-1] = elem # substitute because included in the time interval (partitioned by the time interval)\n else:\n # if there is the different regexp selected then inserted only if the previous one has different regexp than the current one\n if not (esclude_template_repetition and reference_rev.template_info.template_text == elem.template_info.template_text):\n filtered_revisions_list.append(elem)\n reference_rev = elem\n else:\n # no tag selected\n filtered_revisions_list = revisions_list\n\n # filter out the empty revisions\n filtered_revisions_list = [ rev for rev in filtered_revisions_list if rev.template_info.total_number_words != 0 ]\n\n if store_flag:\n\n # REVISION STORAGE update\n rev_storage_size = len(REVISION_STORAGE)\n filtered_rev_size = len(filtered_revisions_list)\n\n # store the revision in this cache\n if (rev_storage_size + filtered_rev_size) <= MAX_REVISION_CACHE:\n # fill the revision storage\n REVISION_STORAGE.extend(filtered_revisions_list)\n elif rev_storage_size <= MAX_REVISION_CACHE:\n # replace some revisions\n min_length = min(rev_storage_size, filtered_rev_size)\n for i in 
range(random.randrange(min_length)):\n REVISION_STORAGE[i] = filtered_revisions_list[i]\n else:\n # fill and replace some revisions\n filtered_rev_counter = 0\n while(rev_storage_size < MAX_REVISION_CACHE):\n REVISION_STORAGE.append(filtered_revisions_list[filtered_rev_counter])\n filtered_rev_counter += 1\n rev_storage_size += 1\n for index in range(filtered_rev_counter, filtered_rev_size):\n rev_storage_index = random.randrange(rev_storage_size)\n REVISION_STORAGE[rev_storage_index] = filtered_revisions_list[index]\n else:\n\n # extended corpus\n extended_corpus = list(filtered_revisions_list)\n rev_range_size = len(REVISION_STORAGE)\n\n # extended corpus\n for index in range(len(filtered_revisions_list)):\n extended_corpus.append(REVISION_STORAGE[random.randrange(rev_range_size)])\n\n # element occur in document\n is_in_document_dict = dict()\n corpus_size = len(extended_corpus)\n\n # word list\n words_list = set()\n\n # retrieve only the interesting words\n for revision in filtered_revisions_list:\n for word in revision.template_info.inf_retrieval:\n words_list.add(word) \n \n # is in document calculus\n for revision in extended_corpus:\n for word in revision.template_info.inf_retrieval:\n # only in the interesting words\n if word in words_list:\n if not word in is_in_document_dict:\n is_in_document_dict[word] = 1\n else:\n is_in_document_dict[word] += 1\n\n # idf word calculus\n idf_dict = dict() # idf per corpus\n for word in is_in_document_dict:\n idf_dict[word] = math.log(corpus_size / is_in_document_dict[word], 10)\n \n # tf-idf calculus\n # girare il loop o qualcosa di simile, vedere dopo come\n tfidf = dict() # the corpus is constant, so it will be indicized by word and document\n for word in is_in_document_dict: # for every word\n tfidf[word] = dict()\n for doc_index in range(len(filtered_revisions_list)): # for all document\n rev = filtered_revisions_list[doc_index]\n # calculate tf for word in document\n if word in rev.template_info.inf_retrieval:\n tf = rev.template_info.inf_retrieval[word] / rev.template_info.total_number_words\n else:\n tf = 0\n # multiply it by the idf of that word\n tfidf[word][doc_index] = tf * idf_dict[word]\n # assign the words to keep\n rev.words_to_search.append((word, tfidf[word][doc_index]))\n\n # take the words needed\n for rev in filtered_revisions_list:\n k = int(rev.template_info.total_number_words / 2)\n # words to search\n rev.words_to_search.sort(key = lambda a: a[1], reverse=True)\n # check if there's a minimum amount of character needed:\n if minimum_word_length:\n index = 0\n for word,_ in rev.words_to_search:\n # controlling the word size\n if len(word) > minimum_word_length:\n rev.words_to_search[index] = (word,_)\n index += 1\n rev.words_to_search = rev.words_to_search[:index]\n # taking the k values with the highest tf-idf metric value associated\n rev.words_to_search = [ el[0] for el in rev.words_to_search[:k] ]\n\n\n # stats update\n if not language in stats['user_warnings_templates']:\n stats['user_warnings_templates'][language] = dict()\n \n stats['user_warnings_templates'][language][mw_page.title] = dict()\n stats['user_warnings_templates'][language][mw_page.title]['word_occurences'] = is_in_document_dict\n stats['user_warnings_templates'][language][mw_page.title]['tf-idf'] = tfidf\n\n page = Page(\n id=mw_page.id,\n namespace=mw_page.namespace,\n title=mw_page.title,\n revisions=filtered_revisions_list,\n tfidf=tfidf,\n idf=idf_dict,\n occurences_in_corpus=is_in_document_dict\n )\n\n yield page\n\ndef 
configure_subparsers(subparsers):\n \"\"\"Configure a new subparser for the known languages.\"\"\"\n parser = subparsers.add_parser(\n 'extract-user-warnings-templates-tokens',\n help='Extract the tokens of the user warning templates',\n )\n parser.add_argument(\n '--only-last-revision',\n action='store_true',\n help='Consider only the last revision for each page.',\n )\n parser.add_argument(\n '--set-interval',\n choices={None, '1 day', '1 week'},\n required=False,\n default=None,\n help='Time interval at the end of which to return the revision',\n )\n parser.add_argument(\n '--esclude-template-repetition',\n action='store_true',\n help='It does not return a revision if the same template was previously declared',\n )\n parser.add_argument(\n '--language',\n choices={'italian', 'catalan', 'spanish', 'english'},\n required=True,\n help='Language of the analyzed dump',\n )\n parser.add_argument(\n '--rev-cache',\n action='store', # takes a value; store_true would make int(args.rev_cache) always 1\n default=None,\n required=False,\n help='Maximum number of revisions kept in the cache',\n )\n parser.add_argument(\n '--stemmer',\n action='store_true',\n required=False,\n help='Retrieve stemmed words',\n )\n parser.add_argument(\n '--minimum-word-length',\n action='store',\n type=int,\n default=0,\n required=False,\n help='Minimum word length to retrieve',\n )\n parser.set_defaults(func=main)\n\n\ndef main(\n dump: Iterable[mwxml.Page],\n features_output_h,\n stats_output_h,\n args) -> None:\n \"\"\"Main function that parses the arguments and writes the output.\"\"\"\n\n stats = {\n 'performance': {\n 'start_time': None,\n 'end_time': None,\n 'revisions_analyzed': 0,\n 'pages_analyzed': 0,\n },\n 'user_warnings_templates': dict() # maybe the top 5 or all the best templates\n }\n\n if args.rev_cache:\n try: \n global MAX_REVISION_CACHE \n x = int(args.rev_cache)\n if x > 0:\n MAX_REVISION_CACHE = x\n except ValueError:\n pass\n\n pages_generator = extract_pages(\n dump,\n stats=stats,\n only_last_revision=args.only_last_revision,\n set_interval=args.set_interval,\n esclude_template_repetition=args.esclude_template_repetition,\n language=args.language,\n stemmer=args.stemmer,\n minimum_word_length=args.minimum_word_length\n )\n\n stats['performance']['start_time'] = datetime.datetime.utcnow()\n\n for obj in pages_generator:\n features_output_h.write(json.dumps(obj.to_dict()))\n features_output_h.write(\"\\n\")\n \n stats['performance']['end_time'] = datetime.datetime.utcnow()\n stats_output_h.write(json.dumps(stats, indent=4, default=str))","repo_name":"CristianCantoro/wikidump-lang-breaks-warns","sub_path":"wikidump/processors/user_warnings_templates_tokens.py","file_name":"user_warnings_templates_tokens.py","file_ext":"py","file_size_in_byte":18858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"19811996564","text":"from itertools import combinations\nfrom data.ingest import load_skills\nfrom data import model\n\n\ndef find_dual_damage_skills():\n skill_list = load_skills()\n dual_damage_types = {c: [] for c in combinations(model.DamageType, 2)}\n for damage_type_pair in dual_damage_types:\n for skill in skill_list:\n if damage_type_pair[0] in skill.damage_types and damage_type_pair[1] in skill.damage_types:\n dual_damage_types[damage_type_pair].append(skill)\n return dual_damage_types\n\n\nif __name__ == '__main__':\n type_pairs = find_dual_damage_skills()\n for pair in type_pairs:\n print('{0}/{1} skills: {2}'.format(pair[0].name, pair[1].name, len(type_pairs[pair])))\n if pair[0].name == 'Slash' and pair[1].name == 'Blunt':\n 
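# list each Slash/Blunt dual-damage skill by name\n 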
for skill in type_pairs[pair]:\n print(' {0}'.format(skill.name))\n","repo_name":"faith-grins/RS-RS-DamageRankings","sub_path":"search/skill_search.py","file_name":"skill_search.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"35027443373","text":"import os\nimport socket\nimport pathlib\nimport typing\nfrom xmlrpc import client\nfrom http.client import HTTPConnection\nimport datetime\nfrom pydantic import (\n BaseModel,\n Field\n)\n\n\nSUPERVISORD_ADDRESS_HTTP = os.getenv(\n 'SUPERVISORD_ADDRESS_HTTP',\n 'http://localhost'\n)\nSUPERVISORD_ADDRESS_UNIX_SOCKET = os.getenv(\n 'SUPERVISORD_ADDRESS_UNIX_SOCKET',\n '/var/run/supervisor/supervisor.sock'\n)\n\n\nclass SupervisordProcessStates:\n \"\"\"High Level Supervisord Process States\n\n A process controlled by supervisord will be in one of the below states at any given time.\n You may see these state names in various user interface elements in clients.\n\n Reference: http://supervisord.org/subprocess.html#process-states\n \"\"\"\n\n STOPPED = 0\n \"\"\"The process has been stopped due to a stop request or has never been started\"\"\"\n STARTING = 10\n \"\"\"The process is starting due to a start request\"\"\"\n RUNNING = 20\n \"\"\"The process is running\"\"\"\n BACKOFF = 30\n \"\"\"\n The process entered the STARTING state but subsequently \n exited too quickly (before the time defined in startsecs) to move to the RUNNING state\n \"\"\"\n STOPPING = 40\n \"\"\"The process is stopping due to a stop request\"\"\"\n NOT_RUNNING = 70\n \"\"\"Process is not running\"\"\"\n ALREADY_STARTED = 60\n \"\"\"Process already started and running\"\"\"\n EXITED = 100\n \"\"\"The process exited from the RUNNING state (expectedly or unexpectedly)\"\"\"\n FATAL = 200\n \"\"\"The process could not be started successfully\"\"\"\n UNKNOWN = 1000\n \"\"\"The process is in an unknown state (supervisord programming error)\"\"\"\n\n\nclass ProcessInfoState:\n \"\"\"\n This is an internal value maintained by Supervisor that determines what Supervisor believes to be its current operational state.\n \"\"\"\n\n FATAL = 2\n \"\"\"Supervisor has experienced a serious error\"\"\"\n RUNNING = 1\n \"\"\"Supervisor is working normally\"\"\"\n RESTARTING = 0\n \"\"\"Supervisor is in the process of restarting\"\"\"\n SHUTDOWN = -1\n \"\"\"Supervisor is in the process of shutting down\"\"\"\n\n\nclass ProcessInfo(BaseModel):\n name: str = Field(\n description=\"Name of the process\"\n )\n group: str = Field(\n description=\"Name of the process’ group\"\n )\n description: str = Field(\n description=(\n \"If process state is running description’s value is process_id and uptime. \"\n \"Example “pid 18806, uptime 0:03:12 ”. If process state is stopped description’s value is stop time. \"\n \"Example:”Jun 5 03:16 PM ”.\"\n )\n )\n start: datetime.datetime = Field(\n description=\"UNIX timestamp of when the process was started\"\n )\n stop: datetime.datetime = Field(\n description=\"UNIX timestamp of when the process last ended, or 0 if the process has never been stopped.\"\n )\n now: datetime.datetime = Field(\n description=\"UNIX timestamp of the current time, which can be used to calculate process up-time.\"\n )\n state: int = Field(\n description=\"State code, see Process States.\"\n )\n statename: str = Field(\n description=\"String description of state, see Process States.\"\n )\n logfile: pathlib.Path = Field(\n description=(\n \"Deprecated alias for stdout_logfile. 
\"\n \"This is provided only for compatibility with clients written for Supervisor 2.x and may be removed \"\n \"in the future. Use stdout_logfile instead.\"\n )\n )\n stdout_logfile: pathlib.Path = Field(\n description=\"Absolute path and filename to the STDOUT logfile\"\n )\n stderr_logfile: pathlib.Path = Field(\n description=\"Absolute path and filename to the STDOUT logfile\"\n )\n spawnerr: str = Field(\n description=\"Description of error that occurred during spawn, or empty string if none.\"\n )\n exitstatus: int = Field(\n description=\"Exit status (errorlevel) of process, or 0 if the process is still running.\"\n )\n pid: int = Field(\n description=\"UNIX process ID (PID) of the process, or 0 if the process is not running.\"\n )\n\n\nclass UnixStreamHTTPConnection(HTTPConnection):\n def connect(self):\n self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n self.sock.connect(self.host)\n\n\nclass UnixStreamTransport(client.Transport):\n def __init__(self, socket_path):\n self.socket_path = socket_path\n super(UnixStreamTransport, self).__init__()\n\n def make_connection(self, host):\n return UnixStreamHTTPConnection(self.socket_path)\n\n\nclass Supervisord:\n def __init__(\n self,\n address=SUPERVISORD_ADDRESS_HTTP,\n socket_path=SUPERVISORD_ADDRESS_UNIX_SOCKET\n ):\n self.address = address\n self.socket_path = socket_path\n self.server = client.ServerProxy(\n self.address,\n transport=UnixStreamTransport(self.socket_path)\n )\n\n def start(self, process):\n try:\n self.server.supervisor.startProcess(process)\n except client.Fault as fault:\n if fault.faultCode != SupervisordProcessStates.ALREADY_STARTED:\n raise fault\n\n def stop(self, process_name: str):\n try:\n self.server.supervisor.stopProcess(process_name)\n except client.Fault as fault:\n if fault.faultCode != SupervisordProcessStates.NOT_RUNNING:\n raise fault\n\n def restart(self, process_name: str):\n process = self.process_info(process_name)\n\n self.stop(process.name)\n self.start(process.name)\n\n def view_logs(self, process):\n ...\n\n def version(self) -> str:\n return self.server.supervisor.getVersion()\n\n def process_info(self, process_name) -> ProcessInfo:\n process = self.server.supervisor.getProcessInfo(process_name)\n\n if not process:\n RuntimeError('process not found')\n\n return ProcessInfo(**process)\n\n def all_process_info(self) -> typing.List[ProcessInfo]:\n process_info = []\n\n for process in self.server.supervisor.getAllProcessInfo():\n process_info.append(ProcessInfo(**process))\n\n return process_info\n\n def reload_config(self) -> None:\n self.server.supervisor.reloadConfig()\n","repo_name":"augustoliks/supervisord-fastapi","sub_path":"src/supervisord.py","file_name":"supervisord.py","file_ext":"py","file_size_in_byte":6271,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"5"} +{"seq_id":"9439482898","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n\turl(r'^admin/' , include(admin.site.urls)),\n\turl(r'^$' , 'app.views.indice'),\n\turl(r'^login' , 'app.views.login_view'),\n\turl(r'^ayuda' , 'app.views.ayuda'),\n\turl(r'^logout' , 'app.views.logout_view'),\n\turl(r'^portales/(.+)' , 'app.views.portales'),\n\turl(r'^image' , 'app.views.image'),\n\turl(r'^css' , 'app.views.css'),\n\turl(r'^eventos' , 'app.views.eventos'),\n\turl(r'^actividad/(.+)' , 'app.views.actividad'),\n\turl(r'^(.+)/RSS' , 'app.views.canalRSS'),\n\turl(r'^(.+)' , 
'app.views.usuario')\n\t)\n\n","repo_name":"malonsore/2015-saro-pfinal","sub_path":"2015-saro-pfinal/Final/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"8859787810","text":"import sqlite3\n\nconn = sqlite3.connect('test.db')\ncursor = conn.cursor() # cursor: links the database with SQL statements and provides the execute() function\n\ncursor.execute(\"select * from phonebook\") # run the query\n\nrows = cursor.fetchall() # fetch the data (pull the rows out)\n\nfor row in rows:\n print(\"name:{0}, phone:{1}, email:{2}\".format(row[0],row[1],row[2]))\n\ncursor.close()\nconn.close()\n\n\n\n\n\n","repo_name":"kogkuemryong/python","sub_path":"chap11/03_select_record.py","file_name":"03_select_record.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"37538894818","text":"import customtkinter as ctk\r\nimport customtkinter\r\nfrom tkinter import filedialog\r\nfrom tkinter import *\r\nimport os,time\r\nimport main\r\nimport threading\r\nimport json\r\nfrom tkinter import messagebox\r\nimport backend\r\n\r\n\r\nclass App(customtkinter.CTk):\r\n def __init__(self):\r\n # all configuration\r\n \r\n super().__init__()\r\n customtkinter.set_appearance_mode(\"system\") \r\n ctk.set_default_color_theme(\"dark-blue\")\r\n self.title(\"my app\")\r\n self.geometry(\"1000x600\")\r\n self.grid_columnconfigure(1, weight=1)\r\n self.grid_rowconfigure(0, weight=1)\r\n \r\n \r\n # defining variables\r\n self.bk=main.backend_process(CWP=\"default\")\r\n self.Direct_Backend=backend.backendworks(CWP=\"default\")\r\n self.removeModules_FromCreatedProj_list=[]\r\n self.installModuleToProject={} # dict of module name -> version; addmodule() and saveProject() index it by name\r\n self.moduleCheckBoxList_name=[] # this list contains the checkboxes created in the for loop\r\n self.moduleVersionList=[]\r\n self.Mdnvrlist=[]\r\n self.moduledata=self.Direct_Backend.getModules_dict()\r\n self.ddir=\"None\"\r\n self.Rem_module_data=[]\r\n \r\n # creating main frames\r\n self.nav=ctk.CTkFrame(self)\r\n self.nav.grid(sticky=\"ns\",padx=10,pady=10)\r\n\r\n self.startbtn=ctk.CTkButton(self.nav,text=\"➕ New project\",fg_color=\"#0286fa\",command=self.Frame_createprojframe)\r\n self.startbtn.grid(column=0,padx=10,pady=10)\r\n\r\n # displaying projects\r\n with open('data.json', 'r') as file:\r\n ProjectData = json.load(file)\r\n\r\n for project in ProjectData[\"Project\"]:\r\n viewProvectbtn=ctk.CTkButton(self.nav,text=project,command=lambda i=project: self.viewProjectFunc(projectName=i))\r\n viewProvectbtn.grid(column=0,padx=10,pady=10)\r\n \r\n # creating widgets\r\n self.workspaceFrame=ctk.CTkFrame(self)\r\n self.workspaceFrame.grid(row=0,column=1,sticky=\"nsew\",pady=10)\r\n self.workspaceFrame.grid_columnconfigure(1, weight=200)\r\n self.workspaceFrame.grid_rowconfigure(0,weight=200)\r\n\r\n self.python_projsettings=ctk.CTkFrame(self.workspaceFrame)\r\n self.python_projsettings.grid(column=1,row=0,sticky=\"nsew\",padx=10,pady=10)\r\n # creating widgets of CreateNewProject \r\n self.python_projsettings.grid_columnconfigure(1,weight=2)\r\n self.python_projsettings.grid_columnconfigure(1,weight=20)\r\n self.python_projsettings.grid_rowconfigure(6,weight=2)\r\n\r\n \"\"\"all the widgets for the create python project \"\"\"\r\n self.ChooseDir=ctk.CTkButton(self.python_projsettings,text=\"Choose folder\",command=self.choosedir)\r\n 
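# note: the widgets below are created once here; the Frame_* methods only show/hide them via grid()/grid_forget()\r\n 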
self.CancleCreatingProject=ctk.CTkButton(self.python_projsettings,text=\"Cancle\",fg_color=\"#a83232\",width=50,command=self.Frame_remprojframe)\r\n self.path_lable=ctk.CTkLabel(self.python_projsettings,text=\"placeholder/path/to/project\")\r\n \r\n self.rdcheck=IntVar()\r\n self.readmecheck=ctk.CTkCheckBox(self.python_projsettings,text=\"Readme\",onvalue=1,variable=self.rdcheck)\r\n \r\n self.staticcheckvar=IntVar()\r\n self.staticcheck=ctk.CTkCheckBox(self.python_projsettings,text=\"Static folder\",onvalue=1,variable=self.staticcheckvar)\r\n \r\n self.reqcheckvar=IntVar()\r\n self.ReqTxtCheck=ctk.CTkCheckBox(self.python_projsettings,text=\"Req.txt\",onvalue=1,variable=self.reqcheckvar)\r\n \r\n self.srcditcheck=IntVar()\r\n self.SrcDirCheck=ctk.CTkCheckBox(self.python_projsettings,text=\"SourceCode folder\",onvalue=1,variable=self.srcditcheck) \r\n \r\n self.createbtn=ctk.CTkButton(self.python_projsettings,text=\"Create\",fg_color=\"#0286fa\",command=self.createproject)\r\n \r\n self.addMbtn=ctk.CTkButton(self.python_projsettings,text=\"✔️\",fg_color=\"#32a852\",width = 50 ,command=self.addmodule)\r\n self.remMbtn=ctk.CTkButton(self.python_projsettings,text=\"❌\",fg_color=\"#a83232\",width = 50,command=self.remmodule)\r\n \r\n self.VersionWariningLable=ctk.CTkLabel(self.python_projsettings,text=\"invalid version \",text_color=\"red\")\r\n\r\n self.versionVar=StringVar()\r\n self.versionentry=ctk.CTkEntry(self.python_projsettings,textvariable=self.versionVar)\r\n\r\n self.ModuleWariningLable=ctk.CTkLabel(self.python_projsettings,text=\"invalid module\",text_color=\"red\")\r\n\r\n self.moduleVar=StringVar()\r\n self.moduleEntey=ctk.CTkEntry(self.python_projsettings,textvariable=self.moduleVar)\r\n\r\n self.modulenameframe=ctk.CTkScrollableFrame(self.python_projsettings)\r\n # self.moduleversionframe=ctk.CTkScrollableFrame(self.python_projsettings)\r\n \r\n self.addmodulesentryes(moduledata_=self.moduledata)\r\n \r\n \r\n \"\"\"all the wordgets for Viewing creted project \"\"\"\r\n\r\n self.saveChangeBtn=ctk.CTkButton(self.python_projsettings,text=\"Save\",fg_color=\"#32a852\",width=50 ,command=self.saveProject)\r\n \r\n def viewProjectFunc(self,projectName):\r\n self.Direct_Backend.changeproject(projectName)\r\n \r\n self.addMbtn.grid(column=2,row=5,padx=10,pady=30,sticky=\"n\")\r\n self.remMbtn.grid(column=3,row=5,padx=10,pady=30,sticky=\"n\")\r\n self.CancleCreatingProject.grid(row=0,column=3,padx=10,pady=10)\r\n self.versionentry.grid(row=5,column=0,sticky=\"we\",padx=10,pady=10)\r\n self.moduleEntey.grid(row=5,column=1,sticky=\"we\",padx=10,pady=10)\r\n self.modulenameframe.grid(row=6,column=0,sticky=\"nwe\",padx=10,columnspan=3)\r\n self.moduledata=self.Direct_Backend.getmodules_json()\r\n self.saveChangeBtn.grid(row=0,column=2,padx=10,pady=10)\r\n self.CancleCreatingProject.configure(command=self.Frame_remViewprojframe)\r\n\r\n self.addmodulesentryes(moduledata_=self.moduledata)\r\n for Cb in self.moduleCheckBoxList_name:\r\n Cb.grid(padx=10,pady=10)\r\n for Cb in self.moduleVersionList_version:\r\n Cb.grid(padx=10,pady=10)\r\n\r\n self.startbtn.configure(state=\"disabled\")\r\n\r\n def addmodulesentryes(self,moduledata_):\r\n #refreashing the lists for use\r\n self.moduleCheckBoxList_name=[]\r\n self.moduleVersionList_version=[]\r\n self.Mdnvrlist=[]\r\n self.Mdnlist=[]\r\n \r\n for item in moduledata_[\"modules\"]:\r\n varMdn=StringVar()\r\n self.Mdnlist.append(varMdn)\r\n 
mdlNamecheckB=ctk.CTkCheckBox(self.modulenameframe,text=item,variable=varMdn,onvalue=item,offvalue=\"off\")\r\n self.moduleCheckBoxList_name.append(mdlNamecheckB)\r\n \r\n mdlVersioncheckB=ctk.CTkLabel(self.modulenameframe,text=self.moduledata[\"modules\"][item])\r\n self.moduleCheckBoxList_name.append(mdlVersioncheckB)\r\n \r\n def blank(self,*args):\r\n print(args)\r\n pass\r\n \r\n def Frame_createprojframe(self):\r\n self.ChooseDir.grid(padx=10,pady=10)\r\n self.path_lable.grid(column=1,row=0,sticky=\"w\",padx=10,columnspan=10)\r\n self.CancleCreatingProject.grid(row=0,column=3,padx=10,pady=10)\r\n\r\n self.readmecheck.grid(row=1,column=0,sticky=\"nw\",padx=10,pady=30)\r\n self.staticcheck.grid(row=2,column=0,sticky=\"nw\",padx=10)\r\n self.ReqTxtCheck.grid(row=1,column=1,sticky=\"nw\",padx=10,pady=30)\r\n self.SrcDirCheck.grid(row=2,column=1,sticky=\"nw\",padx=10)\r\n self.createbtn.grid(column=1,row=3,padx=10,pady=30,sticky=\"nw\")\r\n self.addMbtn.grid(column=2,row=5,padx=10,pady=30,sticky=\"n\")\r\n self.remMbtn.grid(column=3,row=5,padx=10,pady=30,sticky=\"n\")\r\n\r\n self.versionentry.grid(row=5,column=0,sticky=\"we\",padx=10,pady=10)\r\n self.moduleEntey.grid(row=5,column=1,sticky=\"we\",padx=10,pady=10)\r\n \r\n self.modulenameframe.grid(row=6,column=0,sticky=\"nswe\",padx=10,columnspan=3)\r\n for Cb in self.moduleCheckBoxList_name:\r\n Cb.grid(column=1,padx=10,pady=10)\r\n for Cb in self.moduleVersionList_version:\r\n Cb.grid(column=0,padx=10,pady=10)\r\n self.startbtn.configure(state=\"disabled\")\r\n \r\n def Frame_remprojframe(self):\r\n self.python_projsettings.grid_forget()\r\n for Cb in self.moduleCheckBoxList_name:\r\n Cb.grid_forget()\r\n for Cb in self.moduleVersionList_version:\r\n Cb.grid_forget() \r\n self.startbtn.configure(state=\"normal\")\r\n\r\n def Frame_remViewprojframe(self):\r\n self.python_projsettings.grid_forget()\r\n for Cb in self.moduleCheckBoxList_name:\r\n Cb.grid_forget()\r\n for Cb in self.moduleVersionList_version:\r\n Cb.grid_forget()\r\n self.saveChangeBtn.grid_forget() \r\n self.startbtn.configure(state=\"normal\")\r\n \r\n def loop(self):\r\n self.mainloop()\r\n\r\n def choosedir(self):\r\n self.ddir=filedialog.askdirectory()\r\n self.path_lable.configure(text=self.ddir)\r\n \r\n def addmodule(self):#adds module to be installed \r\n if self.Direct_Backend.getModule(self.moduleVar.get(),self.versionVar.get())==\"no such module\":\r\n self.ModuleWariningLable.grid(row=4,column=1,sticky=\"s\")\r\n \r\n \r\n elif self.Direct_Backend.getModule(self.moduleVar.get(),self.versionVar.get())==\"no such version\":\r\n self.VersionWariningLable.grid(row=4,column=0,sticky=\"s\")\r\n \r\n else:\r\n self.ModuleWariningLable.grid_forget()\r\n self.VersionWariningLable.grid_forget()\r\n for Cb in self.moduleCheckBoxList_name:\r\n Cb.grid_forget()\r\n for Cb in self.moduleVersionList_version:\r\n Cb.grid_forget()\r\n \r\n # tempdict={self.moduleVar.get():self.versionVar.get()}\r\n self.moduledata[\"modules\"][self.moduleVar.get()] = str(self.versionVar.get())\r\n self.installModuleToProject[self.moduleVar.get()] = str(self.versionVar.get())\r\n self.addmodulesentryes(moduledata_=self.moduledata)\r\n for Cb in self.moduleCheckBoxList_name:\r\n Cb.grid(column=1,padx=10,pady=10)\r\n for Cb in self.moduleVersionList_version:\r\n Cb.grid(column=0,padx=10,pady=10)\r\n \r\n def remmodule(self):#removes modules to be deleted or to me not installed\r\n for Cb in self.moduleCheckBoxList_name:\r\n Cb.grid_forget()\r\n for Cb in self.moduleVersionList_version:\r\n 
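# remove each version label from the grid before the module list is rebuilt\r\n 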
Cb.grid_forget()\r\n \r\n self.removeModules_FromCreatedProj_list=[]\r\n \r\n i=0\r\n for var_ in self.Mdnlist:\r\n if var_.get() != \"off\":\r\n if var_.get() in self.moduledata[\"modules\"]:\r\n self.removeModules_FromCreatedProj_list.append(var_.get())\r\n del self.moduledata[\"modules\"][var_.get()]\r\n var_.set(0)\r\n self.Mdnlist.remove(var_)\r\n self.moduleCheckBoxList_name.pop(i)\r\n\r\n i=i+1\r\n self.addmodulesentryes(moduledata_=self.moduledata)\r\n for Cb in self.moduleCheckBoxList_name:\r\n Cb.grid(column=1,padx=10,pady=10)\r\n for Cb in self.moduleVersionList_version:\r\n Cb.grid(column=0,padx=10,pady=10)\r\n pass \r\n # self.Direct_Backend.Uninstall_Modules( Mdl=removeModules_FromCreatedProj_list)\r\n def createproject(self):\r\n if self.ddir==\"None\":\r\n messagebox.showwarning(\"Warning\", \"Choose the directory!\")\r\n return True\r\n self.bk.createproj(r=self.rdcheck.get(),st=self.staticcheckvar.get(),sr=self.srcditcheck.get(),rq=self.reqcheckvar.get(),d=self.ddir,md=self.moduledata)\r\n\r\n self.Frame_remprojframe()\r\n\r\n def saveProject(self):\r\n self.Frame_remViewprojframe()\r\n self.Direct_Backend.Uninstall_Modules( self.removeModules_FromCreatedProj_list)\r\n print(self.removeModules_FromCreatedProj_list)\r\n self.Direct_Backend.Install_Module( self.installModuleToProject)\r\n \r\n pass\r\nif __name__==\"__main__\":\r\n\r\n app = App()\r\n app.mainloop()","repo_name":"lecrowpus/Projectmannager-","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":12018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"17574414119","text":"# 作者 :duty\n# 创建时间 :2022/6/23 3:35 下午\n# 文件 :SDIN.py\nimport torch\nfrom torch import nn\n\n\n## 参考阿里SDM的模型结构, 当前窗口为短期兴趣,历史窗口为长期兴趣;当前窗口通过多头注意力机制提取用户的多个兴趣,通过selfattention过滤掉无关的行为\n## 参考链接 https://blog.csdn.net/wuzhongqiang/article/details/123856954\nfrom model_and_train.sdm_attention import SdmAttention\nfrom model_and_train.sampled_softmax_loss import SampledSoftmax\n\nclass TzSdm(nn.Module):\n\tdef __init__(self, user_num, shop_num, category_num,item_num, current_window_seq_length, history_window_seq_length, embedding_dim, lstm_hidden_size,categorical_feature_num,\n\t\t\t\t use_sampled_softmax=False):\n\t\tsuper().__init__()\n\t\tself.user_embed_layer = nn.Embedding(user_num, embedding_dim)\n\t\t# self.target_shop_embed_layer = nn.Embedding(shop_num, embedding_dim)\n\t\t# self.target_category_embed_layer = nn.Embedding(category_num, embedding_dim)\n\t\t# self.current_window_shop_embed_layer = nn.Embedding(shop_num, embedding_dim)\n\t\t# self.current_window_category_embed_layer = nn.Embedding(category_num, embedding_dim)\n\t\t# self.history_window_shop_embed_layer = nn.Embedding(shop_num, embedding_dim)\n\t\t# self.history_window_category_embed_layer = nn.Embedding(category_num, embedding_dim)\n\t\t## 是分开每个单独embedding还是只用一个实例化embedding 那个效果好 ?????\n\t\tself.shop_embed_layer = nn.Embedding(shop_num, embedding_dim)\n\t\tself.category_embed_layer = nn.Embedding(category_num, embedding_dim)\n\t\tself.multi_head_attention_layer = nn.MultiheadAttention(embedding_dim, 2)\n\t\tself.shop_lstm_layer = nn.LSTM(input_size=embedding_dim, hidden_size=lstm_hidden_size, batch_first=True, num_layers=2)\n\t\tself.category_lstm_layer = nn.LSTM(input_size=embedding_dim, hidden_size=lstm_hidden_size, batch_first=True,num_layers=2)\n\t\tself.self_attention = SdmAttention(embedding_dim*2)\n\t\tself.history_dense_layer = nn.Linear(embedding_dim*categorical_feature_num, 
embedding_dim)\n\t\tself.gate_dense_layer = nn.Linear(embedding_dim*3, embedding_dim)\n\t\tself.sampled_softmax = SampledSoftmax(item_num,nhid=embedding_dim, nsampled=200,tied_weight=None)\n\t\tself.use_sampled_softmax = use_sampled_softmax\n\n\tdef forward(self, input_data):\n\t\ttarget_item_shop_id, target_item_category_id, current_window_shop_ids, current_window_category_ids, history_window_shop_ids, \\\n\t\thistory_window_category_ids, user_id = input_data\n\t\tuser_embedding = self.user_embed_layer(user_id)\n\t\t## 短期用户行为结构\n\t\tcurrent_window_shop_embedding = self.shop_embed_layer(current_window_shop_ids)\n\t\tcurrent_window_category_embedding = self.category_embed_layer(current_window_category_ids)\n\t\thistory_window_shop_embedding = self.shop_embed_layer(history_window_shop_ids)\n\t\thistory_window_category_embedding = self.category_embed_layer(history_window_category_ids)\n\t\t## 当前的窗口先经过lstm, 然后多头注意力,然后selfattention;每个lstm的输入为商品的某一类特征:比如shop的序列,category的序列等\n\t\tcurrent_shop_lstm_out = self.shop_lstm_layer(current_window_shop_embedding)[0]\n\t\t##输出output,(hn,cn), output是最后一层每个时间步的输出, 所以取output[:,-1,:], hn为每个时间步最后一层的输出\n\t\tcurrent_category_lstm_out = self.category_lstm_layer(current_window_category_embedding)[0]\n\t\tmha_in = torch.cat([current_shop_lstm_out, current_category_lstm_out], dim=1)\n\t\tmha_out = self.multi_head_attention_layer(mha_in,mha_in, mha_in)[0] ## Q K V都一样, 输出为[B T E], 返回att和att_weights\n\t\t## 然后跟user做selfattention\n\t\tself_att_out = self.self_attention(user_embedding, mha_out) ## [B 1 E]\n\t\tshort_out = torch.squeeze(self_att_out, 1)\n\t\t## 长期用户行为结构,根据每个类别特征同user做attention最后concat\n\t\tshop_att_out = torch.squeeze(self.self_attention(user_embedding, history_window_shop_embedding),1)\n\t\tcategory_att_out = torch.squeeze(self.self_attention(user_embedding, history_window_category_embedding), 1)\n\t\thistory_cat_in = torch.cat([shop_att_out, category_att_out], dim=1)\n\t\tlong_out = self.history_dense_layer(history_cat_in)\n\n\t\t##长短期兴趣融合\n\t\tlong_short_cat_in = torch.cat([short_out, long_out, user_embedding], dim=1)\n\t\tlong_short_cat_out = self.gate_dense_layer(long_short_cat_in) ## [B E]\n\t\tgate_values = torch.sigmoid(long_short_cat_out)\n\t\tnegative_gate_values = torch.ones_like(gate_values)-gate_values\n\t\tlong_short_out = gate_values*short_out+negative_gate_values*long_out\n\t\tif not self.use_sampled_softmax:\n\t\t\ttarget_shop_embedding = self.shop_embed_layer(target_item_shop_id)\n\t\t\ttarget_category_embedding = self.category_embed_layer(target_item_category_id)\n\t\t\t## 如果是计算相似度\n\t\t\ttarget_embedding = torch.add(target_shop_embedding,target_category_embedding)\n\t\t\t# 简化了损失函数的计算,直接余弦距离\n\t\t\tcos_out = torch.cosine_similarity(long_short_out, target_embedding)\n\t\t\treturn torch.sigmoid(cos_out)\n\t\telse:\n\t\t\treturn long_short_out\n","repo_name":"dutyhong/sdm","sub_path":"model_and_train/SDM.py","file_name":"SDM.py","file_ext":"py","file_size_in_byte":5035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"683882507","text":"class Solution(object):\n def merge(self, nums1, m, nums2, n):\n \"\"\"\n :type nums1: List[int]\n :type m: int\n :type nums2: List[int]\n :type n: int\n :rtype: void Do not return anything, modify nums1 in-place instead.\n \"\"\"\n for i in range(0,n):\n index = m+i\n nums1[index] = nums2[i]\n\n nums1.sort()\n\n\n## Another Solution\n\nclass Solution(object):\n def merge(self, nums1, m, nums2, n):\n \"\"\"\n :type nums1: List[int]\n :type m: int\n 
:type nums2: List[int]\n :type n: int\n :rtype: void Do not return anything, modify nums1 in-place instead.\n \"\"\"\n if len(nums1) == 0 or len(nums2) == 0:\n return\n\n i = m - 1\n j = n - 1\n point = m + n - 1\n\n while i >= 0 and j >= 0:\n if nums1[i] >= nums2[j]:\n nums1[point] = nums1[i]\n i -= 1\n point -= 1\n else:\n nums1[point] = nums2[j]\n j -= 1\n point -= 1\n\n while j >= 0:\n nums1[point] = nums2[j]\n j -= 1\n point -= 1\n","repo_name":"FIRESTROM/Leetcode","sub_path":"Python/088__Merge_Sorted_Array.py","file_name":"088__Merge_Sorted_Array.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"16472859215","text":"from __future__ import print_function\nfrom torch import optim\nimport torch as t\n\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\n# import matplotlib.pyplot as plt\nimport torchvision as tv\nimport torchvision.transforms as transforms\nfrom torchvision.transforms import ToPILImage\nshow = ToPILImage()\n\ntransform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) # Normalize(mean, std)\n])\n\n\n# Training set\ntrain_set = tv.datasets.CIFAR10(\n root='/Users/weiyangbin/Downloads/cifar-100-python/',\n train=True,\n download=True,\n transform=transform\n)\n\n\ntrain_loader = t.utils.data.DataLoader(\n train_set,\n batch_size=4,\n shuffle=True,\n num_workers=2\n)\n\ntest_set = tv.datasets.CIFAR10(\n root='/Users/weiyangbin/Downloads/cifar-100-python',\n train=False,\n download=True,\n transform=transform\n)\n\ntest_loader = t.utils.data.DataLoader(\n test_set,\n batch_size=4,\n shuffle=False,\n num_workers=2\n)\n\nclasses = ('plane', 'car', 'bird', 'cat',\n 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n\n\n# (data, label) = train_set[100]\n# show((data + 1) / 2).resize((100, 100))\n# cnt = 0\n# to_plt_image = transforms.ToPILImage()\n# for image, label in train_loader:\n# if cnt > 2:\n# break\n# print(label)\n#\n# img = to_plt_image(image[0])\n# img.show()\n#\n# plt.imshow(img)\n# plt.show()\n# cnt += 1\n\n\n# data_iter = iter(train_loader)\n# images, label = data_iter.next()\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(3, 6, 5)\n self.conv2 = nn.Conv2d(6, 16, 5)\n self.fc1 = nn.Linear(16 * 5 * 5, 120)\n self.fc2 = nn.Linear(120, 84)\n self.fc3 = nn.Linear(84, 10)\n\n def forward(self, x):\n x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))\n x = F.max_pool2d(F.relu(self.conv2(x)), 2)\n x = x.view(x.size()[0], -1)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n\n return x\n\n\nnet = Net()\nprint(net)\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)\n\nt.set_num_threads(8)\nfor epoch in range(5):\n running_loss = 0.0\n for i, data in enumerate(train_loader, 0):\n\n # Input data\n inputs, labels = data\n\n # Zero the gradients\n optimizer.zero_grad()\n\n # Forward pass, backward pass\n outputs = net(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n\n # Update the parameters\n optimizer.step()\n\n # Print log info\n # loss is a scalar; use loss.item() to get its value (loss[0] no longer works)\n running_loss += loss.item()\n if i % 2000 == 1999:\n print('[%d, %5d] loss: %.3f'\\\n % (epoch+1, i+1, running_loss / 2000))\n running_loss = 0.0\n\nprint('Finish Training')\n\ndataiter = iter(test_loader)\nimages, labels = dataiter.next()\nprint('实际label: ', ' '.join(\\\n '%08s' % classes[labels[j]] for j in range(4)))\n\n\noutputs = net(images)\n_, 
predicted = t.max(outputs.data, 1)\n\nprint('预测结果:', ' '.join(\\\n '%5s' % classes[predicted[j]] for j in range(4)))\n\ncorrect = 0\ntotal = 0\n\nwith t.no_grad():\n for data in test_loader:\n images, labels = data\n outputs = net(images)\n _, predicted = t.max(outputs, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum()\n\nprint('10000张测试集中准确率为: %d %%' % (100 * correct / total))\n","repo_name":"WeiYangBin/Notes-Deep-Learning","sub_path":"LeNet-torch.py","file_name":"LeNet-torch.py","file_ext":"py","file_size_in_byte":3570,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"73403857752","text":"from socket import *\nimport threading\nimport sys\nimport random\nimport string\nimport os\n\n\ndef handle_connection(client, client_address):\n print(\"connection from address: {!s}\".format(client_address))\n\n msg_type_bytes = client.recv(1)\n msg_type = int.from_bytes(msg_type_bytes, byteorder=sys.byteorder)\n\n msg_size_bytes = client.recv(msg_type - 1)\n msg_size = int.from_bytes(msg_size_bytes, byteorder=sys.byteorder)\n\n print(\"msg size: {}\".format(msg_size))\n data = client.recv(msg_size)\n if msg_type == 1:\n print(data)\n else:\n filename = write_file(data)\n print(\"saved bytes to file: {}\".format(filename))\n\n client.sendall(data)\n\n client.close()\n\n\ndef write_file(data, filename=None):\n if not filename:\n filename_length = 8\n filename = \"\".join([random.SystemRandom().choice(\n string.ascii_letters + string.digits) for n in range(filename_length)])\n\n filename += \".bin\"\n\n try:\n with open(\"./\" + filename, \"wb\") as f:\n f.write(data)\n except EnvironmentError as e:\n print(\"error writing file: {}\".format(e))\n return None\n\n return filename\n\n\ndef main():\n server = socket(AF_INET, SOCK_STREAM)\n\n port = 5000\n server.bind(('', port))\n\n server.listen(5)\n print(\"server listening on: {!s}\".format(port))\n\n while True:\n (client, address) = server.accept()\n\n client_thread = threading.Thread(\n target=handle_connection, args=(client, address))\n\n client_thread.start()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Dudeslam/NetworkSecurity","sub_path":"SharedFOlder/Assignment2/Task2-TCP/server2.py","file_name":"server2.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"40665527445","text":"from typing import List\n\n\nclass Solution:\n def shortestPathBinaryMatrix(self, grid: List[List[int]]) -> int:\n m = len(grid)\n n = len(grid[0])\n dx = [-1, -1, 0, 1, 1, 1, 0 ,-1]\n dy = [0, 1, 1, 1, 0, -1, -1, -1]\n queue_front = []\n queue_end = []\n if grid[0][0] != 0:\n return -1\n if 0 == m-1 and 0 == n-1:\n return 1\n if grid[m-1][n-1] != 0:\n return -1\n # 初始值是第一个点\n queue_front.append((0,0))\n # 初始值是最后一个点\n queue_end.append((m-1,n-1))\n visited_front = dict()\n visited_front[(0,0)] = 1\n visited_end = dict()\n visited_end[(m-1, n-1)] = 1\n self.result = -1\n def bfs(queue, visited, visited_other):\n new_queue = []\n for node in queue:\n for direction in range(8):\n new_x = node[0] + dx[direction]\n new_y = node[1] + dy[direction]\n if 0 <= new_x < m and 0 <= new_y < n and grid[new_x][new_y] == 0:\n if (new_x, new_y) not in visited:\n if (new_x, new_y) in visited_other:\n # print(node[0], node[1], new_x, new_y)\n self.result = visited[(node[0], node[1])] + visited_other[(new_x, new_y)]\n return\n visited[(new_x, new_y)] = visited[(node[0], node[1])] + 1\n 
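# queue the newly discovered cell so it is expanded in this direction's next BFS layer\n 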
new_queue.append((new_x, new_y))\n return new_queue\n\n while queue_front and queue_end:\n queue_front = bfs(queue_front, visited_front, visited_end)\n if self.result > -1:\n return self.result\n queue_end = bfs(queue_end, visited_end, visited_front)\n if self.result > -1:\n return self.result\n return -1\n","repo_name":"wakalubiubiu/algorithm2021","sub_path":"week_09/homework/shortestPathBinaryMatrix.py","file_name":"shortestPathBinaryMatrix.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"72457324951","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nfrom SOM import train_SOM,feature_normalization,get_U_Matrix,get_winner_index,weights_PCA\r\nfrom collections import defaultdict, Counter\r\nimport matplotlib.gridspec as gridspec\r\nif __name__ == \"__main__\":\r\n \r\n # 读取iris数据\r\n datas = np.loadtxt(\"iris.data\",delimiter=\",\",usecols=(0,1,2,3),dtype='float32')\r\n labs = np.loadtxt(\"iris.data\",delimiter=\",\",usecols=(4),dtype='str')\r\n N,D = np.shape(datas)\r\n \r\n # 数据预处理\r\n datas = datas/np.linalg.norm(datas,axis=1,keepdims=True)\r\n\r\n # 数据切分 分为训练接和测试集\r\n N_train = int(np.ceil(N*0.7))\r\n N_test = N-N_train\r\n print(N_train)\r\n rand_index = np.random.permutation(np.arange(N))\r\n \r\n train_datas = datas[rand_index[:N_train]]\r\n train_labs = labs[rand_index[:N_train]]\r\n \r\n test_datas = datas[rand_index[N_train:]]\r\n test_labs = labs[rand_index[N_train:]]\r\n \r\n # SOM 训练\r\n X=7\r\n Y=7\r\n weights = train_SOM(X=X,Y=Y,N_epoch=5,datas=train_datas,sigma=0.5,init_weight_fun=weights_PCA,seed=20)\r\n \r\n # 计算输出层的每个节点上映射了哪些数据\r\n win_map = defaultdict(list)\r\n for x,lab in zip(datas,labs):\r\n win_map[get_winner_index(x,weights)].append(lab)\r\n \r\n win_lab = defaultdict(list)\r\n for key in win_map.keys():\r\n win_lab[key] = max(win_map[key],key=win_map[key].count)\r\n print(win_lab)\r\n \r\n # 进行测试:\r\n n_right = 0\r\n for i in range(N_test):\r\n x = test_datas[i]\r\n win = get_winner_index(x,weights)\r\n \r\n if win in win_lab.keys():\r\n det_lab = win_lab[win]\r\n else:\r\n det_lab = 'None'\r\n \r\n if det_lab == test_labs[i]:\r\n n_right = n_right+ 1\r\n \r\n # 计算准确率\r\n print('Accuracy = %.2f %%'%(n_right*100/N_test))\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n # # seed 数据展示\r\n # columns=['area', 'perimeter', 'compactness', 'length_kernel', 'width_kernel',\r\n # 'asymmetry_coefficient', 'length_kernel_groove', 'target']\r\n # data = pd.read_csv('seeds_dataset.txt', \r\n # names=columns, \r\n # sep='\\t+', engine='python')\r\n # labs = data['target'].values\r\n # label_names = {1:'Kama', 2:'Rosa', 3:'Canadian'}\r\n # datas = data[data.columns[:-1]].values\r\n # N,D = np.shape(datas)\r\n # print(N,D)\r\n \r\n # # 对训练数据进行正则化处理\r\n # datas = feature_normalization(datas)\r\n \r\n # # SOM的训练\r\n # X=3\r\n # Y=1\r\n # weights = train_SOM(X=X,Y=Y,N_epoch=4,datas=datas,sigma=1.5,init_weight_fun=weights_PCA)\r\n \r\n # # 实现聚类\r\n \r\n # # 获取聚类的编号\r\n # index_clusters = []\r\n # for i in range(N):\r\n # x = datas[i]\r\n # winner = get_winner_index(x,weights)\r\n # index_clusters.append(winner[0]*Y+winner[1])\r\n \r\n \r\n # for c in np.unique(index_clusters):\r\n \r\n # ii = np.where(index_clusters==c)[0]\r\n \r\n # plt.scatter(datas[ii, 0],\r\n # datas[ii, 2], label='cluster='+str(c), alpha=.7)\r\n # 
plt.legend() \r\n    # for i in range(X):\r\n    #     for j in range(Y):\r\n    #         plt.scatter(weights[i,j,0], weights[i,j,2], marker='x', \r\n    #                     s=80, linewidths=1, color='k')\r\n    # plt.legend()\r\n    # plt.show()\r\n\r\n    \r\n    \r\n    \r\n    \r\n    \r\n    \r\n    \r\n    ","repo_name":"Feliks151450/SOM","sub_path":"SOM_Classification.py","file_name":"SOM_Classification.py","file_ext":"py","file_size_in_byte":3632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"72208774873","text":"\"\"\"\nScript to load the csv data in the database\n\"\"\"\nimport pandas as pd\nfrom chat_app.models import User, Message, Thread\nfrom datetime import datetime\n\nmessage_df = pd.read_csv('message_data.csv')\ndf = pd.DataFrame(message_df)\n\nid =0\ndf[\"priority\"] = \"\"\nfor index, row in df.iterrows(): \n    # dates = datetime.strptime(str(row[\"Timestamp (UTC)\"]), \"%Y-%m-%d %H:%M:%S\"),\n    message = row[\"Message Body\"]\n    # membership must be tested explicitly; a bare string literal is always truthy\n    if \"payment\" in message and \"loan\" in message:\n        priority = 1\n    elif \"payment\" in message and \"loan\" not in message:\n        priority = 2\n    elif \"loan\" in message:\n        priority = 1\n    else:\n        priority = 3\n    df.at[index,\"priority\"] = priority\n\ndf2 = df.groupby('User ID')[['Timestamp (UTC)', 'priority']].min()\ndicts = {}\nfor index, row in df2.iterrows():\n    dates = datetime.strptime(str(row[\"Timestamp (UTC)\"]), \"%Y-%m-%d %H:%M:%S\")\n    dicts[str(index)] = {\"timestamp\":dates, \"priority\":row[\"priority\"]}\n\nfor index, row in df.iterrows():\n    username = str(row[\"User ID\"])\n    message = row[\"Message Body\"]\n    timestamp = row[\"Timestamp (UTC)\"]\n    if User.objects.filter(username=username).exists():\n        user = User.objects.get(username=username)\n        thread = Thread.objects.get(client_id=user.id)\n        message = Message(thread_id = thread.id, sender_id = user.id,message_body=message, timestamp=timestamp)\n        message.save()\n    \n    else:\n        user = User(username=username, password=username, role=\"client\")\n        user.save()\n        thread = Thread(client_id = user.id, start_time=dicts[username][\"timestamp\"], thread_type = dicts[username][\"priority\"])\n        thread.save()\n        message = Message(thread_id = thread.id, sender_id = user.id,message_body=message, timestamp=timestamp)\n        message.save()\n\nprint(\"Data uploaded\")\n    ","repo_name":"jahnavi0102/chat-app","sub_path":"chat_app/scripts.py","file_name":"scripts.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"37282128841","text":"mTnD = { }\r\nestados = input()\r\nsigma = input()\r\ntal = input()\r\nsimbolo_inicio = input()\r\nbranco = input()\r\n\r\ntransicoes = int(input())\r\n\r\nfor i in range(transicoes):\r\n\r\n\tquintupla = input()\r\n\tquintupla = quintupla.split(' ')\r\n\tchave = (quintupla[0],quintupla[1])\r\n\r\n\tif chave not in mTnD:\r\n\t\tmTnD[chave] = []\r\n\r\n\tmTnD[chave].append([quintupla[2],quintupla[3],quintupla[4]])\r\n\r\ninicio = input()\r\nfinal = input()\r\npalavras = input()\r\npalavras = palavras.split(' ')\r\n\r\nfor palavra in palavras:\r\n\testado = inicio\r\n\tpilha_de_exec = []\r\n\tfita = []\r\n\tfita.append(simbolo_inicio)\r\n\r\n\tfor letra in palavra:\r\n\t\tfita.append(letra)\r\n\r\n\tfita.append(branco)\r\n\tcabecote = 1\r\n\tpilha_de_exec.append([fita,cabecote,estado])\r\n\r\n\twhile True:\r\n\t\tif(len(pilha_de_exec) != 0):\r\n\t\t\t\tdados = pilha_de_exec.pop(0)\r\n\t\t\t\testado = dados[2]\r\n\t\t\t\tcabecote = dados[1]\r\n\t\t\t\tfita = dados[0]\r\n\t\tif((estado,fita[cabecote]) in 
mTnD):\r\n\t\t\tfor acao in mTnD[(estado,fita[cabecote])]:\r\n\t\t\t\tnovafita = fita.copy()\t\t\t\t\r\n\t\t\t\tnovafita[cabecote] = acao[1]\r\n\t\t\t\tnovoestado = acao[0]\r\n\t\t\t\tif(acao[2] == \"D\"):\r\n\t\t\t\t\tnovoCabecote = cabecote + 1\r\n\t\t\t\telif(acao[2]==\"E\"):\r\n\t\t\t\t\tnovoCabecote = cabecote - 1\r\n\t\t\t\telse:\r\n\t\t\t\t\tnovoCabecote = cabecote\r\n\t\t\t\tif(novoCabecote == len(novafita)):\r\n\t\t\t\t\tnovafita.append(branco)\r\n\t\t\t\tpilha_de_exec.append([novafita,novoCabecote,novoestado])\r\n\t\t\t\t\t\r\n\t\telse:\r\n\t\t\tif estado in final:\r\n\t\t\t\tbreak\r\n\r\n\t\t\tif(len(pilha_de_exec) == 0):\r\n\t\t\t\tbreak\r\n\r\n\tif estado not in final:\r\n\t\tprint('N')\r\n\telse:\r\n\t\tprint('S')\r\n","repo_name":"FVjesus/NDTM","sub_path":"mTnD.py","file_name":"mTnD.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"14577076336","text":"# coding=utf-8\r\nimport tensorflow as tf\r\nfrom tensorflow.keras.layers import BatchNormalization\r\n# from keras.models import load_model\r\nfrom matplotlib import pyplot as plt\r\nfrom tensorflow import keras\r\nfrom tensorflow.keras.applications import xception, vgg19\r\nfrom tensorflow.keras.layers import Dense\r\nfrom tensorflow.keras.models import Sequential\r\n\r\nfrom image import reverse_image\r\nfrom training import training, training2\r\n\r\n\r\ndef create_model(base_model_name, classes, img_size, img_channels):\r\n    if base_model_name == 'xception':\r\n        base_model = xception.Xception(\r\n            weights='imagenet',\r\n            include_top=False,\r\n            input_shape=(img_size[0], img_size[1], img_channels),\r\n            pooling='avg')\r\n    else:\r\n        base_model = vgg19.VGG19(\r\n            weights='imagenet',\r\n            include_top=False,\r\n            input_shape=(img_size[0], img_size[1], img_channels),\r\n            pooling='avg')\r\n    base_model.trainable = False\r\n\r\n    top_model = Sequential()\r\n    top_model.add(Dense(512, activation='relu', input_shape=base_model.output_shape[1:]))\r\n    top_model.add(BatchNormalization())\r\n    top_model.add(Dense(1024, activation='relu'))\r\n    top_model.add(BatchNormalization())\r\n    top_model.add(Dense(1024, activation='relu'))\r\n    # top_model.add(Dropout(0.5))\r\n    top_model.add(Dense(len(classes), activation='softmax'))\r\n\r\n    training_model = tf.keras.models.Model(\r\n        inputs=base_model.input,\r\n        outputs=top_model(base_model.output)\r\n    )\r\n\r\n    training_model.compile(\r\n        optimizer=keras.optimizers.SGD(learning_rate=0.001, momentum=0.01, nesterov=True), # lr-0.001\r\n        loss='binary_crossentropy',\r\n        metrics='accuracy'\r\n    )\r\n    training_model.summary()\r\n\r\n    style_layer_output = training_model.get_layer('block5_conv1').output\r\n\r\n    output_model = tf.keras.models.Model(\r\n        inputs=training_model.input,\r\n        outputs=(style_layer_output, training_model.output)\r\n    )\r\n\r\n    return training_model, 
output_model = load_model('model8_cats_vs_dogs_vgg19.h5')\r\ntraining2(training_model, output_model, base_model_name, 'model8_cats_vs_dogs_vgg19.h5', classes, img_size)\r\n\r\nresult_class_number, image = reverse_image(training_model, base_model_name, 0, img_size, 'dog_9236.png')\r\nplt.imshow(image)\r\nplt.show()\r\nimage.save(\"result.jpg\")\r\n","repo_name":"myusername-dot/inside-out-img","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"33010672370","text":"# Advent of Code 2021 Day 15 - Part 1\nfrom queue import PriorityQueue\nimport sys\n\ndef processData(string):\n string = string.replace('\\n','')\n return string\n\n# Better way to get neighbours\ndef getNeighbours(x,y,matrix,cost):\n neighbours = []\n operations = [(0,1),(1,0),(0,-1),(-1,0)]\n for operation in operations:\n xAdd = x+operation[0]\n yAdd = y+operation[1]\n if xAdd >= 0 and xAdd < len(matrix[0]) and yAdd >= 0 and yAdd < len(matrix):\n neighbours.append((cost + matrix[yAdd][xAdd], (xAdd,yAdd)))\n return neighbours\n\n# Utilise Dijkstra with Priority Queue to Find the shortest Path from top left to bottom right\ndef dijkstra(matrix):\n # Priority queue will automatically sort tuples inserted into queue by cost in ascending order. O(logn) insertion time.\n queue = PriorityQueue()\n queue.put((0, (0,0)))\n # Create a shortest path matrix, utilising Tabulation Dynamic Programming to store the current shortest path for each coordinate\n spMatrix = [[sys.maxsize]*len(matrix[0]) for _ in range(len(matrix))]\n while not queue.empty():\n coord = queue.get()\n x = coord[1][0]\n y = coord[1][1]\n cost = coord[0]\n if x == 0 and y == 0:\n spMatrix[y][x] = 0\n if x == len(matrix[0]) - 1 and y == len(matrix) - 1:\n spMatrix[y][x] = cost\n break\n else:\n # grab neighbours\n neighbours = getNeighbours(x,y,matrix,cost)\n for neighbour in neighbours:\n neighbourX = neighbour[1][0]\n neighbourY = neighbour[1][1]\n neighbourCost = neighbour[0]\n # If neighbour cost is less than the neighbour cost that exists in the shortest path matrix, append it to priority queue\n if spMatrix[neighbourY][neighbourX] > neighbourCost:\n spMatrix[neighbourY][neighbourX] = neighbourCost\n queue.put(neighbour)\n return spMatrix[len(matrix) - 1][len(matrix[0]) - 1]\n\nf = open('input.txt', 'r')\ndata = f.readlines()\ndata = list(map(processData, data))\nf.close()\n\nmatrix = []\nfor lines in data:\n line = []\n for c in lines:\n line.append(int(c))\n matrix.append(line)\nanswer = dijkstra(matrix)\nprint(answer)\n","repo_name":"Zaikatana/advent-of-code-2021","sub_path":"Day 15/AOC2021-15a.py","file_name":"AOC2021-15a.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"26168931749","text":"#! 
/usr/bin/env python3\n\n\"\"\"\nPROGRAMMER: Aiden Peace\nDATE: 7/6/2022\nTITLE: Exercise 8 - Rock Paper Scissors\nDESCRIPTION: Make a two-player Rock-Paper-Scissors game.\n\nRemember the rules:\n    Rock beats scissors\n    Scissor beats paper\n    Paper beats rock\n\"\"\"\n\n\ndef main():\n\n    # welcome and rules output to console for players understanding\n    print(\"Welcome to Aiden's Rock-Paper-Scissors Game\")\n    print(\"Rules: Type out the word \\\"Rock\\\", \\\"Paper\\\", \\\"Scissors\\\" to play that decision - X - Exit\")\n    print() # print statement for spacing\n\n    # taking players name and saving it in a variable for later usage\n    player1_name = input(\"Player 1: Enter your name: \").capitalize()\n    player2_name = input(\"Player 2: Enter your name: \").capitalize()\n    print() # print statement for spacing\n\n    # variable list:\n    game_list = [\"Rock\", \"Paper\", \"Scissors\"]\n    status = True\n    rounds = 0\n    player1_score = 0\n    player2_score = 0\n\n    # 'and' (not 'or') so that quitting mid-game actually ends the loop\n    while status and rounds < 3:\n\n        # taking players playing decision\n        player1 = input(player1_name + \" enter your play: \").capitalize()\n        player2 = input(player2_name + \" enter your play: \").capitalize()\n\n        # if the players enter the same thing we will output\n        # \"Tie\" and the rounds will not be incremented.\n        if player1 == player2:\n            if player1 != 'X' and player2 != 'X':\n                print('Tie')\n        # Rock beats scissors : if the player uses rock to beat\n        # Scissors then the program will output the winner of the round\n        # and give them a point\n        elif player1 == game_list[0] and player2 == game_list[2]:\n            print(\"Rock Wins - \" + player1_name + \" earns a point\")\n            print()\n            player1_score += 1\n            rounds += 1\n        elif player2 == game_list[0] and player1 == game_list[2]:\n            print(\"Rock Wins - \" + player2_name + \" earns a point\")\n            print()\n            player2_score += 1\n            rounds += 1\n        # Scissor beats paper : if the player uses scissors to beat\n        # paper then the program will output the winner of the round\n        # and give them a point\n        elif player1 == game_list[2] and player2 == game_list[1]:\n            print(\"Scissor Wins - \" + player1_name + \" earns a point\")\n            print()\n            player1_score += 1\n            rounds += 1\n        elif player2 == game_list[2] and player1 == game_list[1]:\n            print(\"Scissor Wins - \" + player2_name + \" earns a point\")\n            print()\n            player2_score += 1\n            rounds += 1\n        # Paper beats rock : if the player uses paper to beat\n        # rock then the program will output the winner of the round\n        # and give them a point\n        elif player1 == game_list[1] and player2 == game_list[0]:\n            print(\"Paper Wins - \" + player1_name + \" earns a point\")\n            print()\n            player1_score += 1\n            rounds += 1\n        elif player2 == game_list[1] and player1 == game_list[0]:\n            print(\"Paper Wins - \" + player2_name + \" earns a point\")\n            print()\n            player2_score += 1\n            rounds += 1\n\n        # when a player or both players press 'X'\n        # we will tell them goodbye and then exit program\n        if player1 == 'X' and player2 == 'X':\n            print(\"Both of you are quitters... 
what a shame\")\n status = False\n elif player1 == 'X':\n print(player1_name + \" Doesn't want to play anymore Goodbye\")\n status = False\n elif player2 == 'X':\n print(player2_name + \" Doesn't want to play anymore Goodbye\")\n status = False\n\n # when we are complete with round 3 the game is over\n # we will then output the winner of the game and as\n # the players if they would like to play again\n if rounds == 3:\n if player1_score <= 3 and player2_score <= 2:\n print(player1_name + \" Has won this game\")\n elif player2_score <= 3 and player1_score <= 2:\n print(player2_name + \" Has won this game\")\n else:\n print(\"Something is wrong please tell Programmer what's going on\")\n print()\n check = input(\"Would you like to play again? Y/N: \").capitalize()\n if check == 'Y':\n rounds = 0\n continue\n else:\n print(\"Goodbye\")\n status = False\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Aiden-Peace300/practicepython","sub_path":"Exercise8.py","file_name":"Exercise8.py","file_ext":"py","file_size_in_byte":4506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"30501607244","text":"from django.views import View\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponse, JsonResponse\nfrom django.views.generic import TemplateView\nfrom tournament.models import *\nfrom tournament.forms import *\nfrom . import constants\nfrom utils import calc_rating_change, parse_ajax_to_json, assign_location\n\n\nclass DayView(View):\n def get(self, request, day=None, start_match=-1):\n if not day:\n return HttpResponse(\"Invalid day\")\n context = {}\n db_matches = Match.objects.filter(day=day, published=True).order_by('round')\n\n for match in db_matches:\n match.orderedplayers = match.players.all().order_by('team')\n\n context['start_match'] = start_match\n context[\"matches\"] = db_matches\n\n return render(request, \"schedule/day\"+str(day)+\".html\", context)\n\n\nclass PlayerView(View):\n def get(self, request, player_id=None):\n\n if player_id:\n player = get_object_or_404(Player, id=player_id)\n else:\n return HttpResponse(\"Player not chosen\")\n\n winloss = player.get_wins()\n awards = player.format_awards()\n award_score = awards[0]\n score = winloss[0] + award_score\n availability = []\n date = [\"Jan 26\", \"Jan 27\", \"Feb 2\", \"Feb 3\"]\n for i in range(4):\n if player.is_available(i+1): availability.append(1)\n else:\n availability.append(0)\n\n # \"Mars 1\" \"Ceres 2\" \"Io 4\"\n preference = [bool(player.preference&1), bool(player.preference&2), bool(player.preference&4)]\n\n return render(request, \"playerdetail.html\", {\n \"player\": player, \"score\": score, \"award\": awards[1], \"availability\": zip(availability, date),\n \"loc\": preference,\n })\n\n\nclass MatchStagingView(View):\n # TODO:\n # Hovering over a player highlights all their occurances\n # Automatically sort matches\n # Multi select. When I click match frame, select all players in it.\n # Prevent veto'd locations\n #\n #\n #\n def get(self, request, day=None):\n if day: # TODO change p.name to anonmyous name\n players = [{'id': p.id, 'name': p.name + ' ({})'.format(p.bracket), 'bracket': p.bracket, 'team': p.team.id, 'played': p.get_num_matches()}\n for p in Player.objects.all() if p.is_available(day)]\n response_matches = [[{'id': p.id, 'name': p.name + ' ({})'.format(p.bracket), 'bracket': p.bracket, 'team': p.team.id, 'played': p.get_num_matches()}\n for p in m.get_players()] #SORRY!!!!!!!!!! 
CLOSE YOUR EYES!!! DONT READ THIS LINE!!!\n for m in Match.objects.filter(day=day, published=False)]\n return JsonResponse({'players': players, \"matches\": response_matches})\n else:\n return render(request, 'matchmaking/staging.html', {\"days\": range(1, 1 + constants.days),\n \"num_matches\": constants.matches})\n\n def post(self, request, day=None):\n Match.objects.filter(day=day, published=False).delete()\n if not request.POST:\n return HttpResponse(\"Found no matches\")\n\n json_response = parse_ajax_to_json(request.body)\n matches = json_response['matches']\n for i, match in enumerate(matches):\n players = match['players']\n if players:\n m = Match(day=day, published=False, mode=\"F\", round=i+1)\n m.save()\n for p in players:\n #parse through the weird transofmration i did in javascript\n parsed_id = p['id'][p['id'].find('-')+1:]\n parsed_id = parsed_id[:parsed_id.find('-')]\n player = Player.objects.get(id=parsed_id)\n m.players.add(player)\n assign_location(m)\n m.save()\n\n\n return HttpResponse(\"Done\")\n\n\nclass PlayersView(View):\n def get(self, request):\n teams = Team.objects.all().order_by('id')\n for t in teams:\n res = 0\n for player in t.player_set.all():\n res += player.get_score()\n t.teamscore = round(res,1)\n\n return render(request, 'players.html', {\"teams\": teams})\n\n\nclass ScoreMatchView(View):\n # TODO: stop taking num_awards as argument. instead add button to add or remove award forms at will in the template\n def get(self, request, match_id=None):\n if not match_id:\n return HttpResponse(\"Bad match id\")\n match = Match.objects.get(id=match_id)\n if match.result:\n return HttpResponse(\"Match already scored!\")\n players = match.get_players()\n scoreform = get_scorematch_form(players)(prefix='matchresult')\n awardform = get_award_formset(players)(prefix='award')\n return render(request, 'matchmaking/scorematch.html', {\"scoreform\": scoreform,\n \"awardformset\": awardform,\n \"match_id\": match_id})\n\n def post(self, request, match_id=None):\n if not match_id:\n return HttpResponse(\"Didn't get a match id in POST\")\n match = Match.objects.get(id=match_id)\n if match.result:\n return HttpResponse(\"Match already scored? from POST\")\n players = match.get_players()\n score_form = get_scorematch_form(players)(request.POST, prefix='matchresult')\n award_form = get_award_formset(players)(request.POST, prefix='award')\n\n if score_form.is_valid():\n print(score_form)\n\n if score_form.is_valid() and award_form.is_valid():\n if len(score_form.cleaned_data) > 1:\n return HttpResponse(\"Multiple score forms sent to ScoreMatchView - not supported\")\n score_form = score_form[0]\n matchwinner = score_form.cleaned_data['matchwinner']\n matchresult = MatchResult(winner=matchwinner)\n matchresult.save()\n\n for p in players:\n b, s = calc_rating_change(p.bracket, p.stars, p == matchwinner, len(match.get_players()))\n ratingchange = RatingChange(player=p, matchresult=matchresult, bracket_before=p.bracket, bracket_after=b,\n stars_before=p.stars, stars_after=s)\n ratingchange.save()\n p.bracket = ratingchange.bracket_after\n p.stars = ratingchange.stars_after\n p.save()\n\n for award in award_form.cleaned_data:\n Award(player=award['winner'], award=award['award'], match=matchresult).save()\n\n match.result = matchresult\n match.save()\n\n if 'from' in request.GET:\n return redirect(request.GET['from'])\n\n return redirect('admin:index')\n\n return HttpResponse(\"Not good\")\n # in ffa, winner gets 2 stars. 
in 1v1, winner gets 1 star\n # everyone else loses one star\n # if i go to -1 star bracket is -1 and star = 3\n # if i to to 4+ stars, bracket is +1 and star = 1\n\n\nclass MainView(View):\n def get(self, request):\n return render(request, 'matchmaking/matchmaking.html', {\"object_list\": Match.objects.all()})\n\n\nclass RemoveMatchView(View):\n def post(self, request, match_id=None):\n match = get_object_or_404(Match, id=match_id)\n match.delete()\n return HttpResponse(\"OK\")\n\n\nclass MatchView(View):\n def get(self, request, match_id=None, rnd=0):\n r = rnd\n if match_id:\n ans = get_object_or_404(Match, id=match_id)\n awd = ans.get_awards()\n else:\n return HttpResponse(\"shouldn't happen\")\n players = ans.players.all().order_by('team')\n if ans.result:\n for p in players:\n p.totalscore = len(ans.get_awards().filter(player=p)) * 0.2 + int(ans.result.winner == p)\n p.ratingchange = RatingChange.objects.filter(matchresult=ans.result, player=p)[0]\n\n return render(request, \"matchmaking/detail.html\", {\"match\": ans, \"round\": r, \"award\": awd, \"players\": players})\n\n def post(self, request):\n return HttpResponse(\"how did this happen?\")","repo_name":"johan-eriksson/dawn","sub_path":"tournament/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"19361321850","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def isSymmetric(self, root: Optional[TreeNode]) -> bool:\n # base case\n if root is None:\n return True\n else:\n return self.isMirror(root.left, root.right)\n \n def isMirror(self, left, right):\n # base case\n if left is None and right is None: return True \n if left is None or right is None: return False\n \n # if left and right are not none:\n if left.val == right.val:\n out_pair = self.isMirror(left.left, right.right)\n in_pair = self.isMirror(left.right, right.left)\n return out_pair and in_pair\n else: \n return False\n","repo_name":"namdang-exe/leetcode_challenge","sub_path":"SymmetricTree.py","file_name":"SymmetricTree.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"18799351447","text":"import sys\r\nimport heapq\r\n\r\nN = int(sys.stdin.readline())\r\nM = int(sys.stdin.readline())\r\n\r\ngraph = [[] for _ in range(N + 1)]\r\n\r\nfor _ in range(M):\r\n s, d, c = map(int, sys.stdin.readline().split())\r\n graph[s].append((d, c)) # 목적지와 비용 추가\r\n\r\nsta, desti = map(int, sys.stdin.readline().split())\r\n\r\ndistance = [float('inf')] * (N + 1) # 최소 비용을 무한대로 초기화\r\ndistance[sta] = 0 # 출발지 비용 초기화\r\n\r\nqueue = [(0, sta)] # 최소 비용과 노드 번호를 큐에 저장\r\n\r\nwhile queue:\r\n dist, now = heapq.heappop(queue) # 우선순위 큐에서 가장 적은 비용과 노드 번호 pop\r\n\r\n if distance[now] < dist: # 찐 최소비용을 찾기 위해 무시\r\n continue\r\n\r\n for nxt, cost in graph[now]: # 인접한 노드\r\n if distance[nxt] > dist + cost: # 현재 노드를 거치는게 더 적다면\r\n distance[nxt] = dist + cost # 현재 노드까지의 최소 비용을 갱신\r\n heapq.heappush(queue, (distance[nxt], nxt)) # 우선순위 큐에 인접 노드와 비용을 push\r\n\r\nprint(distance[desti]) # 출발지에서 도착지까지의 최소 비용\r\n","repo_name":"AhnHz/Algorithm-study","sub_path":"백준/Gold/1916. 
최소비용 구하기/최소비용 구하기.py","file_name":"최소비용 구하기.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"638835765","text":"import pandas as pd\nimport datetime\n\nif __name__==\"__main__\":\n    client = pd.read_csv('client_handled.csv')\n    current_date = datetime.date(2000, 1, 1)\n    age = []\n\n    for index, row in client.iterrows():\n        born_date = datetime.datetime.strptime(row['birth_date'], \"%Y-%m-%d\")\n        temp_age = current_date.year - born_date.year - 1\n        # count the final year only once the reference (month, day) has reached the birthday\n        if (current_date.month, current_date.day) >= (born_date.month, born_date.day):\n            temp_age += 1\n        age.append(temp_age)\n    client['age'] = pd.Series(age)\n    client = client.drop(['birth_date'], axis=1)\n    client.to_csv('client_h_dropped.csv', index=False)","repo_name":"survivingME/DM_homework1","sub_path":"workshop/modi_client.py","file_name":"modi_client.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"33265644108","text":"\nimport scrapy\n\nfrom ..items import JobItem\n\n\nclass Spider(scrapy.Spider):\n    name = 'indeed'\n    start_urls = ['https://ar.indeed.com/jobs?q=python+developer&sort=date']\n\n    def parse(self, response):\n        cards = response.xpath('//div[@data-tn-component=\"organicJob\"]')\n        for card in cards:\n            link_element = card.css('h2.jobtitle > a')\n            title = link_element.css(\"::text\").extract_first()\n            href = 'https://ar.indeed.com{}'.format(\n                link_element.xpath('@href').extract_first()\n            )\n            job_id = card.xpath('@data-jk').extract_first()\n            desc = card.css('.summary::text').extract_first().strip()\n            company = card.css('.company::text').extract_first()\n            data = {\n                'job_id': job_id,\n                'title': title,\n                'link': href,\n                'description': desc,\n                'company': company,\n                'webpage': 'indeed.com'\n            }\n            JobItem(**data).save()\n","repo_name":"nicolascarbone/jobScraper","sub_path":"crawlers/crawlers/spiders/indeed.py","file_name":"indeed.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"37711947795","text":"import datetime\nimport logging\nimport urllib.parse\nimport requests\n\nfrom django.shortcuts import render\n\nfrom config.settings import MAPQUESTAPI_KEY, ABSTRACTAPI_KEY, ABSTRACTAPI_GEO_KEY\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_location_by_IP() -> dict:\n    location_by_IP_api = \"https://ipgeolocation.abstractapi.com/v1/?\"\n    location_by_IP_url = location_by_IP_api + urllib.parse.urlencode(\n        {\"api_key\": ABSTRACTAPI_GEO_KEY}\n    )\n    json_data = requests.get(location_by_IP_url).json()\n    error = f\"{json_data['error']['message']}: {json_data['error']['details']}\" if \"error\" in json_data else None\n    current_location = f\"{json_data['city']}, {json_data['country']}\" if not error else None\n    return {\n        \"current_location\": current_location,\n        \"error\": error\n    }\n\n\ndef get_latlng(target_location: str) -> dict:\n    lat, lng, error = None, None, None\n\n    target_coordinates_api = \"http://www.mapquestapi.com/geocoding/v1/address?\"\n    target_coordinates_url = target_coordinates_api + urllib.parse.urlencode(\n        {\"key\": MAPQUESTAPI_KEY, \"location\": target_location}\n    )\n\n    json_data = requests.get(target_coordinates_url).json()\n    json_status = json_data[\"info\"][\"statuscode\"]\n\n    if json_status == 0:\n        lat = json_data[\"results\"][0][\"locations\"][0][\"latLng\"][\"lat\"]\n        lng = 
json_data[\"results\"][0][\"locations\"][0][\"latLng\"][\"lng\"]\n else:\n error = f\"Status Code: {json_status}; Refer to: {json_data['info']['messages'][0]}\"\n return {\"lat\": lat, \"lng\": lng, \"error\": error}\n\n\ndef get_sun_iso8601(latlng: dict) -> dict:\n sunrise_iso8601, sunset_iso8601, error = None, None, None\n\n sun_api = \"https://api.sunrise-sunset.org/json?\"\n latlng[\"formatted\"] = 0\n sun_url = sun_api + urllib.parse.urlencode(latlng)\n\n json_data = requests.get(sun_url).json()\n json_status = json_data[\"status\"]\n\n if json_status == \"OK\":\n sunrise_iso8601 = json_data[\"results\"][\"sunrise\"]\n sunset_iso8601 = json_data[\"results\"][\"sunset\"]\n elif json_status == \"INVALID_REQUEST\":\n error = \"Either lat or lng parameters are missing or invalid\"\n elif json_status == \"INVALID_DATE\":\n error = \"Date parameter is missing or invalid\"\n elif json_status == \"UNKNOWN_ERROR\":\n error = \"Request could not be processed due to a server error. Please try again.\"\n else:\n error = \"Unknown error occurred!\"\n return {\"sunrise\": sunrise_iso8601, \"sunset\": sunset_iso8601, \"error\": error}\n\n\ndef get_UTC_from_iso8601(timestamp: str) -> str:\n dt_timestamp = datetime.datetime.strptime(timestamp, \"%Y-%m-%dT%H:%M:%S%z\")\n return dt_timestamp.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n\ndef get_localtime(target_location: str, timestamp: str, current_location: str) -> dict:\n sun_time, error = None, None\n\n convert_time_api = \"https://timezone.abstractapi.com/v1/convert_time?\"\n convert_time_url = convert_time_api + urllib.parse.urlencode(\n {\"api_key\": ABSTRACTAPI_KEY,\n \"base_location\": target_location,\n \"base_datetime\": timestamp,\n \"target_location\": current_location}\n )\n\n json_data = requests.get(convert_time_url).json()\n\n if \"error\" in json_data:\n error = f\"{json_data['error']['message']}: {json_data['error']['details']}\"\n sun_time = json_data['target_location']['datetime']\n\n return {\"sun_time\": sun_time, \"error\": error}\n\n\ndef index(request):\n if request.method == 'POST':\n\n # Get current location by POST request or by IP\n current_location = request.POST.get(\"current_location\")\n if current_location is None:\n location_response = get_location_by_IP()\n if location_response[\"error\"] is not None:\n render(request, 'index.html', {'error': location_response[\"error\"]})\n current_location = location_response[\"current_location\"]\n logger.debug(\"current_location >>> \" + current_location)\n\n # Get target location by POST reques\n target_location = request.POST.get(\"target_location\")\n logger.debug(\"target_location >>> \" + target_location)\n\n # get lat and lng from address\n latlng = get_latlng(target_location)\n if latlng[\"error\"] is not None:\n render(request, 'index.html', {'error': latlng[\"error\"]})\n del latlng[\"error\"]\n logger.debug(\"lat >>> \" + str(latlng[\"lat\"]) + \"\\tlng >>> \" + str(latlng[\"lng\"]))\n\n # get sunrise and sunset in iso8601 format\n sun_iso8601 = get_sun_iso8601(latlng)\n if sun_iso8601[\"error\"] is not None:\n render(request, 'index.html', {'error': sun_iso8601[\"error\"]})\n\n # convert iso8601 to UTC datetime format\n sunrise_utc = get_UTC_from_iso8601(sun_iso8601[\"sunrise\"])\n sunset_utc = get_UTC_from_iso8601(sun_iso8601[\"sunset\"])\n logger.debug(\"sunrise_utc >>> \" + sunrise_utc + \"\\tsunset_utc >>> \" + sunset_utc)\n\n # get local time sunrise\n response = get_localtime(target_location, sunrise_utc, current_location)\n if response[\"error\"] is not None:\n 
return render(request, 'index.html', {'error': response[\"error\"]})\n        sunrise = response[\"sun_time\"]\n\n        # get local time sunset\n        response = get_localtime(target_location, sunset_utc, current_location)\n        if response[\"error\"] is not None:\n            return render(request, 'index.html', {'error': response[\"error\"]})\n        sunset = response[\"sun_time\"]\n\n        logger.debug(\"sunrise >>> \" + sunrise + \"\\tsunset >>> \" + sunset)\n        return render(request, 'index.html', {\n            'current_location': current_location,\n            'target_location': target_location,\n            'sunrise': sunrise,\n            'sunset': sunset\n        })\n    return render(request, 'index.html')\n","repo_name":"Vostbur/DevOps-MDP-03","sub_path":"1.3.APIs_and_Parsing_JSON/option_2/config/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5763,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"38176769134","text":"\n\nclass WashingGroup:\n    \n    def __init__(self, id, garmentsDict, garmentsDictAux):\n        self.garments = []\n        self.id = id\n        self.dict = garmentsDict\n        self.auxDict = garmentsDictAux\n        self.allowed = set()\n        self.garmentsInGroup = set()\n\n    \n    def addGament(self, newGament):\n        self.garments.append(newGament)\n        del self.dict[newGament.nro]\n        self.garmentsInGroup.add(newGament.nro)\n        self.remainingGaments = set(self.dict.keys())\n        self.allowed = self.allowed.union(self.remainingGaments.difference(newGament.incompatibleGarments))\n    \n    \n    def addGarments(self):\n        \n        self.checkGarmentsLeft()\n        \n        return self.garments \n\n\n    def checkGarmentsLeft(self):\n        goOn = True\n        while goOn:\n            nroBetterGament = -1\n            maxTime = -1\n            \n            for nro in self.allowed:\n\n                if nro in self.garmentsInGroup:\n                    continue\n                \n                elif self.canEnter(nro):\n                    if self.dict[nro].washTime > maxTime:\n                        maxTime = self.dict[nro].washTime\n                        nroBetterGament = nro\n\n                    elif self.dict[nro].washTime == maxTime and self.isBetterOption(nro,nroBetterGament):\n                        nroBetterGament = nro\n\n            if nroBetterGament == -1:\n                goOn = False\n            \n            else:\n                self.addGament(self.dict[nroBetterGament])\n\n\n    def isBetterOption(self, nro, actualNro):\n        \n        actualIncompatibilities = set()\n        newIncompatibilities = set()\n        incompatibilities = set()\n        actualTime = 0\n        newTime = 0\n\n        for i in self.garmentsInGroup:\n            if i != nro and i != actualNro:\n                incompatibilities = incompatibilities.union(self.auxDict[i].incompatibleGarments)\n\n        actualIncompatibilities = incompatibilities.union(self.auxDict[actualNro].incompatibleGarments)\n        newIncompatibilities = incompatibilities.union(self.auxDict[nro].incompatibleGarments)\n\n        if len(newIncompatibilities) < len(actualIncompatibilities):\n            return True\n\n        for i in newIncompatibilities:\n            newTime += self.auxDict[i].washTime\n        \n        for i in actualIncompatibilities:\n            actualTime += self.auxDict[i].washTime\n\n        return (newTime > actualTime)\n    \n\n\n    def canEnter(self, nro):\n        for garment in self.garments:\n            if self.areImcompatibles(garment,self.dict[nro]):\n                return False\n        \n        return True\n\n\n    def areImcompatibles(self,aGarment, anotherGarment):\n        return (aGarment.isIncompatible(anotherGarment) or anotherGarment.isIncompatible(aGarment))\n    \n\n    def getTotalTime(self):\n        time = 0\n        for aGarment in self.garments:\n            if time < aGarment.washTime:\n                time = aGarment.washTime\n        \n        return time\n\n    \n    def getGarments(self):\n        r = []\n        for g in self.garments:\n            r.append([g.nro, self.id])\n\n        return r\n    \n    
\n\n\n\n","repo_name":"alfo777/modelos_tp2","sub_path":"WashingGroup.py","file_name":"WashingGroup.py","file_ext":"py","file_size_in_byte":3174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"27942077718","text":"\nimport speakandrecognize as sar\nimport datetime\nimport wikipedia\nimport random\nimport os\n\ndef wishme():\n hour = int(datetime.datetime.now().hour)\n if hour >=0 and hour<=12:\n sar.speak(\"Good morning ! \")\n elif hour>=12 and hour <18:\n sar.speak(\"Good aftenoon ! \")\n else:\n sar.speak(\"Good evening! \")\n sar.speak(\"How may i help you ?\")\n\n\ndef wiki(query):\n try:\n print(\"Searching on Wikipedia.....\")\n sar.speak('Searching on Wikipedia.....')\n \n query = query.replace(\"search\",\"\")\n query = query.replace(\"wikipedia\",\"\")\n query = query.replace(\"on\",\"\")\n\n results = wikipedia.summary (query,sentences=2)\n sar.speak(\"According to wikipedia - \")\n sar.speak(results)\n except :\n print(\"error- KAREN did not recognized what you said \")\n sar.speak(\"I did not get it. Try saying that again\")\n","repo_name":"shivanggarg1998/KAREN","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"18938394664","text":"# -*- coding: utf-8 -*-\n\nfrom .makeSentence import MakeSentence\nfrom common_lib import util\n\nclass ShotMakeSentence(MakeSentence):\n\n # 返却文章を作る \n def makeSentence(self,dictionary,request):\n \n dicReturn = {'sentence':'','what_ask':''}\n \n # 質問内容\n what_ask = request.POST[\"what_ask\"]\n util.log(\"what_ask------------------------\")\n util.log(what_ask)\n\n if what_ask == 'LOC':\n if dictionary['LOC']['val'] != '':\n if dictionary['LOC']['if_true'] == '1':\n dicReturn['sentence'] = '続いて希望の職種はなんですか'\n dicReturn['what_ask'] = 'JOB'\n else:\n dicReturn['sentence'] = 'それは地名ですか?'\n dicReturn['what_ask'] = 'LOC_CONFIRM'\n \n else:\n dicReturn['sentence'] = 'すみません、もう一度希望勤務地をお聞かせください。'\n dicReturn['what_ask'] = 'LOC'\n elif what_ask == 'LOC_CONFIRM' and dictionary['LOC']['if_true'] == '1':\n dicReturn['sentence'] = '続いて希望の職種はなんですか'\n dicReturn['what_ask'] = 'JOB'\n \n elif what_ask == 'JOB': \n if dictionary['JOB']['val'] != '':\n dicReturn['sentence'] = '給与はいくらぐらいを希望されていますか'\n dicReturn['what_ask'] = 'MONEY'\n else:\n dicReturn['sentence'] = 'すみません、職種に関してよくわからなかったので、もう一度お願いします。'\n dicReturn['what_ask'] = 'JOB'\n\n\n elif what_ask == 'MONEY': \n if dictionary['MONEY']['val'] != '':\n dicReturn['sentence'] = \"\"\"確認させていただきます \n 希望勤務地:\"\"\"+dictionary['LOC']['val']+\"\"\" \n 希望職種:\"\"\"+dictionary['JOB']['val']+\"\"\"\n 希望給与:\"\"\"+dictionary['MONEY']['val']+\"\"\"\n こちらでお間違いないでしょうか。\"\"\"\n dicReturn['what_ask'] = 'CONFIRM'\n else:\n dicReturn['sentence'] = 'すみません、給与に関してよくわからなかったので、もう一度お願いします。'\n dicReturn['what_ask'] = 'MONEY'\n\n\n util.log(dicReturn)\n \n return dicReturn\n\n","repo_name":"shiratsu/hiratsuka_django","sub_path":"conv_lib/ShotMakeSentence.py","file_name":"ShotMakeSentence.py","file_ext":"py","file_size_in_byte":2466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"41684609837","text":"import base64\nimport ctypes\nimport time\nimport logging\nfrom ctypes import c_ulong, wintypes\n\nimport cv2\nimport numpy as np\n\n\nclass DisplayPattern:\n def __init__(self):\n t = time.localtime()\n\n log_name = \"log/\" + \\\n str(t.tm_year) + \"-\" + \\\n str(t.tm_mon) + 
\"-\" +\\\n str(t.tm_mday) + \"-\" + \\\n str(t.tm_hour) + \"-\" +\\\n str(t.tm_min) + \"-\" +\\\n str(t.tm_sec) + \".log\"\n\n log_format=\"<TIME>%(asctime)s <LINE>%(lineno)d %(funcName)s <%(levelname)s> %(message)s\"\n\n logging.basicConfig(format=log_format,filename=log_name, filemode='w', level=logging.DEBUG)\n\n self.dis_num_dic = {}\n self.monitor_handle_position_dic = {}\n self.is_brightness_stop_change_flag = {\n \"top\": True, \"n\": True, \"w\": True, \"s\": True, \"e\": True}\n self.brightness_change_delay_time = {}\n self.last_change_brightness_time = {}\n\n self.dxva2 = ctypes.windll.Dxva2\n\n import win32api\n\n monitor_list = win32api.EnumDisplayMonitors()\n\n for current_monitor in monitor_list:\n\n monitor_info = win32api.GetMonitorInfo(current_monitor[0])\n\n display_num = monitor_info[\"Device\"].lstrip(\"\\\\.\\\\\")\n monitor_handle_value = current_monitor[0].handle\n monitro_left_top_x = monitor_info[\"Monitor\"][0]\n monitro_left_top_y = monitor_info[\"Monitor\"][1]\n\n self.monitor_handle_position_dic[display_num] = [\n monitor_handle_value,\n (monitro_left_top_x, monitro_left_top_y),\n ]\n\n def show_display_number(self):\n\n try:\n for monitor_handle_position_dic in self.monitor_handle_position_dic:\n x, y = self.monitor_handle_position_dic[monitor_handle_position_dic][1]\n img_white_bg = np.zeros((1920, 1920, 3), np.uint8)\n img_white_bg[:] = (255, 255, 255)\n text = monitor_handle_position_dic\n cv2.putText(\n img_white_bg,\n text,\n (495, 1020),\n cv2.FONT_HERSHEY_TRIPLEX,\n 6,\n (0, 0, 0),\n 8,\n cv2.LINE_AA,\n )\n cv2.namedWindow(text, cv2.WINDOW_NORMAL)\n cv2.setWindowProperty(\n text, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)\n cv2.moveWindow(text, x, y)\n cv2.imshow(text, img_white_bg)\n cv2.waitKey(1)\n logging.info(\"OK\")\n return True\n\n except Exception as ex:\n logging.error(str(ex))\n return False\n\n def set_direction(\n self,\n dis_num_top: str,\n dis_num_n: str,\n dis_num_w: str,\n dis_num_s: str,\n dis_num_e: str,\n ):\n\n try:\n dis_num_top = dis_num_top.upper()\n dis_num_n = dis_num_n.upper()\n dis_num_w = dis_num_w.upper()\n dis_num_s = dis_num_s.upper()\n dis_num_e = dis_num_e.upper()\n\n if dis_num_top[:7] == \"DISPLAY\":\n self.dis_num_dic[\"top\"] = dis_num_top\n else:\n return False, ('top', dis_num_top)\n\n if dis_num_n[:7] == \"DISPLAY\":\n self.dis_num_dic[\"n\"] = dis_num_n\n else:\n return False, ('n', dis_num_n)\n\n if dis_num_w[:7] == \"DISPLAY\":\n self.dis_num_dic[\"w\"] = dis_num_w\n else:\n return False, ('w', dis_num_w)\n\n if dis_num_s[:7] == \"DISPLAY\":\n self.dis_num_dic[\"s\"] = dis_num_s\n else:\n return False, ('s', dis_num_s)\n\n if dis_num_e[:7] == \"DISPLAY\":\n self.dis_num_dic[\"e\"] = dis_num_e\n else:\n return False, ('e', dis_num_e)\n\n logging.info(\"OK\")\n return True\n\n except Exception as ex:\n logging.error(str(ex))\n return False\n\n def get_brightness(self, direction: str):\n\n try:\n display_num = self.dis_num_dic[direction]\n monitor_handle = self.monitor_handle_position_dic[display_num][0]\n\n # Specify physical mointor\n physical_monitor = (PHYSICAL_MONITOR * 1)()\n self.dxva2.GetPhysicalMonitorsFromHMONITOR(\n monitor_handle, c_ulong(1), physical_monitor\n )\n\n # Get Minimum/Current/Maximum of brightness\n min_brightness = wintypes.DWORD()\n max_brightness = wintypes.DWORD()\n current_brightness = wintypes.DWORD()\n self.dxva2.GetMonitorBrightness(\n physical_monitor[0].hPhysicalMonitor,\n ctypes.byref(min_brightness),\n ctypes.byref(current_brightness),\n ctypes.byref(max_brightness),\n 
)\n logging.info(\"OK\")\n return True, current_brightness.value\n\n except Exception as ex:\n logging.error(str(ex))\n return False, str(ex)\n\n def set_brightness(self, direction: str, target_brightness: int):\n\n try:\n if not self.is_brightness_stop_change_flag[direction]:\n\n sotp_change_brightness_time = (\n self.brightness_change_delay_time[direction] +\n self.last_change_brightness_time[direction]\n )\n\n if time.time() >= sotp_change_brightness_time:\n self.is_brightness_stop_change_flag[direction] = True\n\n else:\n\n brightness_change_time_left = sotp_change_brightness_time - time.time()\n logging.info(\"Brightness still change: \" + str(round(brightness_change_time_left, 2)))\n return False, round(brightness_change_time_left, 2)\n\n if self.is_brightness_stop_change_flag[direction]:\n currnet_brightness = self.get_brightness(direction)[1]\n brightness_difference = abs(\n currnet_brightness - target_brightness)\n\n self.brightness_change_delay_time[direction] = brightness_difference / 3.33\n display_num = self.dis_num_dic[direction]\n monitor_handle = self.monitor_handle_position_dic[display_num][0]\n\n # Specify physical mointor\n physical_monitor = (PHYSICAL_MONITOR * 1)()\n self.dxva2.GetPhysicalMonitorsFromHMONITOR(\n monitor_handle, c_ulong(1), physical_monitor\n )\n\n # Modify brightness\n self.dxva2.SetMonitorBrightness(\n physical_monitor[0].hPhysicalMonitor, target_brightness\n )\n\n if brightness_difference > 0:\n self.is_brightness_stop_change_flag[direction] = False\n self.last_change_brightness_time[direction] = time.time()\n logging.info(\"OK\")\n return True, round(self.brightness_change_delay_time[direction], 2)\n\n except Exception as ex:\n logging.error(str(ex))\n return False, str(ex)\n\n def set_all_brightness(self, target_brightness: int):\n try:\n\n is_assert, err_msg = self.set_brightness('top', target_brightness)\n err_msg = ('top', err_msg)\n assert is_assert\n\n is_assert, err_msg = self.set_brightness('n', target_brightness)\n err_msg = ('n', err_msg)\n assert is_assert\n\n is_assert, err_msg = self.set_brightness('w', target_brightness)\n err_msg = ('w', err_msg)\n assert is_assert\n\n is_assert, err_msg = self.set_brightness('s', target_brightness)\n err_msg = ('s', err_msg)\n assert is_assert\n\n is_assert, err_msg = self.set_brightness('e', target_brightness)\n err_msg = ('e', err_msg)\n assert is_assert\n logging.info(\"OK\")\n return True\n\n except Exception as ex:\n logging.error(str(ex))\n logging.error(err_msg)\n return False\n\n def set_img(self, direction: str, img_base64: str):\n\n try:\n if True:\n display_num = self.dis_num_dic[direction]\n x, y = self.monitor_handle_position_dic[display_num][1]\n\n img_base64_decode = base64.b64decode(img_base64)\n img_np_ndarray = np.frombuffer(img_base64_decode, dtype=np.uint8)\n img_source = cv2.imdecode(img_np_ndarray, 1)\n\n # Set Window Title\n cv2.namedWindow(direction, cv2.WINDOW_NORMAL)\n\n # Set Window Property - FULLSCREEN\n cv2.setWindowProperty(direction, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)\n\n # Move Window - Position(x, y)\n cv2.moveWindow(direction, x, y)\n\n # Show Image Window\n cv2.imshow(direction, img_source)\n cv2.waitKey(1000)\n logging.info(\"OK\")\n return True\n\n except Exception as ex:\n logging.error(str(ex))\n return False\n\n def set_brightness_img(self, direction: str, target_brightness: int, img_base64: str):\n\n try:\n is_assert = self.set_img(direction, img_base64)\n assert is_assert\n is_assert, err_msg = self.set_brightness(\n direction, 
target_brightness)\n assert is_assert\n logging.info(\"OK\")\n return True, err_msg\n\n except Exception as ex:\n logging.error(str(ex))\n return False, str(ex)\n\n def close_all_window(self):\n\n try:\n cv2.destroyAllWindows()\n logging.info(\"OK\")\n return True\n\n except Exception as ex:\n logging.error(str(ex))\n return False\n\n def close_window(self, direction: str):\n\n try:\n cv2.destroyWindow(direction)\n logging.info(\"OK\")\n return True\n\n except Exception as ex:\n logging.error(str(ex))\n return False\n\n\nclass PHYSICAL_MONITOR(ctypes.Structure):\n _fields_ = [\n (\"hPhysicalMonitor\", wintypes.HANDLE),\n (\n \"szPhysicalMonitorDescription\",\n ctypes.c_wchar * 128,\n ),\n ]\n\n\nif __name__ == \"__main__\":\n dp = DisplayPattern()\n\n import base64\n\n with open(\"pattern_top.png\", \"rb\") as image_file:\n encoded_string = base64.b64encode(image_file.read())\n\n msg = dp.set_direction(\"DISPLAY4\", \"DISPLAY5\",\n \"DISPLAY6\", \"DISPLAY1\", \"DISPLAY2\")\n print(msg)\n # print(dp.dis_num_dic)\n msg = dp.get_brightness(\"top\")\n print(msg)\n dp.set_img(\"top\", encoded_string)\n time.sleep(5)\n\n dp.close_window(\"top\")\n","repo_name":"U07157135/PyQtProject","sub_path":"display_setting/display_pattern.py","file_name":"display_pattern.py","file_ext":"py","file_size_in_byte":10902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"12780556956","text":"from PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nfrom logally import model\n\n\nKNOWN_REGEXPS = {\n \"syslog\":\n {\n \"regexp\": r'(?P<m>[0-9A-z]+)\\s+' \\\n '(?P<d>[0-9]+)\\s+' \\\n '(?P<H>[0-9]+)[^0-9](?P<M>[0-9]+)[^0-9](?P<S>[0-9]+)\\s+' \\\n '(?P<host>[^ ]+)\\s+' \\\n '(?P<source>[^:]+):\\s+' \\\n '(?P<msg>.*)$',\n \"columns\": {\n \"timestamp\": \"{m} {d} {H}:{M}:{S}\",\n \"host\": \"{host}\",\n \"source\": \"{source}\",\n \"msg\": \"{msg}\"\n }\n },\n}\n\n\nclass RegexpTable(model.Table):\n def __init__(self, regexp=None, skip_rows=0):\n pass\n","repo_name":"parkisan/logally","sub_path":"logally/plugins/tables/regexp_table.py","file_name":"regexp_table.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"3970893228","text":"from os import environ\n\nimport requests\n\n\nclass SpotifyAPIError(Exception):\n pass\n\n\ndef _get_token() -> str:\n # This two values are obtained following \"Authorization Code Flow\" in\n # https://developer.spotify.com/documentation/general/guides/authorization-guide/\n client_id = environ.get('SPOTIFY_CLIENT_B64')\n refresh_token = environ.get('SPOTIFY_REFRESH_TOKEN')\n body = dict(grant_type='refresh_token', refresh_token=refresh_token)\n headers = dict(Authorization=f'Basic {client_id}')\n response = requests.post(\n \"https://accounts.spotify.com/api/token\", data=body, headers=headers)\n if response.status_code == 200:\n return response.json().get('access_token')\n else:\n raise SpotifyAPIError(response.json())\n\n\ndef get_tempo(track_id: str) -> float:\n \"\"\"\n Get song tempo (BPM) from Spotify API.\n\n :param track_id: Song id in Spotify.\n :return: Song tempo.\n \"\"\"\n token = _get_token()\n headers = dict(Authorization=f'Bearer {token}')\n endpoint = f'https://api.spotify.com/v1/audio-features/{track_id}'\n response = requests.get(endpoint, headers=headers)\n if response.status_code == 200:\n return response.json().get('tempo')\n else:\n raise 
SpotifyAPIError(response.json())\n","repo_name":"Fredy/Missing-Beats","sub_path":"src/spotify.py","file_name":"spotify.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"3600964424","text":"from typing import Callable,NamedTuple, Union\nimport abc\n\nfrom algorithms import environment\nfrom workflow import reporting\nfrom algorithms import data\n\nimport numpy as np\nimport torch\n\n\nclass OnlineAgentParams(NamedTuple):\n batch_size: int\n discount_factor: float\n num_envs: int = 1\n num_steps: int = 1\n\n\nclass OnlineAgent:\n def __init__(self, make_env: Callable[[], environment.Environment[Union[np.ndarray, int]]], device: torch.device,\n params: OnlineAgentParams):\n self._params = params\n self._envs = [make_env() for _ in range(params.num_envs)]\n state_dim = self._envs[0].state_dim\n action_dim = self._envs[0].action_dim\n self._device = device\n\n self._states = torch.zeros((params.batch_size, state_dim)).pin_memory()\n self._actions = torch.zeros((params.batch_size, action_dim)).pin_memory()\n self._rewards = torch.zeros((params.batch_size,)).pin_memory()\n self._bootstrap_weights = torch.zeros((params.batch_size,)).pin_memory()\n self._bootstrap_states = torch.zeros((params.batch_size, state_dim)).pin_memory()\n self._bootstrap_actions = torch.zeros((params.batch_size, action_dim)).pin_memory()\n\n reporting.register_field(\"return\")\n\n def update(self):\n i = 0\n while i < self._params.batch_size:\n env = np.random.choice(self._envs)\n j = i\n for t in range(self._params.num_steps):\n if env.needs_reset:\n reporting.iter_record(\"return\", env.cumulative_return())\n env.reset()\n break\n\n state = env.state\n action = np.atleast_1d(self.sample_action(state))\n next_state, reward, is_terminal, _ = env.step(action)\n\n if is_terminal:\n self._bootstrap_weights[i] = 0.\n else:\n self._bootstrap_weights[i] = self._params.discount_factor\n self._rewards[i] = reward\n self._states[i] = torch.Tensor(state)\n self._actions[i] = torch.Tensor(action)\n self._bootstrap_states[i] = torch.Tensor(next_state)\n\n for past in range(j, i):\n self._rewards[past] += reward * self._params.discount_factor ** (i + 1 - past)\n self._bootstrap_states[past] = self._bootstrap_states[i]\n self._bootstrap_weights[past] *= self._bootstrap_weights[i]\n\n i += 1\n\n if i >= self._params.batch_size:\n break\n\n td_batch = data.TDBatch(\n states=self._states.to(self._device),\n actions=self._actions.to(self._device),\n intermediate_returns=self._rewards.to(self._device),\n bootstrap_weights=self._bootstrap_weights.to(self._device),\n bootstrap_states=self._bootstrap_states.to(self._device),\n bootstrap_actions=self._bootstrap_actions.to(self._device))\n\n self._update(td_batch)\n\n @abc.abstractmethod\n def sample_action(self, state: np.ndarray) -> Union[np.ndarray, int]:\n pass\n\n @abc.abstractmethod\n def _update(self, batch: data.TDBatch):\n pass\n","repo_name":"yschroecker/universal_value_density_estimation","sub_path":"algorithms/agents/common/online_agent.py","file_name":"online_agent.py","file_ext":"py","file_size_in_byte":3241,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"19129166963","text":"from typing import List\nfrom collections import defaultdict\n\n\nclass Solution:\n # O(N) time and space\n def maximumPopulation(self, logs: List[List[int]]) -> int:\n res = 0\n count = defaultdict(int)\n for b, d in logs:\n count[b] += 1\n 
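# difference array: +1 at each birth year, -1 at each death year; prefix sums below give the population\n            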
count[d] -= 1\n\n for i in range(1950, 2051):\n count[i] += count[i - 1]\n res = res if count[res] >= count[i] else i\n\n return res\n","repo_name":"Semeriuss/Data-Structures-and-Algorithms","sub_path":"squid_game_a2sv/round_4/day_27/maxPopulation.py","file_name":"maxPopulation.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"20000452399","text":"class TrieNode:\n def __init__(self):\n self.children = {}\n self.end = False\n\n def addWord(self, word):\n node = self\n for c in word:\n if node.children.get(ord(c) - ord('a')) is None:\n node.children[ord(c) - ord('a')] = TrieNode()\n\n node = node.children[ord(c) - ord('a')]\n node.end = True\n\n\nclass WordFilter(object):\n\n def __init__(self, words):\n \"\"\"\n :type words: List[str]\n \"\"\"\n self.maps = {}\n self.head = TrieNode()\n\n for i in range(len(words)):\n word = words[i]\n self.maps[word] = i\n\n word = '#' + word\n self.head.addWord(word)\n\n suffix = ''\n for j in range(len(word) - 1, 0, -1):\n suffix = word[j] + suffix\n self.head.addWord(suffix + word)\n\n def f(self, prefix, suffix):\n \"\"\"\n :type prefix: str\n :type suffix: str\n :rtype: int\n \"\"\"\n temp = suffix + '#' + prefix\n\n node = self.head\n res = []\n\n for c in temp:\n if node.children.get(ord(c) - ord('a')) is None:\n return -1\n node = node.children[ord(c) - ord('a')]\n\n def helper(curr, node):\n if node.end:\n res.append(curr)\n\n for key in node.children.keys():\n c = chr(key + ord('a'))\n helper(curr + c, node.children[key])\n\n helper(prefix, node)\n result = -1\n\n for word in res:\n if self.maps[word] > result:\n result = self.maps[word]\n\n return result\n\n# Your WordFilter object will be instantiated and called as such:\n# obj = WordFilter(words)\n# param_1 = obj.f(prefix,suffix)","repo_name":"hansxiao7/leetcode-python","sub_path":"0745--Prefix and Suffix Search/solution1.py","file_name":"solution1.py","file_ext":"py","file_size_in_byte":1773,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"1419473857","text":"from WaveLoader import WaveLoader\nimport torch\n\n# sox convert\n# sox input.wav -r 8000 -c 1 output.wav\n\nsample = './pemex8k.wav'\nwl = WaveLoader(sample) # type WaveLoader\ndata, label = wl.get_sample()\n\nprint(data.shape, label.shape)\n\n# Write junk\nt = torch.rand((5, 8000))\nt = torch.mul(t, 65535)\nt = torch.sub(t, 65535/2)\n\nwl.write_wave(t, './output.wav', 1, 2, 8000)\n\n\n#write pemex\nbuff = [data.tolist()]\nfor x in range(25):\n data, label = wl.get_sample()\n buff.append(data.tolist())\n\npemex = torch.tensor(buff)\nwl.write_wave(pemex, './output-pemex.wav', 1, 2, 8000)\n\nwl.play_sample_tensor(pemex, 1, 2, 8000)","repo_name":"Echooff3/PythonStuff","sub_path":"WaveLoader/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"32166641825","text":"import bigcommerce\nimport math\nimport MySQLdb\nimport _mysql\nimport datetime\nimport csv\n\ndb = _mysql.connect(\"localhost\",user=\"root\",passwd=\"Economics1$&*\",db=\"BigCommerce\")\n\ndb.query(\"\"\"Select store_address, username, api_key from dashfacts_Clients\"\"\")\n\nstore_list = db.store_result()\n\nstore_tuple = store_list.fetch_row()\n\nfor i in store_tuple:\n\tbigcommerce.Connection.api_key =i[2]\n\t\n\tbigcommerce.Connection.user =i[1]\n\t\n\tbigcommerce.Connection.host = 
i[0]\n\t\n\tordercount = 1\n\t\n\tcol = ['id',\n 'name',\n 'type',\n 'amount',\n 'min_purchase',\n 'expires',\n 'enabled',\n 'code',\n 'applies_to',\n 'num_uses',\n 'max_uses',\n 'max_uses_per_customer',\n 'restricted_to',\n 'shipping_methods']\n\t\n\ta=[col]\n\t\n\tdata = []\n\t\n\tfor n in xrange(1,ordercount+1):\n\t\torders = bigcommerce.Coupons.get(n)\n\t\tfor o in orders:\n\t\t\td=[] \n\t\t\tfor c in col: \n\t\t\t\tb=getattr(o,c) \n\t\t\t\td.append(b)\n\t\t\tdata.append(d)\n \n\tfor n in xrange(0, len(data)):\n\t\tdata[n].insert(0,i[0])\n\ncol.insert(0,'store_name')\n\ndata.insert(0,col)\n\nbill=[]\n\n\napp= [['ids','entity']]\nfor d in data[1:]:\n\tv=d[9]\n\ty=[]\n\tif type(v)==list:\n\t\ty=['','']\n\telse:\t\n\t\tfor i in v.itervalues():\n\t\t\ty.append(str(i))\n\tapp.append(y)\n\nres=[['restricted_to']]\nfor d in data[1:]:\n\tt=d[13]\n\ty=[]\n\tif type(t)==list:\n\t\ty=['']\n\telse: \n\t\tfor i in t.itervalues():\n\t\t\ty.append(str(i))\n\tres.append(y)\n\n\ni=0\n\nfor v in app:\n\tdata[i].pop(9)\n\tfor a in v:\n\t\tdata[i].insert(9,a)\n\ti=i+1\n\ni=0\n\nfor v in res:\n\tdata[i].pop(14)\n\tfor a in v:\n\t\tdata[i].insert(14,a)\n\ti=i+1\n\nnow = datetime.datetime.now()\n\ndate = now.strftime(\"%Y%m%d\")\n\na ='/home/mjhabiger/bigcommerce/hist_data/Coupons%s.txt' % date\n\n##data=str(data)\n\nwith open(a,'w') as the_file:\n csv.register_dialect(\"custom\",delimiter=\"|\", skipinitialspace=True)\n writer = csv.writer(the_file, dialect=\"custom\")\n for t in data:\n writer.writerow(t)\n\n##f = open(a, 'w') \n\n##f.write(data)\n\nthe_file.close()\n\nimport shutil\n\nshutil.copyfile(a,'/home/mjhabiger/bigcommerce/Coupons.txt') \n\n\n\t\t\n \n\t\n\t\n\t\n\t\n","repo_name":"cedaly1968/ETL","sub_path":"big_commerce/scripts/coupons.py","file_name":"coupons.py","file_ext":"py","file_size_in_byte":2185,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"6578727077","text":"from celery import Celery\nimport celeryconfig\n\ndef create_celery_app():\n celery = Celery(__name__, include=['celerytasks'])\n celery.config_from_object(celeryconfig)\n return celery\n\ncelery = create_celery_app()\n\nif __name__ == \"__main__\":\n celery.start()\n","repo_name":"Knugn/ACC-A3","sub_path":"celeryapp.py","file_name":"celeryapp.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"34634014739","text":"#Object Oriented Classes\nfrom Network.StableRoommates import *;\nfrom Network.RandomMates import *;\nfrom Clustering.Distribution import *;\n\n#Third Party Classes\nfrom numpy import * \nfrom geopy import distance #Geo Location\nimport json, io, datetime, progressbar, csv\nfrom xml.etree import ElementTree\n\nfirstList = []\nsecondList = [] \n\n#Reading Json files to populate the list \n#NODES\nFileName = \".\\Source\\Data\\Study\\Soweto.json\"\nwith io.open(FileName, 'r', encoding=\"utf-8\") as JsonData:\n JsonGeoData = json.load(JsonData)\n ResidualEnergies, SNRs = Distribution.Normal() # Random Values\n\n count = 1 \n print(\"\\nCreating Nodes.\")\n for k, value in JsonGeoData.items():\n firstList.append({\"name\": k, \n \"id\": str(count),\n \"residual\": random.choice(ResidualEnergies), \n \"snr\": random.choice(SNRs),\n \"pos\": (float(value[\"geometry\"][\"location\"]['lat']), float(value[\"geometry\"][\"location\"]['lng']))})\n count += 1\n\n#USERS\nFileName = \".\\Source\\Data\\Health Centers\\Soweto.json\"\nwith io.open(FileName, 'r', 
encoding=\"utf-8\") as JsonData:\n JsonGeoData = json.load(JsonData)\n BDs = random.normal(10, 2.1, 100000000)\n\n count = 1\n print(\"Creating Users.\")\n for k, value in JsonGeoData.items():\n secondList.append({\"name\": k,\n \"id\": str(count),\n \"bandwidwith\": random.choice(BDs),\n \"pos\": (float(value[\"geometry\"][\"location\"]['lat']), float(value[\"geometry\"][\"location\"]['lng']))})\n count += 1\n\nprint(\"Computing Preferences.\")\n#Computing preferences\ndef Distance(node, user):\n return distance.distance(node[\"pos\"], user[\"pos\"]).kilometers\n\n#NODES\nfor node in firstList:\n preferences = []\n for user in secondList:\n if Distance(node, user) <= 5:\n preferences.append(user[\"name\"])\n node[\"preferences\"] = preferences\n\n#USERS\nAverage_Residual = average([(node[\"residual\"]) for node in firstList])\nAverage_SNR = average([node[\"snr\"] for node in firstList])\n\nfor user in secondList:\n preferences = []\n for node in firstList:\n if node[\"snr\"] >= Average_SNR and node[\"residual\"] >= Average_Residual:\n preferences.append(node[\"name\"])\n user[\"preferences\"] = preferences\n\n\"\"\"\nfirstList = [{\"name\":'A',\"age\":40,\"preferences\":['U','V','W','X','Y','Z']},\n {\"name\":'B',\"age\":21,\"preferences\":['V','W','Y','U','X','Z']},\n {\"name\":'C',\"age\":30,\"preferences\":['V','W','U','Y','X','Z']},\n {\"name\":'D',\"age\":50,\"preferences\":['V','W','U','Y','X','Z']},\n {\"name\":'E',\"age\":28,\"preferences\":['U','W','V','Y','X','Z']},\n {\"name\":'F',\"age\":28,\"preferences\":['V','W','U','Y','X','Z']}\n ]\n\nsecondList = [{\"name\":'U',\"age\":30,\"preferences\":['A','B','C','D','E','F']},\n {\"name\":'V',\"age\":31,\"preferences\":['A','C','B','D','E','F']},\n {\"name\":'W',\"age\":40,\"preferences\":['A','C','B','D','E','F']},\n {\"name\":'X',\"age\":34,\"preferences\":['A','B','C','D','E','F']},\n {\"name\":'Y',\"age\":45,\"preferences\":['A','B','C','D','E','F']},\n {\"name\":'Z',\"age\":45,\"preferences\":['A','B','C','D','F','E']}\n ]\n\n\"\"\"\n\nprint(\"Performing Stable Roomate Alloction.\")\npg = StableRoommates(firstList, secondList)\n\nUSERS = pg.getUsers()\n\nAllocation = {}\nResults = {}\n\nPreferredRoommates = 0\nPontetialRoommates = 0\nCountRandommates = 0\n\nfor user in USERS:\n Allocation[str(user.id)] = {\"id\": user.id,\n \"name\": user.name,\n \"pos\": user.pos,\n \"preferred\": [u for u in user.preferredMates],\n \"stableRoommate\": {\"id\": user.mate.id, \n \"name\": user.mate.name, \n \"pos\": user.mate.pos},\n \"randomRoommate\": {\"id\": user.randomMate.id, \n \"name\": user.randomMate.name, \n \"pos\": user.randomMate.pos}}\n \n if str(user.mate.name) in [user.name for user in user.potentialMates]:\n PontetialRoommates += 1\n\n for node in firstList:\n if str(node[\"name\"]) == str(user.name):\n if str(user.mate.name) in node[\"preferences\"]:\n PreferredRoommates += 1\n break;\n else:\n print(user.name, node[\"preferences\"])\n\n if str(user.randomMate.name) in user.preferredMates:\n CountRandommates += 1\n\nResults = {\"totalUsers\": len(USERS),\n \"pontentialRoommate\": PontetialRoommates,\n \"preferredRoommate\": PreferredRoommates,\n \"randomRoommate\": CountRandommates}\n\n#Creating an external files\nprint(\"\\n_______________________________________\\nCreating an external files\")\nwith open(\"./Allocation.json\", 'w') as fp: json.dump(Allocation, fp, indent = 4)\nfp.close()\nwith open(\"./Results.json\", 'w') as fp: json.dump(Results, fp, indent = 
4)\nfp.close()","repo_name":"kloniphani/Exocoetidae","sub_path":"Project Wing/Task_Allocation.py","file_name":"Task_Allocation.py","file_ext":"py","file_size_in_byte":5158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"44708842133","text":"# Even or odd project\n# 0 2 4 6 8 10 .... (even)\n# 1 3 5 7 9 11 .... (odd)\n\n# Mod % - gives the remainder of a division\n\nwhile True:\n    try:\n        valor = int(input('Enter a value:'))\n        if valor % 2 == 0:\n            print('Even number')\n        else:\n            print('Odd number')\n    except:\n        print('Enter numbers only')\n","repo_name":"DiegoVarzim/projeto-par_impar","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"2327663633","text":"\"\"\"\nhttps://www.acmicpc.net/problem/1929\n\n\"\"\"\nimport math\nn, m = map(int, input().split())\n\ndp = [-1] * (m + 1)\ndp[1] = 2\n\nfor k in range(4, m+1, 2):\n    dp[k] = 2\n\n\nfor i in range(n, m+1):\n\n    # If i turns out to be prime, mark its multiples (stepping by 2*i) as composite.\n    \n    # Only numbers not already produced by a previously found prime go through the primality test.\n    if dp[i] == -1:\n        flag = 0\n        for l in range(2, int(math.sqrt(i))+1):\n            if i % l == 0:\n                flag = 1\n                break\n        \n        if flag == 0:\n            print(i)\n            for a in range(i, m+1, i*2):\n                dp[a] = 2\n\n","repo_name":"CastleRain/coding_test","sub_path":"backjoon/기본수학/(1929) 소수 구하기.py","file_name":"(1929) 소수 구하기.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"9747041322","text":"import pygame\nimport Interface.cars as cars\nimport Interface.questions as questions\nfrom random import randint\n\npygame.init()\n\n# SETUP THE GAME WINDOW\ndisplay_width, display_height = 800, 800\ngameDisplay = pygame.display.set_mode((display_width, display_height))\npygame.display.set_caption('Kinder Racing')\npygame.display.set_icon(cars.player)\nclock = pygame.time.Clock()\nengine = pygame.mixer.Sound(\"..\Resources\Sounds\misc\engine.ogg\")\nbackground = pygame.image.load(\"..\Resources\Track\\TrackTest.png\")\n\n\n# CAR FUNCTIONS\ndef player_car(x, y):\n    gameDisplay.blit(cars.player, (x, y))\n\n\ndef computer_one_car(x, y):\n    gameDisplay.blit(cars.computer_one, (x, y))\n\n\ndef computer_two_car(x, y):\n    gameDisplay.blit(cars.computer_two, (x, y))\n\n\ndef computer_three_car(x, y):\n    gameDisplay.blit(cars.computer_three, (x, y))\n\n\ndef lap_tracker(lap):\n    # Lap images exist for laps 1-10; anything out of range falls back to lap 1.\n    if not 1 <= lap <= 10:\n        lap = 1\n    image = pygame.image.load(f\"..\Resources\Laps\lap_{lap}.png\")\n\n    image = pygame.transform.scale(image, (50, 50))\n    return image\n\n\ndef display_lap(pc, comp_1, comp_2, comp_3, car_one, car_two, car_three, car_four):\n    
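# Show each car's lap image, update every car's HUD slot from the others' distances, then redraw the cars.\n    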
gameDisplay.blit(pc, (car_one.lap_pos, 0))\n gameDisplay.blit(comp_1, (car_two.lap_pos, 0))\n gameDisplay.blit(comp_2, (car_three.lap_pos, 0))\n gameDisplay.blit(comp_3, (car_four.lap_pos, 0))\n\n car_one.set_pos(car_two.dist_traveled, car_three.dist_traveled, car_four.dist_traveled)\n car_two.set_pos(car_one.dist_traveled, car_three.dist_traveled, car_four.dist_traveled)\n car_three.set_pos(car_one.dist_traveled, car_two.dist_traveled, car_four.dist_traveled)\n car_four.set_pos(car_one.dist_traveled, car_two.dist_traveled, car_three.dist_traveled)\n\n # REPOSITION PLAYERS\n player_car(car_one.pos, 0)\n computer_one_car(car_two.pos, 0)\n computer_two_car(car_three.pos, 0)\n computer_three_car(car_four.pos, 0)\n pass\n\n\ndef question_display(choice_1, choice_2, choice_3, choice_4):\n gameDisplay.blit(choice_1, (245, 625))\n gameDisplay.blit(choice_2, (380, 625))\n gameDisplay.blit(choice_3, (515, 625))\n gameDisplay.blit(choice_4, (650, 625))\n\n\ndef game_start():\n\n #CAR OBJECTS\n car_one = cars.Racecar(cars.player, 1, 0, 150, 575, 700)\n car_two = cars.Racecar(cars.computer_one, 1, 0, 250, 375, 500)\n car_three = cars.Racecar(cars.computer_two, 1, 0, 350, 175, 300)\n car_four = cars.Racecar(cars.computer_three, 1, 0, 450, 0, 115)\n\n # CARS SETUP\n player_x_change = comp_1_change = comp_2_change = comp_3_change = 0\n\n # QUESTION SETUP\n currentQuestion = questions.QuestionLetter()\n currentQuestion.sound.play()\n correct = False\n raceFinished = False\n\n while not raceFinished:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n raceFinished = True\n\n if event.type == pygame.KEYDOWN:\n if event.key == currentQuestion.key:\n player_x_change = randint(1, 20)\n comp_1_change = randint(1, 20)\n comp_2_change = randint(1, 20)\n comp_3_change = randint(1, 20)\n\n if event.key == pygame.K_LSHIFT:\n currentQuestion.sound.play()\n\n if event.type == pygame.KEYUP:\n if event.key == currentQuestion.key:\n player_x_change = comp_1_change = comp_2_change = comp_3_change = 0\n correct = True\n\n car_one.x += player_x_change\n car_two.x += comp_1_change\n car_three.x += comp_2_change\n car_four.x += comp_3_change\n\n car_one.dist_traveled += player_x_change\n car_two.dist_traveled += comp_1_change\n car_three.dist_traveled += comp_2_change\n car_four.dist_traveled += comp_3_change\n\n # PREPARE DISPLAY\n gameDisplay.fill((0, 0, 0))\n gameDisplay.blit(background, (0, 95, display_width, display_height))\n player_lapImg = lap_tracker(car_one.lap)\n comp_1_lapImg = lap_tracker(car_two.lap)\n comp_2_lapImg = lap_tracker(car_three.lap)\n comp_3_lapImg = lap_tracker(car_four.lap)\n display_lap(player_lapImg, comp_1_lapImg, comp_2_lapImg, comp_3_lapImg, car_one, car_two, car_three, car_four)\n displayQuestion = False\n while not displayQuestion:\n if currentQuestion.ready:\n question_display(currentQuestion.letter_one, currentQuestion.letter_two, currentQuestion.letter_three,\n currentQuestion.letter_four)\n displayQuestion = True\n\n player_car(car_one.x, car_one.y)\n computer_one_car(car_two.x, car_two.y)\n computer_two_car(car_three.x, car_three.y)\n computer_three_car(car_four.x, car_four.y)\n play = pygame.image.load(\"..\\Resources\\Misc\\play.png\")\n gameDisplay.blit(play, (100, 675))\n pygame.display.update()\n clock.tick(60)\n\n if correct:\n currentQuestion = questions.QuestionLetter()\n engine.play(maxtime=3000)\n currentQuestion.sound.play()\n correct = False\n\n car_one.set_lap()\n car_two.set_lap()\n car_three.set_lap()\n car_four.set_lap()\n\n\ndef text_objects(text, 
font):\n textSurface = font.render(text, True, (255, 255, 255))\n return textSurface, textSurface.get_rect()\n\n\ndef game_menu():\n\n intro = True\n\n while intro:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n\n gameDisplay.fill((0, 0, 0))\n largeText = pygame.font.Font('freesansbold.ttf', 115)\n TextSurf, TextRect = text_objects(\"Kinder Racing\", largeText)\n TextRect.center = ((display_width/2), (display_height/2))\n gameDisplay.blit(TextSurf, TextRect)\n gameDisplay.blit(cars.topMenu, (display_width/6, 150))\n gameDisplay.blit(cars.bottomMenu, (display_width/6, 500))\n\n mouse = pygame.mouse.get_pos()\n clicked = pygame.mouse.get_pressed()\n\n start = pygame.image.load(\"..\\Resources\\Misc\\play.png\")\n if 150+100 > mouse[0] > 150 and 450+50 > mouse[1] > 450:\n gameDisplay.blit(start, (150, 450))\n pygame.draw.rect(gameDisplay, (200, 0, 0), (550, 450, 100, 50))\n if clicked[0] == 1:\n gameDisplay.fill((0, 0, 0))\n game_start()\n elif 550+100 > mouse[0] > 550 and 450+50 > mouse[1] > 450:\n pygame.draw.rect(gameDisplay, (250, 0, 0), (550, 450, 100, 50))\n gameDisplay.blit(start, (150, 450))\n if clicked[0] == 1:\n pygame.quit()\n quit()\n else:\n gameDisplay.blit(start, (150, 450))\n pygame.draw.rect(gameDisplay, (200, 0, 0), (550, 450, 100, 50))\n\n quitText = pygame.font.Font('freesansbold.ttf', 20)\n textSurf, textRect = text_objects(\"Quit\", quitText)\n textRect.center = ((550 + (100/2)), (450+(50/2)))\n gameDisplay.blit(textSurf, textRect)\n\n pygame.display.update()\n clock.tick(15)\n\n\ngame_menu()\n\n","repo_name":"NolanMelander/KinderRacing","sub_path":"Interface/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":7885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"73241470228","text":"from typing import List, Tuple, Optional, TYPE_CHECKING\nimport numpy as np\nimport networkx as nx\n\nif TYPE_CHECKING:\n from .base import SilicaGraph\n\n\ndef split_and_keep(s, sep):\n \"\"\"Adapted from https://stackoverflow.com/a/34120478.\n \"\"\"\n if not s:\n return [''] # consistent with string.split()\n\n # Find replacement character that is not used in string\n # i.e. 
just use the highest available character plus one\n # Note: This fails if ord(max(s)) = 0x10FFFF (ValueError)\n p = chr(ord(max(s))+1)\n\n return s.replace(sep, p + sep).split(p)\n\n\ndef inflate_silica_graph(S: \"SilicaGraph\", thickness: float = 0.5):\n # top layer\n top = nx.Graph(S.copy())\n nx.relabel_nodes(\n top,\n mapping={\n i: f\"{i}_top\"\n for i in top.nodes\n },\n copy=False\n )\n\n # bottom layer\n bottom = nx.Graph(S.copy())\n nx.relabel_nodes(\n bottom,\n mapping={\n i: f\"{i}_bottom\"\n for i in bottom.nodes\n },\n copy=False\n )\n\n # move top and bottom nodes in Z direction\n for i in range(len(S)):\n top.nodes[f\"{i}_top\"][\"position\"] += (thickness / 2,)\n bottom.nodes[f\"{i}_bottom\"][\"position\"] += (-thickness / 2,)\n\n # combine\n G = nx.compose(top, bottom)\n\n # add middle oxygens below Si\n for i in range(len(S)):\n if S.nodes[i][\"type\"] == \"Si\":\n position = S.nodes[i][\"position\"] + (0,)\n G.add_node(f\"{i}_middle\", type=\"O\", position=position)\n G.add_edge(f\"{i}_top\", f\"{i}_middle\")\n G.add_edge(f\"{i}_middle\", f\"{i}_bottom\")\n # add extra needed attributes\n G.size_x = S.size_x\n G.size_y = S.size_y\n G.thickness = thickness\n # relabel nodes sequentially from 0 to simplify lammps export\n nx.relabel_nodes(\n G,\n mapping={\n node: i\n for i, node in enumerate(G.nodes)\n },\n copy=False\n )\n return G\n\n\ndef average_pbc(points: List[Tuple[float, float]], size_x: float, size_y: float):\n \"\"\"Compute the average of a set of points even if they go over PBC\"\"\"\n\n x0 = points[0]\n vectors_from_x0 = np.array([\n list(cyclic_vector(x0, v, size_x, size_y))\n for v in points[1:]\n ])\n av_x, av_y = np.array(x0) + np.sum(vectors_from_x0, axis=0) / len(points)\n average = (av_x % size_x, av_y % size_y)\n return average\n\n\ndef cyclic_vector(u, v, size_x, size_y):\n _, dx, dy = cyclic_distance(u, v, size_x, size_y)\n return (dx, dy)\n\n\ndef cyclic_distance(u, v, size_x, size_y):\n u_x, u_y = u\n v_x, v_y = v\n sx = np.sign(v_x - u_x)\n dx = np.abs(v_x - u_x)\n if dx > size_x / 2:\n sx *= -1\n dx = size_x - dx\n sy = np.sign(v_y - u_y)\n dy = np.abs(v_y - u_y)\n if dy > size_y / 2:\n sy *= -1\n dy = size_y - dy\n return np.sqrt(dx ** 2 + dy ** 2), sx*dx, sy*dy\n\n\ndef rotate_silicagraph_in_pbc(S, theta: Optional[float] = None):\n \"\"\"Rotate a silica graph inside PBC\"\"\"\n\n positions = nx.get_node_attributes(S, \"position\")\n positions = np.array([list(pos) for pos in positions.values()])\n types = nx.get_node_attributes(S, \"type\")\n types = np.array([x for x in types.values()])\n\n new_positions = []\n new_types = []\n for horizontal in [-S.size_x, 0, S.size_x]:\n for vertical in [-S.size_y, 0, S.size_y]:\n for pos, type in zip(positions, types):\n new_pos = [pos[0] + horizontal, pos[1] + vertical]\n new_positions.append(new_pos)\n new_types.append(type)\n\n new_positions = np.array(new_positions)\n new_types = np.array(new_types)\n\n mean = np.mean(new_positions, axis=0)\n if theta is None:\n theta = np.random.uniform(0, 2*np.math.pi)\n R = np.array([\n [np.cos(theta), -np.sin(theta)],\n [np.sin(theta), np.cos(theta)]\n ])\n rotated_positions = (new_positions - mean).dot(R) + mean\n\n size = 0.5 * (S.size_x + S.size_y)\n\n b1 = rotated_positions[:, 0] >= -size / 2\n b2 = rotated_positions[:, 0] < 1.5 * size\n b3 = rotated_positions[:, 1] >= -size / 2\n b4 = rotated_positions[:, 1] < 1.5 * size\n\n inside_box = np.all([b1, b2, b3, b4], axis=0)\n final_positions = rotated_positions[inside_box]\n final_types = new_types[inside_box]\n 
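# only the 3x3 periodic copies that landed inside the enlarged box are kept\n    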
return final_positions, final_types\n","repo_name":"ComplexityBiosystems/2D-silica-ML","sub_path":"Silica-dataset-generation/silicanets-master/silicanets/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4334,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"49"} +{"seq_id":"22321443708","text":"#Build Weather App\nfrom tkinter import *\nimport requests\nimport json\nimport matplotlib.pyplot as plt\nimport math \n\nroot=Tk()\nroot.title(\"Weather Forecast\")\n\ndef LookUp():\n global dlbl,tlbl,rlbl,clbl,minlbl,maxlbl,sunriselbl,sunsetlbl,date,time,region,country,mini,maxi,sunrise,sunset\n '''global tlbl\n global rlbl\n global clbl\n global minlbl\n global maxlbl\n global avglbl\n global sunriselbl\n global sunsetlbl\n global date\n global time\n global region\n global country\n global mini\n global maxi\n\n global sunrise \n global sunset'''\n \n \n \n #Requesting API\n \n api_request=requests.get(\"https://api.weatherapi.com/v1/forecast.json?key=095c610ce26c429e87870327210601&q=\"+Area.get()+\"&days=1\")\n api=json.loads(api_request.content)\n \n #\n \n \n #Filtering out the required fields from the json file\n\n date=(api[\"forecast\"][\"forecastday\"][0][\"date\"])\n time=((api[\"location\"][\"localtime\"]).split())[1]\n region=(api[\"location\"][\"region\"])\n country=(api[\"location\"][\"country\"])\n mini=(api[\"forecast\"][\"forecastday\"][0][\"day\"][\"mintemp_c\"])\n maxi=(api[\"forecast\"][\"forecastday\"][0][\"day\"][\"maxtemp_c\"])\n avg=(api[\"forecast\"][\"forecastday\"][0][\"day\"][\"avgtemp_c\"])\n sunrise=(api[\"forecast\"][\"forecastday\"][0][\"astro\"][\"sunrise\"])\n sunset=(api[\"forecast\"][\"forecastday\"][0][\"astro\"][\"sunset\"])\n currtemp=(api[\"current\"][\"temp_c\"])\n #Creating name Labels\n d_lbl=Label(root,text=\"Date:\").grid(row=1,column=0)\n t_lbl=Label(root,text=\"Time:\").grid(row=2,column=0)\n r_lbl=Label(root,text=\"Region:\").grid(row=3,column=0)\n c_lbl=Label(root,text=\"Country:\").grid(row=4,column=0)\n min_lbl=Label(root,text=\"min temp(in C):\").grid(row=5,column=0)\n max_lbl=Label(root,text=\"max temp (in C):\").grid(row=6,column=0)\n avg_lbl=Label(root,text=\"avg temp:\").grid(row=7,column=0)\n curr_lbl=Label(root,text=\"current temp(in C):\").grid(row=8,column=0)\n sunrise_lbl=Label(root,text=\"Sunrise:\").grid(row=9,column=0)\n sunset_lbl=Label(root,text=\"Sunset:\").grid(row=10,column=0)\n search=Label(root,text=\"Search Region:\").grid(row=0,column=0)\n\n #Putting Vallue in the corresponding name Labels\n dlbl=Label(root,text=date).grid(row=1,column=1)\n tlbl=Label(root,text=time).grid(row=2,column=1)\n rlbl=Label(root,text=region).grid(row=3,column=1)\n clbl=Label(root,text=country).grid(row=4,column=1)\n minlbl=Label(root,text=mini).grid(row=5,column=1)\n maxlbl=Label(root,text=maxi).grid(row=6,column=1)\n avglbl=Label(root,text=avg).grid(row=7,column=1)\n sunriselbl=Label(root,text=sunrise).grid(row=9,column=1)\n sunsetlbl=Label(root,text=sunset).grid(row=10,column=1)\n currlbl=Label(root,text=currtemp).grid(row=8,column=1)\n\ndef ChangeText():\n dlbl=Label(root,text=\" \").grid(row=1,column=1)\n tlbl=Label(root,text=\" \").grid(row=2,column=1)\n rlbl=Label(root,text=\" \").grid(row=3,column=1)\n clbl=Label(root,text=\" \").grid(row=4,column=1)\n minlbl=Label(root,text=\" \").grid(row=5,column=1)\n maxlbl=Label(root,text=\" \").grid(row=6,column=1)\n avglbl=Label(root,text=\" \").grid(row=7,column=1)\n sunriselbl=Label(root,text=\" \").grid(row=9,column=1)\n 
sunsetlbl=Label(root,text=\" \").grid(row=10,column=1)\n currlbl=Label(root,text=\" \").grid(row=8,column=1)\n\n Area.delete(0,END)\n\ndef graph():\n maxdata=[]\n mindata=[]\n avgdata=[]\n api_request=requests.get(\"https://api.weatherapi.com/v1/forecast.json?key=095c610ce26c429e87870327210601&q=\"+Area.get()+\"&days=3\")\n api=json.loads(api_request.content)\n for ii in range(3):\n maxdata=maxdata+[api[\"forecast\"][\"forecastday\"][ii][\"day\"][\"maxtemp_c\"]]\n mindata=mindata+[api[\"forecast\"][\"forecastday\"][ii][\"day\"][\"mintemp_c\"]]\n avgdata=avgdata+[api[\"forecast\"][\"forecastday\"][ii][\"day\"][\"avgtemp_c\"]]\n \n xaxis=[api[\"forecast\"][\"forecastday\"][0][\"date\"],api[\"forecast\"][\"forecastday\"][1][\"date\"],api[\"forecast\"][\"forecastday\"][2][\"date\"]]\n plt.plot(xaxis,maxdata,'r-',label=\"maximum temperature\")\n plt.plot(xaxis,mindata,'b-',label=\"minimum temperature\")\n plt.plot(xaxis,avgdata,'g-',label=\"average temperature\")\n plt.xlabel(\"day\")\n plt.ylabel(\"temperature\")\n plt.legend()\n plt.show()\n \ndef newt():\n global days\n global dayslbl\n global new\n new=Tk()\n new.title(\"weather forecast data\")\n trend_btn=Button(new,text=\"Show Trend for the next 3 days\",command=graph).grid(row=1,column=0,columnspan=2,ipadx=30)\n new.mainloop()\n \n\n\n \n \n \n\n\n\n\n \n\n\n\n\n\n \nArea =Entry(root,width=40,borderwidth=5)\nArea.grid(row=0,column=1)\nbtn=Button(root,text=\"Search\",command=LookUp).grid(row=11,column=0,columnspan=2,ipadx=100)\nreset_btn=Button(root,text=\"Reset\",command=ChangeText).grid(row=12,column=0,columnspan=2,ipadx=104)\ngraph_btn=Button(root,text=\"Click to Graph the forecast\",command=newt).grid(row=13,column=0,columnspan=2,ipadx=48)\n\n\nroot.mainloop()\n\n","repo_name":"ckrishna89/Weather-Forecast-","sub_path":"weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":5576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"29951822252","text":"from . 
import gromos_format as gf\nfrom .Errors import GromosFormatError\nimport sys\n\nNANOMETRE = 10.0\nPICOSECONDS = 20.455 #amber time unit (1/20.455 ps)\n\nclass Configuration:\n def __init__(self, io):\n blocks = gf.parse_blocks(io)\n nm = NANOMETRE\n ps = PICOSECONDS\n if \"GENBOX\" in blocks:\n genbox = blocks[\"GENBOX\"]\n self.boxtype = float(genbox[1])\n self.box_size = [ float(x)*nm for x in genbox[2].split() ]\n self.box_angle = [ float(x) for x in genbox[3].split() ]\n self.box_rotation = [ float(x) for x in genbox[4].split() ]\n self.box_origin = [ float(x)*nm for x in genbox[5].split() ]\n elif \"BOX\" in blocks:\n box = blocks[\"BOX\"]\n self.boxtype = 1 # assume rectangular box\n self.box_size = [ float(x)*nm for x in box[1].split() ]\n self.box_angle = [0.0, ] * 3\n self.box_rotation = [0.0, ] * 3\n self.box_origin = [0.0, ] * 3\n else:\n self.boxtype = 0 # assume vacuum box\n self.box_size = [0.0, ] * 3\n self.box_angle = [0.0, ] * 3\n self.box_rotation = [0.0, ] * 3\n self.box_origin = [0.0, ] * 3\n\n posblock, cols, types = (\n \"POSITION\",\n [5, 6, 6, 7, 15, 15, 15],\n (int, str, str, int, float, float, float),\n ) if \"POSITION\" in blocks else (\n \"POSITIONRED\",\n [15, 15, 15],\n (float, float, float),\n ) if \"POSITIONRED\" in blocks else (None, None, None)\n\n if None == posblock:\n raise GromosFormatError(\n \"No 'POSITION' or 'POSITIONRED' block found \"\\\n \"in coordinate file\"\n )\n columns = gf.parse_simple_columns(\n blocks[posblock],\n cols,\n types,\n header = False,\n )\n\n x,y,z = columns[-3:]\n\n if \"LATTICESHIFTS\" in blocks:\n sx,sy,sz = gf.parse_simple_columns(blocks[\"LATTICESHIFTS\"],\n [10,10,10],\n (int,int,int),\n header = False)\n else:\n numatoms = len(x)\n sx = [0]*numatoms\n sy,sz = list(sx), list(sx)\n\n if \"VELOCITY\" in blocks or \"VELOCITYRED\" in blocks:\n velblock, cols, types = (\"VELOCITY\",\n [5,6,6,7,15,15,15],\n (int,str,str,int,float,float,float),\n ) if \"VELOCITY\" in blocks \\\n else (\"VELOCITYRED\",\n [15,15,15],\n (float,float,float),\n )\n columns = gf.parse_simple_columns(blocks[velblock],\n cols,\n types,\n header = False)\n vx,vy,vz = columns[-3:]\n\n self.velocities = [ [xi*nm/ps, yi*nm/ps, zi*nm/ps]\n for xi,yi,zi in zip(vx,vy,vz) ]\n else:\n self.velocities = None\n\n bx,by,bz = self.box_size\n\n self.positions = [ [xi*nm+sxi*bx, yi*nm+syi*by, zi*nm+szi*bz]\n for xi,yi,zi,sxi,syi,szi in zip(x,y,z,sx,sy,sz) ]\n self.title = ''.join(blocks[\"TITLE\"][1:-1]).strip()\n\n def gather_molecules(self, topology):\n x = self.positions\n box = self.box_size\n bond_lists = (topology.bonds_wH, topology.bonds_woH)\n if sum(box) == 0:\n return []\n num_broken_bond_dims = 1\n while num_broken_bond_dims > 0:\n num_broken_bond_dims = 0\n for bonds in bond_lists:\n for bond in bonds:\n i,j = bond.atoms\n for d in range(3):\n if abs(x[i][d]-x[j][d]) > 0.5*box[d]:\n num_broken_bond_dims += 1\n if x[i][d] > x[j][d]:\n if x[j][d]>box[d]: raise(Exception(\"stuck in loop\"))\n x[j][d] += box[d]\n else:\n if x[i][d]>box[d]: raise(Exception(\"stuck in loop\"))\n x[i][d] += box[d]\n\n","repo_name":"ATB-UQ/gromos2amber","sub_path":"gromos2amber/Configuration.py","file_name":"Configuration.py","file_ext":"py","file_size_in_byte":4528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"34525258174","text":"from typing import Callable\nfrom abc import ABC, abstractmethod\n\nimport numpy as np\n\n\nclass ConvergenceCriterion(ABC):\n \"\"\"Class of convergence checkers to see if the 
optimization is converged.\"\"\"\n def __init__(\n self,\n optimization_iterations: int,\n kl_threshold: float,\n fraction_change_threshold: float,\n ):\n self.optimization_iterations = optimization_iterations\n self.kl_threshold = kl_threshold\n self.fraction_change_threshold = fraction_change_threshold\n \n @abstractmethod\n def is_not_converged(\n self,\n iteration: int,\n previous_nonvariance: np.ndarray,\n current_nonvariance: np.ndarray,\n kl: float,\n ) -> bool:\n \"\"\"Checks whether the method has reached convergence\"\"\"\n\n\nclass IterationCriterion(ConvergenceCriterion):\n def is_not_converged(self, iteration: int, previous_nonvariance: np.ndarray, current_nonvariance: np.ndarray, kl: float) -> bool:\n return iteration < self.optimization_iterations\n\n\nclass KLCriterion(ConvergenceCriterion):\n def is_not_converged(self, iteration: int, previous_nonvariance: np.ndarray, current_nonvariance: np.ndarray, kl: float) -> bool:\n if kl is None:\n return True\n else:\n return kl > self.kl_threshold\n\n\nclass ChangeCriterion(ConvergenceCriterion):\n def is_not_converged(self, iteration: int, previous_nonvariance: np.ndarray, current_nonvariance: np.ndarray, kl: float) -> bool:\n if previous_nonvariance is None:\n return True\n else:\n return np.abs(current_nonvariance - previous_nonvariance) / previous_nonvariance > self.fraction_change_threshold\n\n\n\ndef get_convergence_checker(\n convergence_method: str,\n optimization_iterations: int,\n kl_threshold: float,\n fraction_change_threshold: float,\n) -> ConvergenceCriterion:\n\n supported_convergence_methods = {\n \"iteration_count\": IterationCriterion,\n \"fraction_change_threshold\": ChangeCriterion,\n \"kl_threshold\": KLCriterion,\n }\n convergence_class = supported_convergence_methods.get(convergence_method)\n if convergence_class is None:\n raise NotImplementedError(\n f\"Convergence method {convergence_method} is not supported\"\n )\n\n return convergence_class(optimization_iterations, kl_threshold, fraction_change_threshold)\n","repo_name":"cjmcgill/ensemble_projection","sub_path":"ensemble_projection/convergence.py","file_name":"convergence.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"49"} +{"seq_id":"72310272470","text":"import os\nimport shutil\nimport pickle\nimport numpy as np\nfrom PIL import Image\nfrom tqdm import tqdm\nfrom transformers import AutoProcessor, CLIPModel\n\nfrom albumentations.augmentations.geometric.resize import SmallestMaxSize\nfrom albumentations.augmentations.crops.transforms import CenterCrop\n\nCLIP_MODEL = 'openai/clip-vit-base-patch32'\n\n\ndef create_url_suitable_filenames(source_images_folder, starting_image_folder):\n img_formats = ['png', 'jpg', 'jpeg']\n extension = lambda x: x.split('.')[-1].lower()\n \n source_images = [filename for filename in os.listdir(source_images_folder) if extension(filename) in img_formats]\n filename_mapping = {}\n files = []\n for idx, filename in enumerate(source_images):\n initial_path = os.path.join(source_images_folder, filename)\n new_path = os.path.join(starting_image_folder, f'{idx:010d}.{extension(filename)}')\n shutil.copyfile(initial_path, new_path)\n files += [new_path]\n \n return files\n\ndef calculate_small_emb(model, processor, img):\n inputs = {k:v for k,v in processor(images=img, return_tensors=\"pt\").items()}\n image_features = model.get_image_features(**inputs).flatten().detach().cpu()\n return image_features\n\ndef calculate_emb(model, 
processor, img):\n inputs = {k:v for k,v in processor(images=img, return_tensors=\"pt\").items()}\n image_features = model.vision_model(**inputs).last_hidden_state.flatten().detach().cpu()\n return image_features\n\ndef calculate_embs(model, processor, files, conf):\n embeddings = {}\n for file in tqdm(files):\n try:\n img = Image.open(file)\n key = file.replace(f'{conf.albums_folder}/', '')\n embeddings[key] = np.array(calculate_emb(model, processor, img))\n except:\n print(f'error occured while processing image: {file}')\n \n return embeddings\n\ndef init_images_embeddings(conf):\n source_images_folder = os.path.join(conf.albums_folder, conf.source_images_folder)\n starting_image_folder = os.path.join(conf.albums_folder, conf.starting_image_folder)\n \n if os.path.exists(source_images_folder) and not os.path.exists(starting_image_folder):\n print('CALCULATING CLIP EMBEDDINGS\\n')\n os.mkdir(starting_image_folder)\n files = create_url_suitable_filenames(source_images_folder, starting_image_folder)\n files = [file for file in files if os.path.exists(file)]\n\n model = CLIPModel.from_pretrained(CLIP_MODEL)\n processor = AutoProcessor.from_pretrained(CLIP_MODEL)\n\n embeddings = calculate_embs(model, processor, files, conf)\n\n with open(conf.embeddings_path, 'wb') as f:\n pickle.dump(embeddings, f)\n \ndef init_thumbnails(conf):\n starting_image_folder = os.path.join(conf.albums_folder, conf.starting_image_folder)\n thumbnails_folder = os.path.join(conf.albums_folder, conf.starting_image_folder.replace('/', '_th/'))\n \n if os.path.exists(starting_image_folder) and not os.path.exists(thumbnails_folder):\n os.mkdir(thumbnails_folder)\n print('GENERATING THUMBNAILS\\n')\n \n resize = SmallestMaxSize(conf.thumbnail_size)\n crop = CenterCrop(conf.thumbnail_size, conf.thumbnail_size)\n\n for filename in tqdm(os.listdir(starting_image_folder)):\n try:\n image_path = os.path.join(starting_image_folder, filename)\n thumbnail_path = os.path.join(thumbnails_folder, filename)\n\n image = np.array(Image.open(image_path))\n thumbnail = Image.fromarray(crop(**resize(image=image))['image'])\n thumbnail.save(thumbnail_path)\n except:\n print(f'error occured while generating thumbnail for image: {filename}')","repo_name":"aedismorah/ImageRanker","sub_path":"server/lib/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":3779,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"49"} +{"seq_id":"24225070631","text":"import gobject\nimport gtk\nimport pango\nfrom os.path import join, isfile, isdir, dirname, basename, exists\nfrom mimetypes import guess_type\n\nimport sendfile_gui\nfrom communitymeta import Community\nfrom ioutils import filesize\nfrom content import Content_Meta\nfrom file_chooser_dlg import File_Chooser, FILE_CHOOSER_TYPE_FILE, FILE_CHOOSER_TYPE_DIR\nfrom filesharing import Share_Meta, FTYPE_DIRECTORY, FTYPE_FILE\nfrom general_dialogs import Download_Dialog\nfrom guiutils import new_scrollarea, GUI_Page, Action_List\nfrom openfile import open_file\nfrom pathname import ICON_DIR, get_dir\nfrom plugins import get_plugin_by_type\nfrom proximateprotocol import PLUGIN_TYPE_COMMUNITY, PLUGIN_TYPE_SEND_FILE, \\\n PLUGIN_TYPE_NOTIFICATION, PLUGIN_TYPE_FILE_SHARING, FS_PURPOSE_SHARE, \\\n SHARE_DIR, SHARE_FILE\nfrom support import debug, warning\nfrom user import User\nfrom utils import cut_text, format_bytes\n\ncommunity = None\nfilesharing = None\nsendfile = None\nmain_gui = None\nnotification = None\n\nMAX_GUI_NAME = 32\n\ndef 
split_keywords(s):\n keywords = []\n fields = s.split(',')\n for word in fields:\n newwords = word.split()\n keywords += newwords\n return keywords\n\nclass File_Sharing_GUI:\n \"\"\" File_Sharing_GUI class includes all the gui parts of the\n file sharing plugin. All communication between main gui and\n file sharing plugin should be done through this class.\n\n File_Sharing_GUI includes window for searching content, window\n for browsing user's content and window for publishing own content.\n Also, dialogs for adding meta information to a content are initialized\n through this class.\n \"\"\"\n\n FILESHARING_ICON = '64px-search_content_icon.png'\n\n def __init__(self, gui, sendfilegui):\n global filesharing, community, sendfile, main_gui, notification\n filesharing = get_plugin_by_type(PLUGIN_TYPE_FILE_SHARING)\n community = get_plugin_by_type(PLUGIN_TYPE_COMMUNITY)\n sendfile = get_plugin_by_type(PLUGIN_TYPE_SEND_FILE)\n notification = get_plugin_by_type(PLUGIN_TYPE_NOTIFICATION)\n main_gui = gui\n self.sendfilegui = sendfilegui\n\n # store File_Sharing_Browse instances key = (target, is_community)\n self.browse_pages = {}\n\n # Initialize related pages\n self.fs_search = File_Sharing_Search(self)\n self.fs_results = File_Sharing_Browse(self, 'Search results')\n self.fs_publish = File_Sharing_Publish(self)\n\n icon = gtk.gdk.pixbuf_new_from_file(join(get_dir(ICON_DIR), self.FILESHARING_ICON))\n community.community_gui.register_user_event(icon, 'Filesharing', self.start_filesharing_cb)\n community.community_gui.register_com_event(icon, 'Filesharing', self.start_filesharing_cb)\n\n main_gui.add_key_binding(gtk.gdk.CONTROL_MASK, gtk.keysyms.b, self.key_pressed_ctrl_b)\n\n def key_pressed_ctrl_b(self, target, ctx):\n iscom = isinstance(target, Community)\n self.show_browse_window(target, iscom)\n\n def start_filesharing_cb(self, target):\n if isinstance(target, User):\n self.start_filesharing_user_cb(target)\n elif isinstance(target, Community):\n self.start_filesharing_com_cb(target)\n\n def start_filesharing_user_cb(self, user):\n self.target = user\n self.com = False\n self.filesharing_dialog = Filesharing_Dialog(self.response_handler, user, False)\n\n def start_filesharing_com_cb(self, com):\n self.target = com\n self.com = True\n self.filesharing_dialog = Filesharing_Dialog(self.response_handler, com, True)\n\n def response_handler(self, widget, event, cmd_id):\n self.filesharing_dialog.dialog.set_modal(False)\n self.filesharing_dialog.dialog.destroy()\n\n function = None\n if cmd_id == Filesharing_Dialog.SEND_EVENT:\n function = self.sendfilegui.select_file_to_send\n elif cmd_id == Filesharing_Dialog.SEARCH_EVENT:\n function = self.fs_search.show_file_sharing_search\n elif cmd_id == Filesharing_Dialog.PUBLISH_EVENT:\n function = self.fs_publish.show_publish_window\n elif cmd_id == Filesharing_Dialog.BROWSE_EVENT:\n function = self.show_browse_window\n\n if function != None:\n function(self.target, self.com)\n\n def show_browse_window(self, target, is_community):\n key = (target, is_community)\n page = self.browse_pages.get(key)\n if page == None:\n page = File_Sharing_Browse(self, 'Browse content')\n self.browse_pages[key] = page\n page.show_browse_window(target, is_community)\n\nclass Filesharing_Dialog:\n \"\"\" Implements a dialog with possible actions for filesharing.\n\n Send file is also shown in this dialog although it is not implemented\n in this module.\n \"\"\"\n\n SEND_ICON = '128px-send_file_icon.png'\n SEND_ICON_LOW = '128px-send-low-connection.png'\n SEND_ICON_NO = 
'128px-send-no-connection.png'\n SEARCH_ICON = '128px-search_files_icon.png'\n PUBLISH_ICON = '128px-publish_file_icon.png'\n BROWSE_ICON = '128px-browse_network_files_icon.png'\n CANCEL_ICON = '128px-cancel_icon.png'\n\n SEND_EVENT = 1\n SEARCH_EVENT = 2\n PUBLISH_EVENT = 3\n BROWSE_EVENT = 4\n CANCEL_EVENT = 5\n\n def __init__(self, response_handler_func, target, is_community):\n self.main_window = main_gui.get_main_window()\n self.dialog = gtk.Dialog('Filesharing', self.main_window)\n self.dialog.set_has_separator(False)\n self.response_handler = response_handler_func\n self.target = target\n self.is_community = is_community\n\n self.initialize_widgets()\n self.dialog.set_modal(True)\n self.dialog.show_all()\n\n def initialize_widgets(self):\n self.hbox = gtk.HBox()\n self.hbox.set_spacing(5)\n self.hbox.set_property(\"border-width\", 0)\n\n send_ebox = gtk.EventBox()\n publish_ebox = gtk.EventBox()\n browse_ebox = gtk.EventBox()\n search_ebox = gtk.EventBox()\n cancel_ebox = gtk.EventBox()\n\n send_image = gtk.Image()\n publish_image = gtk.Image()\n browse_image = gtk.Image()\n search_image = gtk.Image()\n cancel_image = gtk.Image()\n\n # warn user of bad connectivity for sending files\n if not self.is_community:\n hops = self.target.get('hops')\n if hops == 2:\n send_icon = self.SEND_ICON_LOW\n elif hops > 2:\n send_icon = self.SEND_ICON_NO\n else:\n send_icon = self.SEND_ICON\n else:\n send_icon = self.SEND_ICON\n send_image.set_from_file(join(get_dir(ICON_DIR), send_icon))\n publish_image.set_from_file(join(get_dir(ICON_DIR), self.PUBLISH_ICON))\n browse_image.set_from_file(join(get_dir(ICON_DIR), self.BROWSE_ICON))\n search_image.set_from_file(join(get_dir(ICON_DIR), self.SEARCH_ICON))\n cancel_image.set_from_file(join(get_dir(ICON_DIR), self.CANCEL_ICON))\n\n send_ebox.add(send_image)\n publish_ebox.add(publish_image)\n browse_ebox.add(browse_image)\n search_ebox.add(search_image)\n cancel_ebox.add(cancel_image)\n\n send_ebox.connect(\"button-press-event\", self.response_handler,\n self.SEND_EVENT)\n publish_ebox.connect(\"button-press-event\", self.response_handler,\n self.PUBLISH_EVENT)\n browse_ebox.connect(\"button-press-event\", self.response_handler,\n self.BROWSE_EVENT)\n search_ebox.connect(\"button-press-event\", self.response_handler,\n self.SEARCH_EVENT)\n cancel_ebox.connect(\"button-press-event\", self.response_handler,\n self.CANCEL_EVENT)\n\n vbox1 = gtk.VBox()\n vbox2 = gtk.VBox()\n vbox3 = gtk.VBox()\n vbox4 = gtk.VBox()\n vbox5 = gtk.VBox()\n \n vbox1.pack_start(publish_ebox, True, True)\n vbox2.pack_start(browse_ebox, True, True)\n vbox3.pack_start(search_ebox, True, True)\n vbox4.pack_start(cancel_ebox, True, True)\n vbox5.pack_start(send_ebox, True, True)\n \n vbox1.pack_start(gtk.Label('Publish'), False, False)\n vbox2.pack_start(gtk.Label('Browse'), False, False)\n vbox3.pack_start(gtk.Label('Search'), False, False)\n vbox4.pack_start(gtk.Label('Cancel'), False, False)\n vbox5.pack_start(gtk.Label('Send File'), False, False)\n\n self.hbox.pack_start(vbox5, True, True)\n if self.is_community:\n self.hbox.pack_start(vbox1, True, True)\n self.hbox.pack_start(vbox2, True, True)\n self.hbox.pack_start(vbox3, True, True)\n self.hbox.pack_start(vbox4, True, True)\n\n self.dialog.vbox.pack_start(self.hbox, True, True, 0)\n self.dialog.action_area.set_size_request(0, 0)\n self.dialog.vbox.set_spacing(0)\n self.dialog.vbox.show_all()\n\nclass File_Sharing_Search(GUI_Page):\n \"\"\" File_Sharing_Search includes GUI window for searching user's\n files. 
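Keywords and per-field criteria are passed on to the results\n    page, which queries the selected target through filesharing.query(). 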
The window is in two parts, first the search window is shown\n and after search is clicked the gui moves to the next window which shows\n the search results.\n\n Currently one limitation: multiple search windows can not be open\n at the same time. If a new window is opened the old one is replaced\n with the new one.\"\"\"\n\n def __init__(self, fs_gui):\n GUI_Page.__init__(self, 'Search content')\n self.fs_gui = fs_gui\n self.target = None\n self.is_community = False\n\n self.vbox = gtk.VBox()\n self.pack_start(self.vbox)\n\n self.entries = {}\n self.initialize_search_window()\n self.initialize_action_list()\n \n self.title = gtk.Label('Search Content')\n self.vbox.pack_start(self.title, False, False)\n self.vbox.pack_start(gtk.HSeparator(), False, False)\n self.vbox.pack_start(self.search_fwindow, True, True)\n\n self.show_all()\n main_gui.add_page(self)\n\n def initialize_search_window(self):\n self.search_fwindow = new_scrollarea()\n self.search_vbox = gtk.VBox()\n self.search_fwindow.add_with_viewport(self.search_vbox)\n\n entrylist = [('keywords', 'Keywords:'), \\\n ('fname', 'Filename:'), \\\n ('title', 'Title:'), \\\n ('author', 'Author:'), \\\n ('description', 'Description:')]\n\n for (key, header) in entrylist:\n hbox = gtk.HBox()\n label = gtk.Label(header)\n label.set_size_request(130, -1)\n label.set_alignment(0, 0)\n hbox.pack_start(label, False, False)\n\n entry = gtk.Entry()\n self.entries[key] = entry\n entry.connect(\"activate\", self.search_cb)\n hbox.pack_start(entry, True, True)\n self.search_vbox.pack_start(hbox, False, False)\n\n def initialize_action_list(self):\n search_icon = gtk.gdk.pixbuf_new_from_file(join(get_dir(ICON_DIR), \"64px-search_content_icon.png\"))\n remove_icon = gtk.gdk.pixbuf_new_from_file(join(get_dir(ICON_DIR), \"64px-remove_content_icon.png\"))\n\n action_buttons = [(search_icon, 'Search', self.search_cb),\n (remove_icon, 'Clear', self.clear_cb)\n ]\n\n self.actions = Action_List()\n\n for action in action_buttons:\n (icon, text, cb) = action\n self.actions.add_button(icon, text, cb)\n\n self.pack_start(self.actions.get_widget(), False, True)\n\n def clear_cb(self, widget):\n for (key, entry) in self.entries.items():\n entry.set_text('')\n self.entries['keywords'].grab_focus()\n\n def search_cb(self, widget):\n query = {}\n for (key, entry) in self.entries.items():\n query[key] = entry.get_text()\n\n keywords = None\n criteria = []\n\n text = self.entries['keywords'].get_text().strip()\n if text != '':\n keywords = split_keywords(text)\n if len(keywords) == 0:\n keywords = None\n\n for field in ['fname', 'title', 'author', 'description']:\n text = self.entries[field].get_text().strip()\n if text != '':\n criteria.append((field, text))\n if len(criteria) == 0:\n criteria = None\n\n self.fs_gui.fs_results.show_browse_window(self.target, self.is_community, criteria=criteria, keywords=keywords)\n\n def show_file_sharing_search(self, target, is_community):\n \"\"\" Opens the whole file sharing window.\"\"\"\n\n if is_community:\n target_name = target.get('name')\n else:\n target_name = target.get('nick')\n\n self.is_community = is_community\n self.target = target\n\n self.title.set_text('Search content from: %s' % target_name)\n\n self.entries['keywords'].set_property(\"can-focus\", True)\n\n self.set_page_title(target_name, sub=True)\n main_gui.show_page(self)\n self.entries['keywords'].grab_focus()\n \nclass File_Sharing_Browse(GUI_Page):\n \"\"\" File_Sharing_Browse includes GUI window for browsing user's\n file shares. 
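Results arrive asynchronously from filesharing.query();\n    rows are colored by the peer's hop count and directories appear as\n    expandable tree nodes.\n    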
\"\"\"\n\n COL_SHAREMETA = 0\n COL_SHAREPATH = 1\n COL_USER = 2\n COL_TYPE = 3\n COL_ICON = 4\n COL_GUINAME = 5\n COL_NICK = 6\n COL_SIZE = 7\n COL_COLOR = 8 # internal attribute: depends on hop count\n COL_HOPS = 9\n\n # colors for different hopcounts: 0 = 1 = foreground color, 2 = yellow, 3 or more = red\n HOP_COLORS = [None, None, \"yellow\", \"red\"]\n\n def __init__(self, fs_gui, title):\n GUI_Page.__init__(self, title)\n self.fs_gui = fs_gui\n self.target = None\n self.is_community = False\n self.items = 0\n\n self.vbox = gtk.VBox()\n self.pack_start(self.vbox)\n\n self.folders = {}\n\n self.content_list = gtk.TreeStore(gobject.TYPE_PYOBJECT, str, gobject.TYPE_PYOBJECT, bool, gtk.gdk.Pixbuf, str, str, str, str, int)\n self.content_list.set_sort_column_id(self.COL_HOPS, gtk.SORT_ASCENDING)\n\n self.initialize_browse_list()\n self.initialize_action_list()\n\n self.title = gtk.Label(\"Content Browsing\")\n self.vbox.pack_start(self.title, False, False)\n scrollwin = new_scrollarea()\n scrollwin.add_with_viewport(self.browse_list_view)\n self.vbox.pack_start(scrollwin, True, True)\n\n self.show_all()\n main_gui.add_page(self)\n\n def back_action(self):\n if self == self.fs_gui.fs_results:\n return False\n key = (self.target, self.is_community)\n self.fs_gui.browse_pages.pop(key)\n main_gui.remove_page(self)\n self.destroy()\n return True\n\n def initialize_browse_list(self):\n self.browse_list_view = gtk.TreeView(self.content_list)\n # self.browse_list_view.get_selection().set_mode(gtk.SELECTION_MULTIPLE)\n self.browse_list_view.set_headers_visible(False)\n\n column = gtk.TreeViewColumn('')\n self.browse_list_view.append_column(column)\n column.set_expand(True)\n cr_icon = gtk.CellRendererPixbuf()\n cr_guiname = gtk.CellRendererText()\n column.pack_start(cr_icon, False)\n column.pack_start(cr_guiname)\n column.add_attribute(cr_icon, 'pixbuf', self.COL_ICON)\n column.add_attribute(cr_guiname, 'text', self.COL_GUINAME)\n column.add_attribute(cr_guiname, 'foreground', self.COL_COLOR)\n\n column = gtk.TreeViewColumn('')\n self.browse_list_view.append_column(column)\n cr_nick = gtk.CellRendererText()\n column.pack_start(cr_nick)\n column.add_attribute(cr_nick, 'text', self.COL_NICK)\n\n column = gtk.TreeViewColumn('')\n self.browse_list_view.append_column(column)\n cr_size = gtk.CellRendererText()\n column.pack_start(cr_size)\n column.add_attribute(cr_size, 'text', self.COL_SIZE)\n\n def initialize_action_list(self):\n download_icon = gtk.gdk.pixbuf_new_from_file(join(get_dir(ICON_DIR), \"64px-browse_content_icon.png\"))\n open_icon = gtk.gdk.pixbuf_new_from_file(join(get_dir(ICON_DIR), \"64px-publish_content_icon.png\"))\n metadata_icon = gtk.gdk.pixbuf_new_from_file(join(get_dir(ICON_DIR), \"64px-edit_metadata_icon.png\"))\n search_icon = gtk.gdk.pixbuf_new_from_file(join(get_dir(ICON_DIR), \"64px-search_content_icon.png\"))\n\n action_buttons = [(download_icon, 'Download', self.download_cb),\n (open_icon, 'Stream', self.stream_cb),\n (metadata_icon, 'Show\\nMetadata', self.show_metadata_cb),\n (search_icon, 'Search\\nFrom User', self.show_search_cb),\n (search_icon, 'Refresh', self.refresh_cb)\n ]\n\n self.actions = Action_List()\n\n for action in action_buttons:\n (icon, text, cb) = action\n self.actions.add_button(icon, text, cb)\n\n self.pack_start(self.actions.get_widget(), False, True)\n\n def download_cb(self, widget):\n model, selected = self.browse_list_view.get_selection().get_selected_rows()\n if len(selected) == 0:\n notification.notify('No file selected!', highpri=True)\n 
return\n row = self.content_list[selected[0]]\n\n meta = row[self.COL_SHAREMETA]\n user = row[self.COL_USER]\n sharepath = row[self.COL_SHAREPATH]\n guiname = row[self.COL_GUINAME]\n directory = row[self.COL_TYPE]\n\n if directory:\n what = 'directory'\n else:\n what = 'file'\n\n ctx = (user, meta['id'], sharepath, directory, meta)\n Download_Dialog(main_gui.get_main_window(),\n 'Download a %s' % what,\n 'Downloading a %s from %s: %s' % (what, user.tag(), guiname),\n self.download_dialog_cb, ctx)\n\n def download_dialog_cb(self, accept, open_content, ctx):\n if not accept:\n return\n\n (user, shareid, sharepath, directory, meta) = ctx\n\n name = basename(sharepath)\n if directory and name == '' and meta.get('description'):\n name = meta.get('description')\n\n destname = filesharing.get_download_path(name)\n\n if directory:\n ctx = (sharepath, destname, name)\n if not filesharing.query(user, self.download_results, ctx, shareid=shareid, sharepath=sharepath):\n notification.notify('Unable to list directory contents from %s' % user.tag(), True)\n return\n\n notification.notify('Trying to download from %s' % user.tag())\n ctx = (user, destname, sharepath, open_content)\n if not filesharing.get_files(user, basename(sharepath), [(shareid, sharepath, destname)], download_complete, ctx):\n notification.ok_dialog('File sharing',\n 'Unable to download a file from %s: %s' % (user.tag(), name))\n\n def download_results(self, user, allresults, metadict, ctx):\n (rootpath, destpath, name) = ctx\n\n if allresults == None:\n notification.notify('Unable to list directory contents from %s' % user.tag(), True)\n return\n\n files = []\n totallen = 0\n for (shareid, sharepath, fsize, ftype) in allresults:\n # NOTE: The sharepath should begin with rootpath\n i = len(rootpath)\n while i < len(sharepath):\n if sharepath[i] != '/':\n break\n i += 1\n destname = join(destpath, sharepath[i:])\n files.append((shareid, sharepath, destname))\n totallen += fsize\n if not filesharing.get_files(user, name + '/', files, None, totallen=totallen):\n notification.ok_dialog('File sharing',\n 'Unable to download a directory from %s: %s' % (user.tag(), name))\n\n def show_metadata_cb(self, widget):\n model, selected = self.browse_list_view.get_selection().get_selected_rows()\n if len(selected) == 0:\n notification.notify('No file selected!', highpri=True)\n return\n row = self.content_list[selected[0]]\n\n meta = row[self.COL_SHAREMETA]\n user = row[self.COL_USER]\n sharepath = row[self.COL_SHAREPATH]\n guiname = row[self.COL_GUINAME]\n shareid = meta['id']\n\n ctx = (user, guiname)\n filesharing.get_metas(user, [(shareid, sharepath)], self.got_metadata_for_file, ctx)\n filesharing.progress_update('Getting metadata for content...')\n\n def got_metadata_for_file(self, metas, ctx):\n (user, guiname) = ctx\n\n filesharing.progress_update(None)\n\n if len(metas) == 0:\n msg = 'No metadata for file: %s' %(guiname)\n notification.ok_dialog('Filesharing', msg)\n return\n\n if len(metas) != 1:\n debug('FileSharing: Found too many metadatas for file\\n')\n return\n\n (shareid, fname, meta) = metas[0]\n\n Show_Metadata_Dialog(main_gui.get_main_window(), guiname, meta)\n\n def show_browse_window(self, target, is_community, criteria=None, keywords=None):\n if is_community:\n target_name = target.get('name')\n else:\n target_name = target.get('nick')\n\n self.is_community = is_community\n self.target = target\n self.criteria = criteria\n self.keywords = keywords\n\n self.set_page_title(target_name, sub=True)\n main_gui.show_page(self)\n 
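# clear and re-query the target's shares every time the page is opened\n        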
self.update_content_list()\n \n def show_search_cb(self, widget):\n model, selected = self.browse_list_view.get_selection().get_selected_rows()\n if len(selected) == 0:\n notification.notify('No file selected!', highpri=True)\n return\n row = self.content_list[selected[0]]\n\n user = row[self.COL_USER]\n self.fs_gui.fs_search.show_file_sharing_search(user, False)\n\n def stream_cb(self, widget):\n model, selected = self.browse_list_view.get_selection().get_selected_rows()\n if len(selected) == 0:\n notification.notify('No file selected!', highpri=True)\n return\n row = self.content_list[selected[0]]\n\n meta = row[self.COL_SHAREMETA]\n user = row[self.COL_USER]\n sharepath = row[self.COL_SHAREPATH]\n\n filesharing.stream(user, meta['id'], sharepath)\n notification.notify('Trying to stream from %s' % user.tag())\n\n def update_content_list(self):\n self.content_list.clear()\n self.folders = {}\n\n self.items = 0\n self.title.set_text('No content found')\n\n if not self.is_community:\n if self.target == community.get_myself():\n # do not fetch own shares\n return\n if not filesharing.query(self.target, self.query_results, criteria=self.criteria, keywords=self.keywords):\n notification.notify('Unable to query shares from %s' % self.target.tag(), True)\n\n else: # Community\n filesharing.query_community(self.target, self.query_results, criteria=self.criteria, keywords=self.keywords)\n\n def refresh_cb(self, widget):\n self.update_content_list()\n\n def query_results(self, user, allresults, metadict, ctx):\n if allresults == None:\n notification.notify('Unable to query shares from %s' % user.tag(), True)\n return\n\n for (shareid, sharepath, fsize, ftype) in allresults:\n meta = metadict[shareid]\n\n size = ''\n if ftype == FTYPE_FILE:\n size = format_bytes(fsize)\n self.add_item(meta, user, shareid, sharepath, size, ftype == FTYPE_DIRECTORY)\n self.items += 1\n\n self.title.set_text('Showing %d items' % self.items)\n\n def add_item(self, meta, user, shareid, sharepath, fsize, directory):\n key = (user, shareid, sharepath)\n riter = self.folders.get(key)\n if riter != None:\n return riter\n\n if sharepath != '/' and meta.get('type') == SHARE_DIR:\n parent_path = dirname(sharepath)\n parent = self.add_item(meta, user, shareid, parent_path, '', True)\n else:\n parent = None\n\n guiname = basename(sharepath)\n\n if directory:\n filetype = 'folder'\n if guiname == '' and meta.get('description'):\n guiname = meta.get('description')\n guiname += '/'\n else:\n filetype = get_filetype(sharepath)\n\n guiname = cut_text(guiname, MAX_GUI_NAME)\n\n ft_icon = get_filetype_icon(filetype)\n nick = user.get('nick')\n hops = user.get('hops')\n if hops == None:\n hops = 0\n if hops < 4:\n color = self.HOP_COLORS[hops]\n else:\n color = self.HOP_COLORS[3]\n riter = self.content_list.append(parent, [meta, sharepath, user, directory, ft_icon, guiname, nick, fsize, color, hops])\n\n if directory:\n self.folders[key] = riter\n\n return riter\n\nclass Show_Metadata_Dialog:\n\n def __init__(self, main_gui_window, guiname, meta):\n self.meta = meta\n\n self.dialog = gtk.Dialog('%s\\'s Metadata' %guiname, main_gui_window,\n gtk.DIALOG_DESTROY_WITH_PARENT | gtk.DIALOG_MODAL,\n (gtk.STOCK_OK, gtk.RESPONSE_OK))\n self.dialog.set_border_width(5)\n self.dialog.vbox.set_spacing(2)\n\n self.meta_widgets = {}\n\n self.initialize_widgets()\n\n self.dialog.connect(\"response\", self.response_handler)\n self.dialog.show_all()\n\n def response_handler(self, widget, event):\n self.dialog.destroy()\n\n def initialize_widgets(self):\n 
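# render one bold-label row per populated metadata field; show 'Empty metadata' when nothing is set\n        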
main_vbox = gtk.VBox()\n\n meta_components = (('title', 'Title:'),\n ('keywords', 'Keywords:'),\n ('author', 'Author:'),\n ('year', 'Year:'),\n ('type', 'File type:'),\n ('description', 'Description:'))\n\n has_metadata = False\n for (key, header) in meta_components:\n value = self.meta.get(key)\n if value == None or value == []:\n continue\n\n has_metadata = True\n text = str(value)\n\n hbox = gtk.HBox()\n label = gtk.Label(header)\n label.set_size_request(150, -1)\n label.set_alignment(0, 0)\n label.modify_font(pango.FontDescription(\"Bold\"))\n hbox.pack_start(label, False, False)\n\n label = gtk.Label(text)\n label.set_size_request(250, -1)\n label.set_alignment(0, 0)\n label.set_line_wrap(True)\n\n hbox.pack_start(label, True, True)\n main_vbox.pack_start(hbox, False, False)\n\n if not has_metadata:\n main_vbox.pack_start(gtk.Label('Empty metadata'))\n\n self.dialog.vbox.add(main_vbox)\n\nclass Edit_Metadata_Dialog:\n\n def __init__(self, main_gui_window, shareid, guiname, sharepath, fs_gui):\n self.fs_gui = fs_gui\n self.share = filesharing.get_share(shareid)\n if self.share == None:\n warning('Bad share in Edit_Metadata_Dialog %d\\n' % shareid)\n return\n self.sharepath = sharepath\n self.meta = self.share.get_filemeta(self.sharepath, forceread=True)\n if self.meta == None:\n self.meta = Content_Meta()\n\n self.dialog = gtk.Dialog('Edit %s\\'s Metadata' %(guiname), main_gui_window,\n gtk.DIALOG_DESTROY_WITH_PARENT | gtk.DIALOG_MODAL,\n (gtk.STOCK_OK, gtk.RESPONSE_OK,\n gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL))\n self.dialog.set_border_width(5)\n self.dialog.vbox.set_spacing(2)\n\n self.meta_widgets = {}\n\n self.initialize_widgets()\n\n self.dialog.connect(\"response\", self.response_handler)\n self.dialog.show_all()\n\n def response_handler(self, widget, event):\n if event == gtk.RESPONSE_OK:\n self.update_content_metadata()\n\n self.dialog.destroy()\n\n def initialize_widgets(self):\n main_vbox = gtk.VBox()\n\n meta_components = (('title', 'Title:'),\n ('keywords', 'Keywords:'),\n ('author', 'Author:'),\n ('year', 'Year:'),\n ('type', 'File type:'),\n ('description', 'Description:'))\n\n filetypes = ('application', 'audio', 'image', 'text', 'video')\n\n for (key, header) in meta_components:\n hbox = gtk.HBox()\n label = gtk.Label(header)\n label.set_size_request(150, -1)\n label.set_alignment(0, 0.5)\n hbox.pack_start(label, False, False)\n\n value = self.meta.get(key)\n\n text = ''\n if value != None:\n text = str(value)\n\n if key == 'type': # create dropdown box separately\n widget = gtk.combo_box_entry_new_text()\n for type in filetypes:\n widget.append_text(type)\n entry = widget.child\n entry.set_text(text)\n self.meta_widgets[key] = entry\n else:\n widget = gtk.Entry()\n widget.set_text(text)\n self.meta_widgets[key] = widget\n widget.set_size_request(250, -1)\n hbox.pack_start(widget, True, True)\n main_vbox.pack_start(hbox, False, False)\n\n self.dialog.vbox.add(main_vbox)\n\n def update_content_metadata(self):\n for (key, widget) in self.meta_widgets.items():\n value = widget.get_text() \n if value == '':\n value = None\n\n self.meta.set(key, value)\n\n self.share.update_meta(self.sharepath, self.meta)\n\nclass File_Sharing_Publish(GUI_Page):\n \"\"\" File_Sharing_Publish includes GUI window for adding shares. 
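Files and\n    directories are registered via filesharing.add_share() and listed in a\n    tree view; per-item metadata can be edited before publishing.\n    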
\"\"\"\n\n COL_SHAREMETA = 0\n COL_SHAREPATH = 1\n COL_TYPE = 2\n COL_ICON = 3\n COL_GUINAME = 4\n COL_SIZE = 5\n\n def __init__(self, fs_gui):\n GUI_Page.__init__(self, 'Publish content')\n self.fs_gui = fs_gui\n\n self.icon = gtk.gdk.pixbuf_new_from_file(join(get_dir(ICON_DIR), \\\n \"64px-publish_content_icon.png\"))\n \n self.vbox = gtk.VBox()\n self.pack_start(self.vbox)\n\n self.folders = {}\n\n self.sharelist = gtk.TreeStore(gobject.TYPE_PYOBJECT, str, bool, gtk.gdk.Pixbuf, str, str)\n\n self.initialize_share_list()\n self.initialize_action_list()\n\n self.title = gtk.Label(\"Content Publishing\")\n self.vbox.pack_start(self.title, False, False)\n scrollwin = new_scrollarea()\n scrollwin.add_with_viewport(self.sharelist_view)\n self.vbox.pack_start(scrollwin, True, True)\n\n self.show_all()\n main_gui.add_page(self)\n\n def initialize_share_list(self):\n self.sharelist_view = gtk.TreeView(self.sharelist)\n self.sharelist_view.set_headers_visible(False)\n\n column = gtk.TreeViewColumn('')\n self.sharelist_view.append_column(column)\n column.set_expand(True)\n cr_icon = gtk.CellRendererPixbuf()\n cr_guiname = gtk.CellRendererText()\n column.pack_start(cr_icon, False)\n column.pack_start(cr_guiname)\n column.add_attribute(cr_icon, 'pixbuf', self.COL_ICON)\n column.add_attribute(cr_guiname, 'text', self.COL_GUINAME)\n\n column = gtk.TreeViewColumn('')\n self.sharelist_view.append_column(column)\n cr_size = gtk.CellRendererText()\n column.pack_start(cr_size)\n column.add_attribute(cr_size, 'text', self.COL_SIZE)\n\n def initialize_action_list(self):\n add_icon = gtk.gdk.pixbuf_new_from_file(join(get_dir(ICON_DIR), \"64px-add_content_icon.png\"))\n remove_icon = gtk.gdk.pixbuf_new_from_file(join(get_dir(ICON_DIR), \"64px-remove_content_icon.png\"))\n open_icon = gtk.gdk.pixbuf_new_from_file(join(get_dir(ICON_DIR), \"64px-publish_content_icon.png\"))\n metadata_icon = gtk.gdk.pixbuf_new_from_file(join(get_dir(ICON_DIR), \"64px-edit_metadata_icon.png\"))\n\n action_buttons = [(add_icon, 'Publish\\nFile', self.add_file_cb),\n (add_icon, 'Publish\\nDirectory', self.add_dir_cb),\n (remove_icon, 'Remove', self.remove_cb),\n (open_icon, 'Open', self.open_cb),\n (metadata_icon, 'Edit\\nMetadata', self.edit_metadata_cb)\n ]\n\n self.actions = Action_List()\n\n for action in action_buttons:\n (icon, text, cb) = action\n self.actions.add_button(icon, text, cb)\n\n self.pack_start(self.actions.get_widget(), False, True)\n \n def show_publish_window(self, target, is_community):\n if is_community:\n target_name = target.get('name')\n else:\n target_name = target.get('nick')\n\n self.sharelist_view.set_model(self.sharelist)\n self.sharelist_view.show_all()\n \n self.title.set_text('Share Content with %s' %(target_name))\n\n self.update_sharelist()\n \n main_gui.show_page(self)\n\n def add_file_cb(self, widget):\n File_Chooser(main_gui.get_main_window(), FILE_CHOOSER_TYPE_FILE, True, self.add_file_chooser_cb)\n\n def add_file_chooser_cb(self, filenames, ctx):\n if filenames == None:\n return\n\n for f in filenames:\n if isfile(f):\n sharemeta = Share_Meta()\n sharemeta.set('description', basename(f))\n filesharing.add_share(f, sharemeta=sharemeta, stype=SHARE_FILE)\n else:\n warning('Invalid filename from file chooser dialog\\n')\n\n self.update_sharelist()\n\n def add_dir_cb(self, widget):\n File_Chooser(main_gui.get_main_window(), FILE_CHOOSER_TYPE_DIR, False, self.add_dir_chooser_cb)\n\n def add_dir_chooser_cb(self, dir_name, ctx):\n if dir_name == None:\n return\n\n if isdir(dir_name):\n sharemeta = 
Share_Meta()\n sharemeta.set('description', basename(dir_name))\n filesharing.add_share(dir_name, sharemeta=sharemeta, stype=SHARE_DIR)\n else:\n warning('Invalid dirname from file chooser dialog\\n')\n\n self.update_sharelist()\n\n def open_cb(self, widget):\n model, selected = self.sharelist_view.get_selection().get_selected_rows()\n if len(selected) == 0:\n notification.notify('No file selected!', highpri=True)\n return\n row = self.sharelist[selected[0]]\n\n meta = row[self.COL_SHAREMETA]\n sharepath = row[self.COL_SHAREPATH]\n guiname = row[self.COL_GUINAME]\n shareid = meta['id']\n\n fullpath = filesharing.native_path(shareid, sharepath)\n if fullpath == None:\n warning('FileSharingGUI: Can not open shareid %d sharepath %s\\n' %(shareid, sharepath))\n return\n\n notification.notify('Opening content %s' %(guiname))\n\n if not open_file(fullpath):\n notification.ok_dialog('Can not open file', 'Can not open file: %s\\nUnknown format, or not supported.' %(fullpath))\n\n def remove_cb(self, widget):\n model, selected = self.sharelist_view.get_selection().get_selected_rows()\n if len(selected) == 0:\n notification.notify('No share selected!', highpri=True)\n return\n row = self.sharelist[selected[0]]\n\n meta = row[self.COL_SHAREMETA]\n shareid = meta['id']\n filesharing.remove_share(filesharing.get_share(shareid))\n\n self.update_sharelist()\n \n def edit_metadata_cb(self, widget):\n model, selected = self.sharelist_view.get_selection().get_selected_rows()\n if len(selected) == 0:\n notification.notify('No share selected!', highpri=True)\n return\n row = self.sharelist[selected[0]]\n\n meta = row[self.COL_SHAREMETA]\n sharepath = row[self.COL_SHAREPATH]\n guiname = row[self.COL_GUINAME]\n shareid = meta['id']\n\n Edit_Metadata_Dialog(main_gui.get_main_window(), shareid, guiname, sharepath, self)\n\n def update_sharelist(self):\n self.sharelist.clear()\n self.folders = {}\n\n for (shareid, share) in filesharing.shares.items():\n for (sharepath, ftype) in share.list_recursively().items():\n size = ''\n if ftype == FTYPE_FILE:\n nativepath = share.native_path(sharepath)\n size = format_bytes(filesize(nativepath))\n self.add_item(share.meta, shareid, sharepath, size, ftype == FTYPE_DIRECTORY)\n\n def add_item(self, meta, shareid, sharepath, fsize, directory):\n key = (shareid, sharepath)\n riter = self.folders.get(key)\n if riter != None:\n return riter\n\n if sharepath != '/' and meta.get('type') == SHARE_DIR:\n parent_path = dirname(sharepath)\n parent = self.add_item(meta, shareid, parent_path, '', True)\n else:\n parent = None\n\n guiname = basename(sharepath)\n\n if directory:\n filetype = 'folder'\n if guiname == '' and meta.get('description'):\n guiname = meta.get('description')\n guiname += '/'\n else:\n filetype = get_filetype(sharepath)\n\n guiname = cut_text(guiname, MAX_GUI_NAME)\n\n ft_icon = get_filetype_icon(filetype)\n riter = self.sharelist.append(parent, [meta, sharepath, directory, ft_icon, guiname, fsize])\n\n if directory:\n self.folders[key] = riter\n\n return riter\n\ndef download_complete(success, ctx):\n (user, destname, sharepath, open_content) = ctx\n if success and open_content:\n if not open_file(destname):\n notification.ok_dialog('Can not open file',\n 'Can not open file: %s\\nUnknown format, or not supported.' 
% (destname))\n    if not success:\n        notification.ok_dialog('File sharing',\n            'Unable to download a file from %s: %s' % (user.tag(), sharepath))\n\ndef get_filetype(sharepath):\n    (mimetype, encoding) = guess_type(sharepath)\n    if not mimetype:\n        return None\n    return mimetype.split('/')[0]\n\ndef get_filetype_icon(filetype):\n    ft_default = '32px-Text-x-generic-template.png'\n    ft_icons = {'application': '32px-Applications-system.png',\n                'audio': '32px-Audio-x-generic.png',\n                'image': '32px-Image-x-generic.png',\n                'text': '32px-Text-x-generic.png',\n                'video': '32px-Video-x-generic.png',\n                'folder': '32px-Folder.png',\n                'remotefolder': '32px-Folder-remote.png',\n                'remotefolder-low': 'send-low-connection.png',\n                'remotefolder-no': 'send-no-connection.png',\n                None: ft_default }\n\n    ft_icon_path = join(get_dir(ICON_DIR), ft_icons.get(filetype, ft_default))\n    return gtk.gdk.pixbuf_new_from_file(ft_icon_path)\n\ndef init_ui(main_gui):\n    # We need to know about sendfile GUI, because the send action is started\n    # from our GUI\n    sendfilegui = sendfile_gui.init_ui(main_gui)\n    File_Sharing_GUI(main_gui, sendfilegui)\n","repo_name":"proximate/proximate","sub_path":"filesharing_gui.py","file_name":"filesharing_gui.py","file_ext":"py","file_size_in_byte":39348,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"49"} +{"seq_id":"43476200746","text":"\"\"\"\nPowell method for quadratic function\n\"\"\"\nimport numpy as np\nimport gold_section as gs\nimport plotter\n\ndef func(x):\n    # Calculate the objective function value\n    of = (x[0] - 1) ** 2 + (x[1] - 1) ** 2 - x[0] * x[1]\n    # of = (x[0] - 1)**2 + (x[1] - 1)**2\n    # of = fun + np.random.uniform(-1, 1)\n    return of\n\n\nEPS = 1.e-3\nx0 = np.array([-1, 0])\nd1 = np.array([1, 0])\nd2 = np.array([0, 1])\npath = [x0,]\nx1 = gs.gold_section_d (x0, d1, func, EPS)\npath.append(x1)\nx2 = gs.gold_section_d (x1, d2, func, EPS)\npath.append(x2)\nx3 = gs.gold_section_d (x2, d1, func, EPS)\npath.append(x3)\ngradNew = x3 - x1\ndirGrad = gradNew/np.linalg.norm(gradNew, 2)\npath.append(x1)\nxOpt = gs.gold_section_d (x3, dirGrad, func, EPS)\npath.append(xOpt)\nprint(x1, func(x1))\nprint(x2, func(x2))\nprint(x3, func(x3))\nprint(f\"Optimal point: [{xOpt[0]:9.5f}, {xOpt[1]:9.5f}]\")\n\n# Evaluate the objective function on a grid for plotting\nnPoints = 100\nx1Lim = [-4, 4]\nx2Lim = [-4, 4]\nx1list = np.linspace(x1Lim[0], x1Lim[1], nPoints)\nx2list = np.linspace(x2Lim[0], x2Lim[1], nPoints)\nX1, X2 = np.meshgrid(x1list, x2list)\nObjFun = func([X1, X2])\n\nplotter.plot_graph(X1, X2, ObjFun, path)\n","repo_name":"otchkalov/optimization","sub_path":"Powell/powell_quadratic.py","file_name":"powell_quadratic.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"71020629908","text":"import sys\nimport logging\n\nfrom molgenis.capice.core.capice_manager import CapiceManager\nfrom molgenis.capice.utilities.custom_logging_filter import CustomLoggingFilter\n\n\nclass Logger:\n    \"\"\"\n    Singleton logger class developed by both:\n    - Martijn Vochteloo\n    - Robert Jarik Sietsma.\n    Facilitates the python logging library\n    \"\"\"\n\n    class __Logger:\n        def __init__(self):\n            self.global_settings = CapiceManager()\n            self.stdout = False\n            self.stdout_filter = []\n            self.stderr_loglevel = 50\n            self.min_loglevel = 50\n            self.set_stderr_loglevel()\n            self.logger = None\n            if self.logger is None:\n                self.load_logger()\n\n        def set_stderr_loglevel(self):\n            \"\"\"\n            Function to set the log level at 
which messages are printed or\n            logged. For more information, see:\n            https://docs.python.org/3/library/logging.html#logging-levels\n            :return: logging level\n            \"\"\"\n            if not self.global_settings.critical_logging_only:\n                self.stderr_loglevel = 30\n                self.min_loglevel = 30\n            if self.global_settings.loglevel and self.stderr_loglevel < 50:\n                self.stdout = True\n                self._set_stdout_filter()\n\n        def _set_stdout_filter(self):\n            \"\"\"\n            Required because else Warning, Error and CRITICAL messages are\n            printed to sys.stdout.\n            \"\"\"\n            logging_info = [logging.INFO]\n            logging_debug = logging_info + [logging.DEBUG]\n            dict_of_levels = {10: logging_debug, 20: logging_info}\n            self.stdout_filter = dict_of_levels[self.global_settings.loglevel]\n            self.min_loglevel = self.global_settings.loglevel\n\n        def load_logger(self):\n            \"\"\"\n            Function to set up the logger instance with the stdout and stderr\n            StreamHandlers (stdout assuming verbose flag is called) and the\n            formatter.\n            \"\"\"\n            # Making a root logger to make sure the level is set correctly.\n            logger = logging.getLogger()\n            # Now renaming it to CAPICE.\n            logger.name = 'CAPICE'\n\n            # Capture warnings\n            logging.captureWarnings(True)\n\n            formatter = logging.Formatter(\n                \"%(asctime)s \"\n                \"%(levelname)8s: \"\n                \"%(message)s\",\n                datefmt='%Y-%m-%d %H:%M:%S'\n            )\n\n            # Setting the log level to debug, but with an applied filter\n            logger.setLevel(self.min_loglevel)\n\n            # sys.stdout (if critical logging only isn't called and one of\n            # the verbose flags is called).\n            if self.stdout:\n                stdout_handler = logging.StreamHandler(sys.stdout)\n                stdout_handler.setLevel(self.global_settings.loglevel)\n                stdout_handler.setFormatter(formatter)\n                # Filter out warning, error and critical messages.\n                stdout_handler.addFilter(CustomLoggingFilter(self.stdout_filter))\n                logger.addHandler(stdout_handler)\n\n            # sys.stderr\n            stderr_handler = logging.StreamHandler(sys.stderr)\n            stderr_handler.setLevel(self.stderr_loglevel)\n            stderr_handler.setFormatter(formatter)\n            logger.addHandler(stderr_handler)\n            self.logger = logger\n\n        @property\n        def logger(self):\n            \"\"\"\n            Property to get the logger instance.\n\n            :return: logging.Logger\n            \"\"\"\n            return self._logger\n\n        @logger.setter\n        def logger(self, value):\n            \"\"\"\n            Setter for the logger instance.\n\n            :param value:\n            :return:\n            \"\"\"\n            self._logger = value\n\n    instance = None\n\n    def __new__(cls):\n        \"\"\"\n        Class method to set Logger instance\n        :return: instance\n        \"\"\"\n        if not Logger.instance:\n            Logger.instance = Logger.__Logger()\n        return Logger.instance\n\n    def __init__(self):\n        \"\"\"\n        __init__ method to set instance to Logger.__Logger()\n        \"\"\"\n        if not Logger.instance:\n            Logger.instance = Logger.__Logger()\n\n    def __getattr__(self, name):\n        \"\"\"\n        Method to return the value of the named attribute\n        :param name: str\n        :return: str\n        \"\"\"\n        return getattr(self.instance, name)\n","repo_name":"molgenis/capice","sub_path":"src/molgenis/capice/core/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":4687,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"49"} +{"seq_id":"36881361838","text":"import numpy as np\nimport pytorch_lightning as pl\nimport torch\n\nfrom util import false_negative, false_positive, iou\n\nclass PerformanceMetricsMonitor(pl.Callback):\n    '''Compute and log metrics about the predicted clusters 
in `FewShotModel`.'''\n\n def __init__(\n self,\n log_every_n_batches=1,\n main_tag='metrics',\n ):\n self.log_every_n_batches = log_every_n_batches\n self.main_tag = main_tag\n self.iou_train = []\n self.tp_train = 0\n self.fp_train = 0\n self.fn_train = 0\n self.iou_val = []\n self.tp_val = 0\n self.fp_val = 0\n self.fn_val = 0\n self.iou_test = []\n self.tp_test = 0\n self.fp_test = 0\n self.fn_test = 0\n\n def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx):\n if batch_idx % self.log_every_n_batches == 0:\n pl_module.eval()\n x, y = batch\n predictions = pl_module.make_prediction(x, 'binary')\n if y.sum() > 0 or predictions.sum() > 0:\n self.iou_train.append(iou(predictions, y).cpu())\n # computing the f1 score for every batch and then averaging\n # is not the same as computing it over all of the batches collectively\n fn = false_negative(predictions, y)\n fp = false_positive(predictions, y)\n tp = y.sum() - fn\n self.fn_train += fn\n self.fp_train += fp\n self.tp_train += tp\n pl_module.train()\n\n def on_train_epoch_end(self, trainer, pl_module):\n pl_module.log(\n f'{self.main_tag}/iou/train',\n np.mean(self.iou_train),\n )\n pl_module.log(\n f'{self.main_tag}/f1/train',\n 2*self.tp_train / (2*self.tp_train + self.fp_train + self.fn_train),\n )\n pl_module.log(\n f'{self.main_tag}/precision/train',\n self.tp_train / (self.tp_train + self.fp_train),\n )\n pl_module.log(\n f'{self.main_tag}/recall/train',\n self.tp_train / (self.tp_train + self.fn_train),\n )\n self.iou_train = []\n self.tp_train = 0\n self.fp_train = 0\n self.fn_train = 0\n\n def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):\n x, y = batch\n predictions = pl_module.make_prediction(x, 'binary')\n if y.sum() > 0 or predictions.sum() > 0:\n self.iou_val.append(iou(predictions, y).cpu())\n else:\n self.iou_val.append(torch.FloatTensor([1]).cpu())\n # computing the f1 score for every batch and then averaging\n # is not the same as computing it over all of the batches collectively\n fn = false_negative(predictions, y)\n fp = false_positive(predictions, y)\n tp = y.sum() - fn\n self.fn_val += fn\n self.fp_val += fp\n self.tp_val += tp\n\n def on_validation_epoch_end(self, trainer, pl_module):\n pl_module.log(\n f'{self.main_tag}/iou/val',\n np.mean(self.iou_val),\n )\n pl_module.log(\n f'{self.main_tag}/f1/val',\n 2*self.tp_val / (2*self.tp_val + self.fp_val + self.fn_val),\n )\n pl_module.log(\n f'{self.main_tag}/precision/val',\n self.tp_val / (self.tp_val + self.fp_val),\n )\n pl_module.log(\n f'{self.main_tag}/recall/val',\n self.tp_val / (self.tp_val + self.fn_val),\n )\n self.iou_val = []\n self.tp_val = 0\n self.fp_val = 0\n self.fn_val = 0\n","repo_name":"MaHermann/filtering-specialized-change","sub_path":"callbacks/performance_metrics_monitor.py","file_name":"performance_metrics_monitor.py","file_ext":"py","file_size_in_byte":3612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"12630656","text":"# Imports\nimport requests\nimport json\n\nurl = 'http://ombiurl:5000' # URL For OMBI\napikey = '1111111111111111111111111111' # OMBI ApiKey, can be found in ombi settings\nadminUser = 'yourAdminUser' # Required for v4 Preview\n\n# Authorization headers\nheaders = {\n \"ApiKey\": apikey,\n \"UserName\": adminUser\n}\n\n### MOVIES\n# Send request to get movies\nresponse = requests.get(url + '/api/v1/Request/movie', headers=headers)\n\n# Clean request and make pretty\njsonResponse = 
json.loads(response.content)\n\n# Find each movie in the requests that is available on plex\nfor availability in jsonResponse:\n    if availability[\"markedAsAvailable\"]:\n        deleteRequest = requests.delete(url + '/api/v1/Request/movie/' + str(availability[\"id\"]), headers=headers)\n\n\n### TV Shows\n# Send request to get TV shows\nresponse = requests.get(url + '/api/v1/Request/tv', headers=headers)\n\n# Clean request and make pretty\njsonResponse = json.loads(response.content)\n\n# Find each series in the requests that is available on plex\nfor series in jsonResponse:\n    # Availability is hiding in a subkey\n    subkey = series[\"childRequests\"]\n    for availability in subkey:\n        if availability[\"markedAsAvailable\"]:\n            deleteRequest = requests.delete(url + '/api/v1/Request/tv/' + str(series[\"id\"]), headers=headers)\n","repo_name":"oomathurman/ombiCleanup","sub_path":"ombiCleanup.py","file_name":"ombiCleanup.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"37580083901","text":"import sys\r\nimport cv2\r\nimport csv\r\nfrom mpl_toolkits.axes_grid1 import ImageGrid\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom random import randint, shuffle\r\nimport sklearn\r\nfrom p3_inputs import *\r\n\r\n# Function to display images in a (?x3) matrix form\r\ndef show_images(X, Y, num, text, size=(10,10)):\r\n    if( num < 3 ):\r\n        n_rows = 1\r\n        n_cols = num\r\n    else:\r\n        n_rows = int(num/3)\r\n        n_cols = 3\r\n    fig = plt.figure(1, figsize=size, frameon=False)\r\n    grid = ImageGrid(fig, 111,nrows_ncols=(n_rows,n_cols),axes_pad=0.1)\r\n    len_X = len(X)\r\n    for i in range(n_rows*n_cols):\r\n        idx = randint(0,len_X-1)\r\n        grid[i].imshow(X[idx])\r\n        grid[i].text(10,30,str(Y[idx]),backgroundcolor='white',)\r\n        grid[i].axis('off')\r\n    plt.suptitle(text)\r\n    plt.show()\r\n\r\n# Function to calculate size of object in memory (MB)\r\ndef how_big(object):\r\n    return (sys.getsizeof(object)/1024/1024)\r\n\r\n# Function to plot training data distribution in buckets\r\ndef training_data_distribution(Y_tr,bucket=[0.5,1.0]):\r\n    data_dict = {}\r\n    \r\n    for i in range(len(bucket) - 1):\r\n        key = str(bucket[i]) + \":\" + str(bucket[i+1])\r\n        data_dict[key] = 0\r\n    data_dict['0'] = 0\r\n    \r\n    for i in range(len(Y_tr)):\r\n        key = ''\r\n        for j in range(len(bucket)-1):\r\n            if (Y_tr[i]==0):\r\n                data_dict['0'] += 1\r\n                break\r\n            elif (Y_tr[i] >= bucket[j] and\\\r\n                  Y_tr[i] < bucket[j+1]):\r\n                key = str(bucket[j]) + \":\" + str(bucket[j+1])\r\n                data_dict[key] += 1\r\n                break\r\n\r\n    x = np.arange(len(data_dict))\r\n    fig, ax = plt.subplots()\r\n    bar = ax.bar(x, data_dict.values(),tick_label=data_dict.keys())\r\n    plt.show()\r\n    return data_dict\r\n\r\n# Function to display a simple progress bar\r\ndef drawProgressBar(percent, barLen = 20, text=\"\"):\r\n    sys.stdout.write(\"\\r\")\r\n    progress = \"\"\r\n    for i in range(barLen):\r\n        if i < int(barLen * percent):\r\n            progress += \"=\"\r\n        else:\r\n            progress += \" \"\r\n    msg = text + \"[ %s ] %.2f%%\" % (progress, percent * 100)\r\n    sys.stdout.write(msg)\r\n    sys.stdout.flush()\r\n\r\n# Function to read CSV and the images specified within it\r\ndef read_csv(csv_file): \r\n    X_tr_list = []\r\n    Y_tr_list = []\r\n    counter = 0\r\n    with open(csv_file,'r') as f:\r\n        reader = csv.reader(f)\r\n        for row in reader:\r\n            temp = cv2.imread(row[0])\r\n            # temp = cv2.cvtColor(temp,cv2.COLOR_BGR2RGB)\r\n            X_tr_list.append(temp)\r\n            Y_tr_list.append(float(row[3]))\r\n            counter += 1\r\n            
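# Overwrite the same console line to show a running count of loaded records\r\n            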
sys.stdout.write(\"\\r\")\r\n            sys.stdout.write(\"Loaded \\t{}\\t records...\".format(counter))\r\n            sys.stdout.flush()\r\n    \r\n    X_tr = np.array(X_tr_list)\r\n    Y_tr = np.array(Y_tr_list)\r\n    print()\r\n    return X_tr, Y_tr\r\n\r\n# Function to generate a master CSV file\r\ndef generate_master_csv(csv_list,path,mode='read'):\r\n    if (mode=='write'):\r\n        fmode = 'w'\r\n    else:\r\n        fmode = 'a'\r\n    with open(path,fmode,newline='') as f:\r\n        writer = csv.writer(f)\r\n        for csv_path in csv_list:\r\n            with open(csv_path,'r') as f_:\r\n                reader = csv.reader(f_)\r\n                for row in reader:\r\n                    writer.writerow(row)\r\n\r\n# Crop and resize the image\r\ndef crop_and_resize(img):\r\n    y1 = 60\r\n    y2 = 140\r\n    \r\n    # Crop the image\r\n    cropped = img[y1:y2, :]\r\n\r\n    # Resize the image\r\n    resized = cv2.resize(cropped,(160,40))\r\n\r\n    return resized\r\n\r\n# Generator for training and validation data\r\ndef generator(samples, batch_size=128):\r\n    num_samples = len(samples)\r\n    while 1: # Loop forever so the generator never terminates\r\n        shuffle(samples)\r\n        for offset in range(0, num_samples, batch_size):\r\n            batch_samples = samples[offset:offset+batch_size]\r\n\r\n            images = []\r\n            angles = []\r\n            \r\n            for batch_sample in batch_samples:\r\n                center_image = cv2.imread(batch_sample[0])\r\n                center_angle = float(batch_sample[3])\r\n                \r\n                images.append(center_image)\r\n                angles.append(center_angle)\r\n                \r\n                # Add shadows\r\n                shadowed = add_random_shadows(center_image)\r\n                images.append(shadowed)\r\n                angles.append(center_angle)\r\n                \r\n                # Add random brightness\r\n                adj_brightness = change_brightness(center_image)\r\n                images.append(adj_brightness)\r\n                angles.append(center_angle)\r\n                \r\n            X_train = np.array(images,dtype=np.float32)\r\n            y_train = np.array(angles,dtype=np.float32)\r\n\r\n            # Normalize pixel values to [-1, 1]\r\n            X_train = X_train / 127.5\r\n            X_train = X_train - 1.\r\n            yield sklearn.utils.shuffle(X_train, y_train)\r\n\r\n# Function to add random shadows to the image\r\ndef add_random_shadows(img):\r\n    alpha = 0.35\r\n    greys = { '1' : [220,220,220],\\\r\n              '2' : [211,211,211],\\\r\n              '3' : [192,192,192],\\\r\n              '4' : [169,169,169],\\\r\n              '5' : [128,128,128],\\\r\n              '6' : [105,105,105],\\\r\n              '7' : [119,136,153],\\\r\n              '8' : [112,128,144],\\\r\n              '9' : [47,79,79],\\\r\n              '10' : [0,0,0]}\r\n    grey_idx = np.random.randint(1,11)\r\n    overlay = img.copy()\r\n    output = img.copy()\r\n    y1 = 0\r\n    x1 = np.random.randint(1,161)\r\n    x2 = np.random.randint(1,161)\r\n    y2 = 40\r\n    cv2.rectangle(overlay, \\\r\n                  (x1,y1),\\\r\n                  (x2,y2),\\\r\n                  (greys[str(grey_idx)][0],greys[str(grey_idx)][1],greys[str(grey_idx)][2]),\\\r\n                  -1 )\r\n    cv2.addWeighted(overlay, alpha, output, 1 - alpha, 0, output)\r\n    return output\r\n\r\n# Function to change the brightness of the image\r\ndef change_brightness(img):\r\n    temp = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)\r\n    temp = np.array(temp, dtype = np.float32)\r\n    factor = np.random.uniform(low=0.9,high=1.1)\r\n    temp[:,:,2] = temp[:,:,2]*factor\r\n    temp[:,:,2][temp[:,:,2]>255] = 255\r\n    temp = np.array(temp, dtype = np.uint8) \r\n    temp = cv2.cvtColor(temp,cv2.COLOR_HSV2BGR)\r\n    return temp\r\n\r\n# Function to add data with random brightness\r\ndef add_data_random_brightness(X,Y,range_num=[],num=0,path=None):\r\n    x = []\r\n    y = []\r\n    idx_list = []\r\n\r\n    for i in range(len(Y)):\r\n        if (Y[i] >= range_num[0] and Y[i] < range_num[1]):\r\n            idx_list.append(i)\r\n    \r\n    if 
(len(range_num)!=0): \r\n while (len(x) < num):\r\n idx = np.random.randint(0,len(idx_list))\r\n i = idx_list[idx]\r\n x.append(change_brightness(X[i]))\r\n y.append(Y[i])\r\n \r\n X_aug1 = np.array(x)\r\n Y_aug1 = np.array(y)\r\n print()\r\n y1 = 60\r\n y2 = 140\r\n\r\n with open(path,'a',newline='') as f:\r\n writer = csv.writer(f,delimiter=',')\r\n for i in range(len(X_aug1)):\r\n global_counter.counter += 1\r\n row = []\r\n img = ''\r\n img = img_path + str(global_counter.counter)\r\n img = img + \".png\"\r\n cv2.imwrite(img,X_aug1[i])\r\n msg = \"Writing file {0}/{1}...\".format(i+1,num)\r\n row.append(img)\r\n row.append(\"n/a\")\r\n row.append(\"n/a\")\r\n row.append(str(Y_aug1[i]))\r\n row.append(\"n/a\")\r\n row.append(\"n/a\")\r\n row.append(\"n/a\") \r\n writer.writerow(row)\r\n sys.stdout.write(\"\\r\")\r\n sys.stdout.write(msg)\r\n sys.stdout.flush()\r\n\r\n# Function to crop and resize data\r\ndef crop_resize_data(csv_file,csv_file_write,img_write=True):\r\n num_rows = 0\r\n with open(csv_file,'r') as f:\r\n reader = csv.reader(f)\r\n num_rows = len(list(reader))\r\n\r\n with open(csv_file,'r') as f:\r\n reader = csv.reader(f)\r\n with open(csv_file_write,'w',newline='') as g:\r\n writer = csv.writer(g)\r\n counter = 0\r\n for row in reader:\r\n global_counter.counter += 1\r\n row_list = []\r\n img = ''\r\n img = img_path + str(global_counter.counter)\r\n img = img + \".png\"\r\n if (img_write):\r\n temp = cv2.imread(row[0])\r\n mod_image = crop_and_resize(temp)\r\n cv2.imwrite(img,mod_image)\r\n msg = \"Writing file {0}/{1}...\".format(counter+1,num_rows)\r\n sys.stdout.write(\"\\r\")\r\n sys.stdout.write(msg)\r\n sys.stdout.flush()\r\n row_list.append(img)\r\n row_list.append(\"n/a\")\r\n row_list.append(\"n/a\")\r\n row_list.append(str(row[3]))\r\n row_list.append(\"n/a\")\r\n row_list.append(\"n/a\")\r\n row_list.append(\"n/a\") \r\n writer.writerow(row_list)\r\n counter += 1\r\n ","repo_name":"tkush/behavioral-cloning-deep-learning","sub_path":"p3_helperfunctions.py","file_name":"p3_helperfunctions.py","file_ext":"py","file_size_in_byte":9320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"19530840326","text":"####################################################################################################\n# #\n# (c) 2018, 2019 Quantstamp, Inc. 
This content and its use are governed by the license terms at  #\n#                  <https://s3.amazonaws.com/qsp-protocol-license/V2_LICENSE.txt>                  #\n#                                                                                                  #\n####################################################################################################\n\n\nimport logging\nimport logging.config\nimport os\nimport structlog\nimport sys\nimport traceback\nimport yaml\n\nfrom dpath.util import get\nfrom json import load\nfrom pprint import pprint\nfrom web3 import Web3\n\nsys.path.append(os.path.dirname(os.path.realpath(__file__)))\n\n\nclass Program:\n\n    __yaml_config = None\n    __env = None\n\n    @classmethod\n    def __setup_basic_logging(cls, level):\n        logging.getLogger('urllib3').setLevel(logging.CRITICAL)\n        logging.getLogger('botocore').setLevel(logging.CRITICAL)\n\n        structlog.configure_once(\n            context_class=structlog.threadlocal.wrap_dict(dict),\n            logger_factory=structlog.stdlib.LoggerFactory(),\n            wrapper_class=structlog.stdlib.BoundLogger,\n            processors=[\n                structlog.stdlib.filter_by_level,\n                structlog.stdlib.add_logger_name,\n                structlog.stdlib.add_log_level,\n                structlog.stdlib.PositionalArgumentsFormatter(),\n                structlog.processors.TimeStamper(fmt=\"iso\"),\n                structlog.processors.StackInfoRenderer(),\n                structlog.processors.format_exc_info,\n                structlog.processors.UnicodeDecoder(),\n                structlog.stdlib.render_to_log_kwargs]\n        )\n        level_map = {\n            'CRITICAL': 50,\n            'ERROR': 40,\n            'WARNING': 30,\n            'INFO': 20,\n            'DEBUG': 10,\n            'NOTSET': 0,\n        }\n        dict_config = {\n            'version': 1,\n            'disable_existing_loggers': False,\n            'formatters': {\n                'json': {\n                    'format': '%(message)s %(threadName)s %(lineno)d %(pathname)s ',\n                    'class': 'pythonjsonlogger.jsonlogger.JsonFormatter'\n                }\n            },\n            'handlers': {\n                'json': {\n                    'class': 'logging.StreamHandler',\n                    'formatter': 'json'\n                },\n                'file': {\n                    'class': 'logging.handlers.RotatingFileHandler',\n                    'formatter': 'json',\n                    'filename': '/var/log/qsp-protocol/qsp-protocol.log',\n                    'mode': 'a',\n                    'maxBytes': 10485760,\n                    'backupCount': 5\n                }\n            },\n            'loggers': {\n                '': {\n                    'handlers': ['json', 'file'],\n                    'level': level_map[level],\n                }\n            }\n        }\n        logging.config.dictConfig(dict_config)\n\n    @classmethod\n    def __setup_log_streaming(cls):\n        # Load keystore\n        keystore_file = get(Program.__yaml_config[Program.__env], \"/keystore_file\")\n        with open(keystore_file) as k:\n            keystore = load(k)\n\n        # Get account\n        account = Web3.toChecksumAddress('0x' + keystore['address'])\n\n        # Get log streaming config (if any)\n        log_streaming_config = None\n        try:\n            log_streaming_config = get(Program.__yaml_config[Program.__env], \"/logging/streaming\")\n        except KeyError:\n            pass\n\n        # Initialize the log streaming module (should be done once)\n        import log_streaming\n        log_streaming.initialize(account, log_streaming_config)\n\n    @classmethod\n    def setup(cls, env, yaml_file_name, log_level):\n        Program.__setup_basic_logging(log_level)\n        Program.__env = env\n\n        with open(yaml_file_name) as y:\n            Program.__yaml_config = yaml.safe_load(y)\n\n        Program.__setup_log_streaming()\n\n    @classmethod\n    def run(cls, eth_passphrase, eth_auth_token, sol_file):\n        \"\"\"\n        Runs the backend\n        \"\"\"\n        # Note: except for stream_logger, every other import\n        # to a subpackage of qsp_protocol_node must be\n        # performed at this point\n\n        from audit import QSPAuditNode\n        from config import ConfigFactory\n        from utils.stop import Stop\n\n        cfg = ConfigFactory.create_from_dictionary(\n            Program.__yaml_config,\n            Program.__env,\n            account_passwd=eth_passphrase,\n            auth_token=eth_auth_token,\n        )\n\n        logger.info(\"Initializing QSP Audit Node\")\n        logger.debug(\"account: 
{0}\".format(cfg.account))\n logger.debug(\"analyzers: {0}\".format(cfg.analyzers))\n logger.debug(\"audit contract address: {0}\".format(cfg.audit_contract_address))\n\n logger.debug(\"min_price_in_qsp: {0}\".format(cfg.min_price_in_qsp))\n logger.debug(\"evt_polling: {0}\".format(cfg.evt_polling))\n logger.debug(\"audit contract address: {0}\".format(cfg.audit_contract_address))\n\n # Based on the provided configuration, instantiates a new\n # QSP audit node\n audit_node = QSPAuditNode(cfg)\n Stop.register(audit_node)\n\n if QSPAuditNode.is_police_officer(cfg):\n logger.info(\"Running QSP node (performs audits and police checks)\")\n else:\n logger.info(\"Running QSP node (performs audits only)\")\n\n # If a sol file is given, produce the audit report for that file and exit\n if sol_file:\n _, audit_report = audit_node.get_full_report(\n requestor=cfg.account,\n uri=sol_file,\n request_id=1\n )\n pprint(audit_report)\n # Runs the QSP audit node in a busy loop fashion\n else:\n try:\n audit_node.start()\n except Exception as error:\n if audit_node is not None:\n audit_node.stop()\n raise error\n\n\nif __name__ == \"__main__\":\n logger = None\n try:\n Program.setup(\n os.environ['QSP_ENV'],\n os.environ['QSP_CONFIG'],\n os.environ['QSP_LOGGING_LEVEL']\n )\n sol_file = os.environ.get('SOL_FILE')\n\n from log_streaming import get_logger\n logger = get_logger(__name__)\n\n Program.run(os.environ['QSP_ETH_PASSPHRASE'], os.environ['QSP_ETH_AUTH_TOKEN'], sol_file)\n\n except Exception as error:\n if logger is not None:\n logger.exception(\"Error in running node: {0}\".format(str(error)))\n else:\n traceback.print_exc()\n # A non-zero exit code is required to auto-restart\n exit(1)\n","repo_name":"arshdeeptinna/qsp-protocol-node","sub_path":"src/qsp_protocol_node/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":6888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"23863771208","text":"# response_time_analysis.py\n\nimport requests\nimport time\n\nclass ResponseTimeAnalyzer:\n def __init__(self, target_url):\n self.target_url = target_url\n\n def analyze_response_time(self, payload):\n try:\n start_time = time.time()\n response = requests.get(self.target_url, params={\"param\": payload})\n end_time = time.time()\n \n response_time = end_time - start_time\n return response_time\n\n except requests.exceptions.RequestException as e:\n print(f\"Error: {e}\")\n return None\n\n# Example usage:\nif __name__ == \"__main__\":\n target_url = \"https://example.com/vulnerable_endpoint\"\n analyzer = ResponseTimeAnalyzer(target_url)\n \n # Analyze response time for a payload\n payload = \"1' OR IF(1=1, SLEEP(5), 0) --\"\n response_time = analyzer.analyze_response_time(payload)\n print(f\"Response time: {response_time} seconds\")\n","repo_name":"tadash10/Custom-WAF-Evasion","sub_path":"response_time_analysis.py","file_name":"response_time_analysis.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"49"} +{"seq_id":"35116119279","text":"import numpy as np\nimport cv2\nimport json\nimport os\nfrom .helpers import make_dir\n\n\ndef create_stick(filename, keypoints, save_path):\n \"\"\"\n Make a stick image given a json keypoints file\n \n :param string filename: name of image to save\n :param array keypoints\n :param string save_path: path to save image\n \"\"\"\n \n # Create keypoint pairs\n pose_point_pair = [[1, 2], [2, 3], [3, 4], [1, 5], 
[5, 6], [6, 7], [2, 9], [9, 8], [8, 10], [10, 5]]\n\n    face_point_pair = [\n        [0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7], [7, 8], [8, 9], [9, 10], \n        [10, 11], [11, 12], [12, 13], [13, 14], [14, 15], [15, 16], [17, 18], [18, 19], \n        [19, 20], [20, 21], [22, 23], [23, 24], [24, 25], [25, 26], [27, 28], [28, 29], \n        [29, 30], [30, 31], [31, 32], [32, 33], [33, 34], [34, 35], [36, 37], [37, 38], \n        [38, 39], [39, 40], [40, 41], [36, 41], [42, 43], [43, 44], [44, 45], [45, 46], \n        [46, 47], [42, 47], [48, 49], [49, 50], [50, 51], [51, 52], [52, 53], [53, 54], \n        [54, 55], [55, 56], [56, 57], [57, 58], [58, 59], [48, 59], [60, 61], [61, 62], [62, 63], [63, 64]\n    ]\n\n    hand_point_pair = [[i, i+1] for i in range(0, 20)]\n    \n    \n    for keypoint in range(len(keypoints)):\n        pose = keypoints[keypoint][:22]\n        face = keypoints[keypoint][30:170]\n        left_hand = keypoints[keypoint][170:212]\n        right_hand = keypoints[keypoint][212:255]\n\n        part = [pose, face, left_hand, right_hand]\n        part_num_points = [11, 68, 21, 21]\n        part_pair = [pose_point_pair, face_point_pair, hand_point_pair, hand_point_pair]\n\n        # Create paper\n        img = np.zeros((1500, 1500), np.uint8)+255\n\n        for p in range(len(part)):\n            x = part[p][0::2]\n            y = part[p][1::2]\n\n            # Draw points\n            for i in range(part_num_points[p]):\n                cv2.circle(img, (int(x[i]*2048), int(y[i]*1152)), 2, (0, 255, 255), thickness=-1, lineType=cv2.FILLED) \n            \n            # Draw lines\n            for pair in part_pair[p]:\n                cv2.line(img, (int(x[pair[0]]*2048), int(y[pair[0]]*1152)), (int(x[pair[1]]*2048), int(y[pair[1]]*1152)), (0, 0, 255), 2)\n        \n        # Write the image frame\n        cv2.imwrite(save_path + f'/{filename[:-5]}_{keypoint:03}.jpg', img)\n    \n    return\n\n################################################\n\ndef create_video(save_path):\n    \"\"\"\n    Make a video given a directory of images\n\n    :param string save_path: path to save video\n    \"\"\"\n    \n    # Load stick images\n    images = [img for img in os.listdir(save_path)]\n    images.sort()\n\n    # Image to video\n    fps = 30\n\n    frame_array = []\n    for i in range(len(images)):\n        filename = save_path + images[i]\n\n        img = cv2.imread(filename)\n        height, width, layers = img.shape\n        size = (width, height)\n\n        frame_array.append(img)\n\n    # Make Video\n    out = cv2.VideoWriter(save_path + str(images[0])[:-8] + '.mp4', cv2.VideoWriter_fourcc(*'DIVX'), fps, size)\n\n    # Write the video frame\n    for i in range(len(frame_array)):\n        out.write(frame_array[i])\n\n    # Release the video\n    out.release()\n    \n    return\n\n################################################\n\ndef create_img_video(file_path, save_path, filename): \n    \"\"\"\n    Load json file and make a stick image and video\n\n    :param string file_path: path to load json file\n    :param string save_path: path to save image and video\n    :param string filename: name of image and video to save\n    \"\"\"\n    \n    # Open keypoints json file in file_path\n    f = open(file_path + filename, encoding=\"UTF-8\")\n    keypoints = json.loads(f.read())\n\n    save_path = save_path + filename[:-5] + '/'\n    # Call func make_dir\n    make_dir(save_path)\n    \n    # Make images\n    create_stick(filename, keypoints, save_path)\n    # Make video\n    create_video(save_path)\n    \n    print(filename.split(\".json\")[0], ' img, video saving Complete!')\n    \n    return","repo_name":"Tobigs-team/Hand-to-Hand","sub_path":"text2keypoint/modeling/key2video.py","file_name":"key2video.py","file_ext":"py","file_size_in_byte":3943,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"} +{"seq_id":"33359062329","text":"#!/usr/bin/python3\n\n\"\"\"\nPath: 
lab4_main.py\nAuthor: Ricardo Franzen - rfranzen@gmail.com\nDate: 2022-01-17\nDescription: Update the hosts file with new hosts from a file\n**** CAUTION ****\n    This script will overwrite the hosts file without checking if\n    the hosts are already in the file\n\nThis script will:\n    1. open the file specified in the \"new_hosts\" variable\n    2. create a list of all hosts in this file\n    3. build this list with helper functions imported from a different script (lab4_libs.py)\n    4. add the content under ipv4 section to the /etc/hosts file\n\"\"\"\n\nimport lab4_libs\nimport re\n\nnew_hosts = \"./files/dns_list.txt\"\nhosts_file = \"/etc/hosts\"\n\n# main\nif __name__ == '__main__':\n    new_hosts_list = lab4_libs.get_hosts_from(new_hosts)\n    hosts_content = lab4_libs.get_file_content(hosts_file)\n\n    new_hosts_content = \"\" # new content for the hosts file\n\n    for line in hosts_content.splitlines():\n        new_hosts_content += line + \"\\n\"\n\n        # Guard against lines without tabs (blank or comment lines)\n        line_check = re.split(r'\\t+', line)\n        if len(line_check) > 1 and line_check[0] == \"127.0.0.1\" and line_check[1] == \"localhost\":\n            for host in new_hosts_list:\n                new_hosts_content += host[0] + \"\\t\" + host[1] + \"\\n\"\n    \n    lab4_libs.write_to_file(hosts_file, new_hosts_content)\n    print(\"Done!\")\n","repo_name":"rz3n/learning-archive","sub_path":"python/lab4_main.py","file_name":"lab4_main.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"18648461080","text":"\"\"\"Define tricks.\n\"\"\"\n\nimport os\nimport signal\nimport subprocess\nimport time\n\nfrom string import Template\nfrom watchdog.utils import unicode_paths\n\nfrom pathtools.patterns import match_any_paths\nfrom watchdog.tricks import Trick\nfrom watchdog.events import EVENT_TYPE_CREATED, EVENT_TYPE_MODIFIED\nfrom watchdog.events import EVENT_TYPE_MOVED, EVENT_TYPE_DELETED\n\n\n\nclass AutoRunTrick(Trick):\n    \"\"\"A variant of watchdog trick AutoRestartTrick.\n\n    When instantiated without arguments, it's the same as a logger of file\n    system events.\n    It is intended to be used as the handler class with Dog.create_handler().\n\n    Constructor Args:\n        command:\n        patterns:\n        ignore_patterns:\n        ignore_directories: The same as Dog class.\n        stop_signal:\n        kill_after: The same as Trick class.\n\n    Attributes:\n        command_default: A template string representing the default command.\n        command: Readonly property, the shell command string.\n    \"\"\"\n\n    command_default = ('${event_object} ${event_src_path} is '\n                       '${event_type}${if_moved}')\n\n    def __init__(self, command=None, patterns=None, ignore_patterns=None,\n                 ignore_directories=False, stop_signal=signal.SIGINT,\n                 kill_after=10):\n        # Match Trick.__init__() signature.\n        super().__init__(patterns, ignore_patterns, ignore_directories)\n        self._command = command\n        self._stop_signal = stop_signal\n        self._kill_after = kill_after\n        self._process = None\n\n    def __eq__(self, value):\n        return isinstance(value, self.__class__) and self.key == value.key\n\n    def __ne__(self, value):\n        return not self.__eq__(value)\n\n    def __hash__(self):\n        return hash(self.key)\n\n    def __repr__(self):\n        rstr = ('<AutoRunTrick: command={}, patterns={}, ignore_patterns={}, '\n                'ignore_directories={}>').format(*self.key)\n        return rstr\n\n    @staticmethod\n    def _slash(event, path):\n        \"\"\"Add trailing slash to path if event is directory event.\n\n        Args:\n            path: A path string, it's event.src_path or event.dest_path if\n                exists.\n\n        Returns:\n            A path string with a trailing slash when necessary.\n        \"\"\"\n        if event.is_directory:\n            path = 
os.path.join(path, '')\n        return path\n\n\n    def _substitute_command(self, event):\n        if hasattr(event, 'dest_path'):\n            dest_path = self._slash(event, event.dest_path)\n        else:\n            dest_path = ''\n        if event.src_path:\n            src_path = '%r' % self._slash(event, event.src_path)\n        else:\n            src_path = ''\n        event_obj = 'directory' if event.is_directory else 'file'\n        if_moved = ' to %r' % dest_path if dest_path else ''\n        context = {\n            'event_object': event_obj,\n            'event_src_path': src_path,\n            'event_type': event.event_type,\n            # 'event_dest_path': dest_path,\n            'if_moved': if_moved,\n        }\n        c = Template(type(self).command_default).safe_substitute(**context)\n        return c\n\n    @property\n    def command(self):\n        \"\"\"Readonly property, command string.\"\"\"\n        return self._command\n\n    def start(self, event=None):\n        \"\"\"Execute a command according to context.\n\n        It logs all file system events when self._command is None, or executes\n        the command otherwise.\n\n        Args:\n            event: A file system event object.\n        \"\"\"\n        if self._command is None:\n            if event is not None:\n                command = self._substitute_command(event)\n                print(command)\n        else:\n            self._process = subprocess.Popen(self._command, shell=True,\n                                             start_new_session=True)\n\n    def stop(self):\n        \"\"\"Try its best to kill the shell command process.\n        \"\"\"\n        if self._process is None:\n            return\n        try:\n            os.killpg(os.getpgid(self._process.pid), self._stop_signal)\n        except OSError:\n            # Process is already gone.\n            pass\n        else:\n            kill_time = time.time() + self._kill_after\n            while time.time() < kill_time:\n                if self._process.poll() is not None:\n                    break\n                time.sleep(0.25)\n            else:\n                try:\n                    os.killpg(os.getpgid(self._process.pid), signal.SIGKILL)\n                except OSError:\n                    pass\n        self._process = None\n\n    def on_any_event(self, event):\n        \"\"\"Override superclass on_any_event, pass event to start().\"\"\"\n        self.stop()\n        self.start(event=event)\n\n    @property\n    def key(self):\n        \"\"\"Get the tuple to calculate object hash value.\n\n        Returns:\n            A tuple containing object attributes.\n        \"\"\"\n        patterns = tuple(self.patterns) if self.patterns is not None \\\n            else None\n        ignore_patterns = tuple(self._ignore_patterns) \\\n            if self._ignore_patterns is not None \\\n            else None\n        return (self.command, patterns, ignore_patterns,\n                self.ignore_directories)\n\n    def dispatch(self, event):\n        \"\"\"Override superclass method.\n\n        Append trailing slash to event src_path if it is a directory event and\n        its dest_path if exists before matching using fnmatch.\n\n        Args:\n            event: The event object to dispatch.\n        \"\"\"\n        if event.is_directory and self._ignore_directories:\n            return\n\n        paths = []\n        if hasattr(event, 'dest_path'):\n            dest_path = self._slash(event, event.dest_path)\n            paths.append(unicode_paths.decode(dest_path))\n        if event.src_path:\n            src_path = self._slash(event, event.src_path)\n            paths.append(unicode_paths.decode(src_path))\n\n        if match_any_paths(paths,\n                           included_patterns=self._patterns,\n                           excluded_patterns=self._ignore_patterns,\n                           case_sensitive=self.case_sensitive):\n            self.on_any_event(event)\n        method_map = {\n            EVENT_TYPE_CREATED: self.on_created,\n            EVENT_TYPE_MODIFIED: self.on_modified,\n            EVENT_TYPE_MOVED: self.on_moved,\n            EVENT_TYPE_DELETED: self.on_deleted,\n        }\n        event_type = event.event_type\n        method_map[event_type](event)\n","repo_name":"StephenHesperus/arfarf","sub_path":"arfarf/tricks.py","file_name":"tricks.py","file_ext":"py","file_size_in_byte":6491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"38726788000","text":"from PyQt5.QtCore import 
QSettings, QTranslator, qVersion, QCoreApplication\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtCore import QVariant\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtWidgets import QAction\nfrom PyQt5.QtWidgets import QActionGroup\nfrom PyQt5.QtWidgets import QMenu\nfrom PyQt5.QtWidgets import QWidgetAction\nfrom PyQt5.QtWidgets import QDialog\nfrom qgis.gui import QgsMapToolEmitPoint\nfrom qgis.core import QgsProject\nfrom qgis.core import QgsExpressionContextUtils\nfrom qgis.core import QgsExpressionContextScope\nfrom qgis.core import QgsGeometry\nfrom qgis.core import QgsFeature\nfrom qgis.core import QgsPointXY\nfrom qgis.core import QgsApplication\n# Initialize Qt resources from file resources.py\nfrom .resources import *\nfrom .GisFireSettings import GisFIRESettings\n# Import the code for the DockWidget\nfrom .GisFireUi import DlgIgnitionPoint\nfrom .GisFireUi import DockControl\nfrom .Helper import Layers\nfrom .Helper import Utils\n# Import simulation\nfrom .SpreadSimulation import Simulator\n\n\nimport os.path\n\nclass GisFIRE:\n    \"\"\"QGIS Plugin Implementation.\"\"\"\n\n    def __init__(self, iface):\n        \"\"\"Constructor.\n\n        :param iface: An interface instance that will be passed to this class\n            which provides the hook by which you can manipulate the QGIS\n            application at run time.\n        :type iface: QgsInterface\n        \"\"\"\n        # Save reference to the QGIS interface\n        self.iface = iface\n\n        # Save the location of the plugin directory\n        self.plugin_dir = os.path.dirname(__file__)\n\n        # Initialize locale\n        locale = QSettings().value('locale/userLocale')[0:2]\n        locale_path = os.path.join(\n            self.plugin_dir,\n            'i18n',\n            'GisFIRE_{}.qm'.format(locale))\n\n        if os.path.exists(locale_path):\n            self.translator = QTranslator()\n            self.translator.load(locale_path)\n\n            if qVersion() > '4.3.3':\n                QCoreApplication.installTranslator(self.translator)\n\n        # Initialization of UI references\n        self.toolbarActions = {}\n        self.menuActions = {}\n        # Initialization of GisFIRE data layers\n        self.layers = {}\n        # Initialization of simulator structure\n        self._simulator = {}\n        self._simulator[\"init\"] = False\n        self._simulator[\"simulator\"] = None\n        # UI elements\n        self._pointTool = None\n        self._previousTool = None\n\n        # Connect to project signals to allow plugin interaction when a new\n        # project is created or loaded\n        self.iface.newProjectCreated.connect(self.onNewProject)\n        project = QgsProject.instance()\n        if (project is not None):\n            project.readProject.connect(self.onReadProject)\n            project.projectSaved.connect(self.onSavedProject)\n\n    # noinspection PyMethodMayBeStatic\n    def tr(self, message):\n        \"\"\"Get the translation for a string using Qt translation API.\n\n        We implement this ourselves since we do not inherit QObject.\n\n        :param message: String for translation.\n        :type message: str, QString\n\n        :returns: Translated version of message.\n        :rtype: QString\n        \"\"\"\n        # noinspection PyTypeChecker,PyArgumentList,PyCallByClass\n        return QCoreApplication.translate('GisFIRE', message)\n\n    def _addToolbarActions(self):\n        \"\"\"Create the toolbar buttons that GisFIRE uses as shortcuts.\"\"\"\n        # Toggle GisFIRE Pane\n        action = QAction(QIcon(':/plugins/GisFire/gisfire.png'), self.tr('GisFIRE'), None)\n        action.triggered.connect(self.onTogglePane)\n        action.setEnabled(True)\n        action.setCheckable(True)\n        action.setStatusTip(self.tr('Toggle GisFIRE Pane'))\n        action.setWhatsThis(self.tr('Toggle GisFIRE Pane'))\n        self.toolbar.addAction(action)\n        self.toolbarActions['gisfire'] = action\n        # Convert Project to GisFIRE or back to 
QGIS\n action = QAction(QIcon(':/plugins/GisFire/convert_project.png'), self.tr('Convert Project'), None)\n action.triggered.connect(self.onConvertProject)\n action.setEnabled(True)\n action.setCheckable(False)\n action.setStatusTip(self.tr('Convert Project to GisFIRE or to Standard QGIS'))\n action.setWhatsThis(self.tr('Convert Project to GisFIRE or to Standard QGIS'))\n self.toolbar.addAction(action)\n self.toolbarActions['convert_project'] = action\n # Set ignition point\n action = QAction(QIcon(':/plugins/GisFire/ignition_point.png'), self.tr('Set Ignition Point'), None)\n action.triggered.connect(self.onSetIgnitionPoint)\n action.setEnabled(True)\n action.setCheckable(True)\n action.setStatusTip(self.tr('Set Ignition Point'))\n action.setWhatsThis(self.tr('Set Ignition Point'))\n self.toolbar.addAction(action)\n self.toolbarActions['ignition_point'] = action\n # Separator\n self.toolbar.addSeparator()\n # Simulation Play\n action = QAction(QIcon(':/plugins/GisFire/play.png'), self.tr('Run Simulation'), None)\n action.triggered.connect(self.onRunSimulation)\n action.setEnabled(True)\n action.setCheckable(False)\n action.setStatusTip(self.tr('Full run of the Fire Spread simulation'))\n action.setWhatsThis(self.tr('Full run of the Fire Spread simulation'))\n self.toolbar.addAction(action)\n self.toolbarActions['run_simulation'] = action\n # Simulation Step\n action = QAction(QIcon(':/plugins/GisFire/next.png'), self.tr('Step Simulation'), None)\n action.triggered.connect(self.onStepSimulation)\n action.setEnabled(True)\n action.setCheckable(False)\n action.setStatusTip(self.tr('Run one step of the Fire Spread simulation'))\n action.setWhatsThis(self.tr('Run one step of the Fire Spread simulation'))\n self.toolbar.addAction(action)\n self.toolbarActions['step_simulation'] = action\n # Simulation Pause\n action = QAction(QIcon(':/plugins/GisFire/pause.png'), self.tr('Pause Simulation'), None)\n action.triggered.connect(self.onPauseSimulation)\n action.setEnabled(True)\n action.setCheckable(False)\n action.setStatusTip(self.tr('Pause the Fire Spread simulation'))\n action.setWhatsThis(self.tr('Pause the Fire Spread simulation'))\n self.toolbar.addAction(action)\n self.toolbarActions['pause_simulation'] = action\n # Simulation Stop\n action = QAction(QIcon(':/plugins/GisFire/stop.png'), self.tr('Stop Simulation'), None)\n action.triggered.connect(self.onStopSimulation)\n action.setEnabled(True)\n action.setCheckable(False)\n action.setStatusTip(self.tr('Stop the Fire Spread simulation'))\n action.setWhatsThis(self.tr('Stop the Fire Spread simulation'))\n self.toolbar.addAction(action)\n self.toolbarActions['stop_simulation'] = action\n\n def _addMenuActions(self):\n \"\"\"Create the menu entries that allow GisFIRE procedures.\"\"\"\n # Toggle GisFIRE Pane\n action = self.menu.addAction(self.tr('Toggle GisFIRE Pane'))\n action.setIcon(QIcon(':/plugins/GisFire/gisfire.png'))\n action.setIconVisibleInMenu(True)\n action.triggered.connect(self.onTogglePane)\n self.menuActions['gisfire'] = action\n # Convert Project to GisFIRE or back to QGIS\n action = self.menu.addAction(self.tr('Convert Project'))\n action.setIcon(QIcon(':/plugins/GisFire/convert_project.png'))\n action.setIconVisibleInMenu(True)\n action.triggered.connect(self.onConvertProject)\n self.menuActions['convert_project'] = action\n # Set ignition point\n action = self.menu.addAction(self.tr('Set Ignition Point'))\n action.setIcon(QIcon(':/plugins/GisFire/ignition_point.png'))\n action.setIconVisibleInMenu(True)\n 
action.triggered.connect(self.onSetIgnitionPoint)\n self.menuActions['ignition_point'] = action\n # Separator\n self.menu.addSeparator()\n # Simulation Play\n action = self.menu.addAction(self.tr('Run Simulation'))\n action.setIcon(QIcon(':/plugins/GisFire/play.png'))\n action.setIconVisibleInMenu(True)\n action.triggered.connect(self.onRunSimulation)\n self.menuActions['run_simulation'] = action\n # Simulation Step\n action = self.menu.addAction(self.tr('Step Simulation'))\n action.setIcon(QIcon(':/plugins/GisFire/next.png'))\n action.setIconVisibleInMenu(True)\n action.triggered.connect(self.onStepSimulation)\n self.menuActions['step_simulation'] = action\n # Simulation Pause\n action = self.menu.addAction(self.tr('Pause Simulation'))\n action.setIcon(QIcon(':/plugins/GisFire/pause.png'))\n action.setIconVisibleInMenu(True)\n action.triggered.connect(self.onPauseSimulation)\n self.menuActions['pause_simulation'] = action\n # Simulation Stop\n action = self.menu.addAction(self.tr('Stop Simulation'))\n action.setIcon(QIcon(':/plugins/GisFire/stop.png'))\n action.setIconVisibleInMenu(True)\n action.triggered.connect(self.onStopSimulation)\n self.menuActions['stop_simulation'] = action\n\n def _addRelations(self):\n \"\"\"Create mutually exclusive relations between toolbar buttons.\"\"\"\n # Get the nav toolbar actions\n actions = self.iface.mapNavToolToolBar().actions()\n # Build a group with actions and add GisFIRE actions\n group = QActionGroup(self.iface.mainWindow())\n group.setExclusive(True)\n for action in actions:\n group.addAction(action)\n group.addAction(self.toolbarActions['ignition_point'])\n\n def _disableMenusAndToolbars(self):\n \"\"\"Disables all menu items and toolbar buttons of the GisFIRE UI\"\"\"\n self.toolbarActions['gisfire'].setEnabled(False)\n self.toolbarActions['convert_project'].setEnabled(False)\n self.toolbarActions['ignition_point'].setEnabled(False)\n self.toolbarActions['run_simulation'].setEnabled(False)\n self.toolbarActions['step_simulation'].setEnabled(False)\n self.toolbarActions['pause_simulation'].setEnabled(False)\n self.toolbarActions['stop_simulation'].setEnabled(False)\n self.menuActions['gisfire'].setEnabled(False)\n self.menuActions['convert_project'].setEnabled(False)\n self.menuActions['ignition_point'].setEnabled(False)\n self.menuActions['run_simulation'].setEnabled(False)\n self.menuActions['step_simulation'].setEnabled(False)\n self.menuActions['pause_simulation'].setEnabled(False)\n self.menuActions['stop_simulation'].setEnabled(False)\n\n def disableConvertProject(self):\n \"\"\"Disables the convert project (from and to GisFIRE) menu items and\n toolbar buttons of the GisFIRE UI\n \"\"\"\n self.toolbarActions['convert_project'].setEnabled(False)\n self.menuActions['convert_project'].setEnabled(False)\n\n def _enableMenusAndToolbars(self):\n \"\"\"Enables all menu items and toolbar buttons of the GisFIRE UI\"\"\"\n self.toolbarActions['gisfire'].setEnabled(True)\n self.toolbarActions['convert_project'].setEnabled(True)\n self.toolbarActions['ignition_point'].setEnabled(True)\n self.toolbarActions['run_simulation'].setEnabled(True)\n self.toolbarActions['step_simulation'].setEnabled(True)\n self.toolbarActions['pause_simulation'].setEnabled(True)\n self.toolbarActions['stop_simulation'].setEnabled(True)\n self.menuActions['gisfire'].setEnabled(True)\n self.menuActions['convert_project'].setEnabled(True)\n self.menuActions['ignition_point'].setEnabled(True)\n self.menuActions['run_simulation'].setEnabled(True)\n 
self.menuActions['step_simulation'].setEnabled(True)\n        self.menuActions['pause_simulation'].setEnabled(True)\n        self.menuActions['stop_simulation'].setEnabled(True)\n\n    def _enableConvertProject(self):\n        \"\"\"Enables the convert project (from and to GisFIRE) menu items and\n        toolbar buttons of the GisFIRE UI\n        \"\"\"\n        self.toolbarActions['convert_project'].setEnabled(True)\n        self.menuActions['convert_project'].setEnabled(True)\n\n    def initGui(self):\n        \"\"\"Initializes the QGIS GUI for the GisFIRE plugin.\"\"\"\n        # setup the menu\n        self.menu = QMenu(self.tr(u'Gis&FIRE'), self.iface.mainWindow().menuBar())\n        actions = self.iface.mainWindow().menuBar().actions()\n        lastAction = actions[-1]\n        self.iface.mainWindow().menuBar().insertMenu(lastAction, self.menu)\n        #setup the toolbar\n        self.toolbar = self.iface.addToolBar(u'GisFIRE')\n        self.toolbar.setObjectName(u'GisFIRE')\n        #setup the GisFire pane\n        self.dockwidget = None\n\n        # Add toolbar buttons\n        self._addToolbarActions()\n        # Add menu entries\n        self._addMenuActions()\n        # Create relations with existing menus and buttons\n        self._addRelations()\n        # If there is an open project (that has already been saved) check if it\n        # is a GisFIRE project and enable toolbar buttons and menus\n        project_name = Utils.getProjectVariable(QgsProject.instance(), 'project_basename')\n        if not type(project_name) is str:\n            self._disableMenusAndToolbars()\n        else:\n            if Utils.isAGisFireProject(QgsProject.instance()):\n                self._enableMenusAndToolbars()\n                self._prepareProject()\n            else:\n                self._disableMenusAndToolbars()\n                self._enableConvertProject()\n\n    #--------------------------------------------------------------------------\n\n    def unload(self):\n        \"\"\"Removes the plugin menu item and icon from QGIS GUI.\"\"\"\n        # Remove toolbar items\n        for action in self.toolbarActions.values():\n            action.triggered.disconnect()\n            self.iface.removeToolBarIcon(action)\n            action.deleteLater()\n        # Remove toolbar\n        self.toolbar.deleteLater()\n        # Remove menu items\n        for action in self.menuActions.values():\n            action.triggered.disconnect()\n            self.menu.removeAction(action)\n            action.deleteLater()\n        # Remove menu\n        self.menu.deleteLater()\n        # Remove dockwidget\n        if self.dockwidget != None:\n            self.dockwidget.hide()\n            self.dockwidget.deleteLater()\n        # Disconnect project signals\n        self.iface.newProjectCreated.disconnect(self.onNewProject)\n        project = QgsProject.instance()\n        if (project is not None):\n            project.readProject.disconnect(self.onReadProject)\n            project.projectSaved.disconnect(self.onSavedProject)\n\n    #--------------------------------------------------------------------------\n\n    def onReadProject(self):\n        \"\"\"Slot connection for the Read Project signal. It checks if the loaded\n        project is a GisFIRE project and updates the GisFIRE UI accordingly\"\"\"\n        if Utils.isAGisFireProject(QgsProject.instance()):\n            self._prepareProject()\n            self._enableMenusAndToolbars()\n        else:\n            self._enableConvertProject()\n\n    def onNewProject(self):\n        \"\"\"Slot connection for the New Project signal. It disables all the\n        GisFIRE UI because the project has to be saved to be converted to a\n        GisFIRE project\"\"\"\n        self._disableMenusAndToolbars()\n\n    def onSavedProject(self):\n        \"\"\"Slot connection for the Save Project signal. 
It enables convert\n        project UI because once the project is saved it can be converted to a\n        GisFIRE project\"\"\"\n        if not Utils.isAGisFireProject(QgsProject.instance()):\n            self._enableConvertProject()\n\n    #--------------------------------------------------------------------------\n\n    def onTogglePane(self):\n        \"\"\"Event listener to show/hide the dock pane\"\"\"\n        if self.dockwidget is None:\n            # Create the dockwidget if it is not yet created\n            self.dockwidget = DockControl()\n            self.iface.addDockWidget(Qt.RightDockWidgetArea, self.dockwidget)\n            self.dockwidget.show()\n            self.toolbarActions['gisfire'].setChecked(True)\n        else:\n            # Switch visibility and maintain consistency with toolbar button\n            if self.dockwidget.isVisible():\n                self.dockwidget.hide()\n                self.toolbarActions['gisfire'].setChecked(False)\n            else:\n                self.dockwidget.show()\n                self.toolbarActions['gisfire'].setChecked(True)\n\n    #--------------------------------------------------------------------------\n\n    def _prepareProject(self):\n        \"\"\"Sets up the different layers and storage (in a geopackage file) to\n        manage all the information needed by the project\"\"\"\n        # Set up the geopackage\n        project_name = Utils.getProjectVariable(QgsProject.instance(), 'project_basename')\n        project_location = Utils.getProjectVariable(QgsProject.instance(), 'project_home')\n        self.geo_package = project_location + '/' + project_name + '.gpkg'\n        # Set up the layers\n        ignition_layer = Layers.CreateIgnitionPointLayer(self.iface, QgsProject.instance(), self.geo_package)\n        self.layers[GisFIRESettings.IGNITION_LAYER_ID] = ignition_layer\n        perimeter_layer = Layers.CreatePerimeterLayer(self.iface, QgsProject.instance(), self.geo_package)\n        self.layers[GisFIRESettings.PERIMETER_LAYER_ID] = perimeter_layer\n        models_layer = Layers.CreateModelsLayer(self.iface, QgsProject.instance(), self.geo_package)\n        self.layers[GisFIRESettings.FIREMODELS_LAYER_ID] = models_layer\n\n    def onConvertProject(self):\n        \"\"\"Slot of the convert button signal to convert a project to a GisFIRE\n        compliant project or back to a standard QGIS project\"\"\"\n        if Utils.isAGisFireProject(QgsProject.instance()):\n            # Remove the information of the GisFIRE project\n            Utils.removeProjectVariable(QgsProject.instance(), GisFIRESettings.VERSION_VARIABLE_NAME)\n            self._disableMenusAndToolbars()\n            self._enableConvertProject()\n        else:\n            # Add the project variables and layers of a valid GisFIRE project\n            Utils.setProjectVariable(QgsProject.instance(), GisFIRESettings.VERSION_VARIABLE_NAME, GisFIRESettings.VERSION)\n            self._prepareProject()\n            self._enableMenusAndToolbars()\n\n    #--------------------------------------------------------------------------\n\n    def onSetIgnitionPoint(self):\n        \"\"\"Slot of the ignition point button signal to set an ignition point for\n        a wildfire. 
This procedure sets a new slot to the onClick signal when\n        the user clicks on the map canvas\"\"\"\n        # Clean in case other point creation was aborted\n        if self._pointTool is not None:\n            self._pointTool.canvasClicked.disconnect()\n            del self._pointTool\n            self._previousTool = None\n            self._pointTool = None\n        # Store the previous tool in use by the user\n        canvas = self.iface.mapCanvas()\n        self._previousTool = canvas.mapTool()\n        # Set the tool and onClick callback\n        self._pointTool = QgsMapToolEmitPoint(canvas)\n        self._pointTool.canvasClicked.connect(self._setIgnitionPointCallback)\n        canvas.setMapTool(self._pointTool)\n        print(\"Click: set point\")\n\n    def _setIgnitionPointCallback(self, point, mouse_button):\n        \"\"\"Slot callback for the onClick signal on the map canvas when a new\n        ignition point is assigned\n\n        :param point: Point in map units where the mouse has been clicked\n        :type point: QgsPointXY\n\n        :param mouse_button: Characteristics of the clicked mouse button\n        :type mouse_button: Qt.MouseButton\n        \"\"\"\n        # Clean the signal, slot and mouse icon created before\n        self._pointTool.canvasClicked.disconnect()\n        print(\"Click: set point CALLBACK\")\n        if mouse_button == Qt.LeftButton:\n            # Create the dialog in charge of collecting needed data\n            dlg = DlgIgnitionPoint(self.iface.mainWindow())\n            result = dlg.exec_()\n            if result == QDialog.Accepted:\n                Layers.AddIgnitionPoint(self.layers[GisFIRESettings.IGNITION_LAYER_ID], QgsGeometry.fromPointXY(point), dlg.getDateTime(), 0, 0)\n                Layers.LayerToGeoPackage(self.layers[GisFIRESettings.IGNITION_LAYER_ID], self.geo_package)\n        canvas = self.iface.mapCanvas()\n        canvas.setMapTool(self._previousTool)\n        del self._pointTool\n        self._previousTool = None\n        self._pointTool = None\n\n    #--------------------------------------------------------------------------\n\n    def onRunSimulation(self):\n        pass\n\n    def onStepSimulation(self):\n        for i in range(0, 1):#24*60*2):\n            if self._simulator['simulator'] is None:\n                simulator = Simulator.SpreadSimulator()\n                simulator.ignitionLayer = self.layers[GisFIRESettings.IGNITION_LAYER_ID]\n                simulator.perimeterLayer = self.layers[GisFIRESettings.PERIMETER_LAYER_ID]\n                simulator.fuelLayer = self.layers[GisFIRESettings.FIREMODELS_LAYER_ID]\n                self._simulator['simulator'] = simulator\n                self._simulator['init'] = False\n            if not self._simulator['init']:\n                self._simulator['init'] = True\n                self._simulator['simulator'].initilizeSimulation()\n            self._simulator['simulator'].step()\n            if self.iface.mapCanvas().isCachingEnabled():\n                self.layers[GisFIRESettings.IGNITION_LAYER_ID].triggerRepaint()\n                self.layers[GisFIRESettings.PERIMETER_LAYER_ID].triggerRepaint()\n            else:\n                self.iface.mapCanvas().refresh()\n            QCoreApplication.processEvents()\n            QgsApplication.processEvents()\n\n    def onPauseSimulation(self):\n        pass\n\n    def onStopSimulation(self):\n        pass\n","repo_name":"JaumeFigueras/GisFIRE","sub_path":"GisFire.py","file_name":"GisFire.py","file_ext":"py","file_size_in_byte":22088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"}
+{"seq_id":"19024595710","text":"\"\"\"\nFile: tasks.py\nAuthor: chess-seventh\nEmail: chess7th@pm.me\nGithub: https://github.com/chess-seventh\nDescription: Task module.\n\"\"\"\n\nimport datetime\nfrom tasklib import TaskWarrior as TW\nfrom helpers import normalize_task_date\n\n\ndef load_tasks(task_config):\n    \"\"\"Load tasks based on parsed config.\n    :task_config: TaskWarrior configuration.\n    :returns: List of all selected tasks.\n\n    \"\"\"\n    twar = 
TW(task_config[0])\n\n tasks = list()\n taskq = list()\n for task_project in task_config[1]:\n taskq.append(twar.tasks.pending().filter(project=task_project))\n\n for task_query in taskq:\n for task in task_query:\n tasks.append(task)\n return tasks\n\n\ndef overdue_tasks(tasks):\n \"\"\"Filter overdue tasks.\n\n :tasks: List of all tasks.\n :returns: List of all overdue tasks.\n\n \"\"\"\n today = datetime.datetime.now()\n overdue = list()\n for task in tasks:\n if task['due']:\n if today >= normalize_task_date(task, 'due'):\n overdue.append(task)\n return overdue\n\n\ndef scheduled_tasks(tasks):\n \"\"\"Filter scheduled tasks.\n\n :tasks: List of all tasks.\n :returns: List of scheduled tasks.\n\n \"\"\"\n today = datetime.datetime.now()\n scheduled = list()\n for task in tasks:\n if task['scheduled']:\n if today >= normalize_task_date(task, 'scheduled'):\n scheduled.append(task)\n return scheduled\n\n\ndef not_date_tasks(tasks):\n \"\"\"Filter tasks with no due date\n\n :tasks: List of all tasks.\n :returns: List of scheduled tasks.\n\n \"\"\"\n not_due = list()\n for task in tasks:\n if not task['due']:\n # output_task(task)\n not_due.append(task)\n return not_due\n","repo_name":"chess-seventh/TaskKhalReschedulWarrior","sub_path":"src/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"49"} +{"seq_id":"32391038989","text":"from rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom movie_app.models import Movie, Director, Review\nfrom movie_app.serializers import MovieSerializer, DirectorSerializer, ReviewSerializer, MovieReviewSerializer\n\n\n@api_view(['GET'])\ndef director_list_api_view(request):\n directors = Director.objects.all()\n serializer = DirectorSerializer(directors, many=True)\n return Response(data=serializer.data)\n\n\n@api_view(['GET'])\ndef director_detail_api_view(request, id):\n try:\n director = Director.objects.get(id=id)\n except Director.DoesNotExist:\n return Response(data={'message': 'Data not found!'},\n status=status.HTTP_404_NOT_FOUND)\n serializer = DirectorSerializer(director, many=False)\n return Response(data=serializer.data)\n\n\n@api_view(['GET'])\ndef movie_list_api_view(request):\n movies = Movie.objects.all()\n serializer = MovieSerializer(movies, many=True)\n return Response(data=serializer.data)\n\n\n@api_view(['GET'])\ndef movie_detail_api_view(request, id):\n try:\n movie = Movie.objects.get(id=id)\n except Movie.DoesNotExist:\n return Response(data={'message': 'Data not found!'},\n status=status.HTTP_404_NOT_FOUND)\n serializer = MovieSerializer(movie, many=False)\n return Response(data=serializer.data)\n\n\n@api_view(['GET'])\ndef review_list_api_view(request):\n reviews = Review.objects.all()\n serializer = ReviewSerializer(reviews, many=True)\n return Response(data=serializer.data)\n\n\n@api_view(['GET'])\ndef review_detail_api_view(request, id):\n try:\n review = Review.objects.get(id=id)\n except Review.DoesNotExist:\n return Response(data={'message': 'Data not found!'},\n status=status.HTTP_404_NOT_FOUND)\n serializer = ReviewSerializer(review, many=False)\n return Response(data=serializer.data)\n\n\n@api_view(['GET'])\ndef review_movies_view(request):\n if request.method == 'GET':\n movie = Movie.objects.all()\n serializer = MovieReviewSerializer(movie, many=True)\n return Response(data=serializer.data, 
status=status.HTTP_200_OK)\n","repo_name":"ossipova/Affiche","sub_path":"movie_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"}
+{"seq_id":"35561260415","text":"import botocore\nfrom botocore.compat import HAS_CRT\nfrom tests import requires_crt\nfrom tests.unit.auth.test_signers import (\n    TestS3SigV4Auth,\n    TestSigV4Presign,\n    TestSigV4Resign,\n)\n\n\n@requires_crt()\nclass TestCrtS3SigV4Auth(TestS3SigV4Auth):\n    # Repeat TestS3SigV4Auth tests, but using CRT signer\n    if HAS_CRT:\n        AuthClass = botocore.crt.auth.CrtS3SigV4Auth\n\n\n@requires_crt()\nclass TestCrtSigV4Resign(TestSigV4Resign):\n    # Run same tests against CRT auth\n    if HAS_CRT:\n        AuthClass = botocore.crt.auth.CrtSigV4Auth\n\n\n@requires_crt()\nclass TestCrtSigV4Presign(TestSigV4Presign):\n    # Run same tests against CRT auth\n    if HAS_CRT:\n        AuthClass = botocore.crt.auth.CrtSigV4QueryAuth\n","repo_name":"qtdslly/oss-sdk-python","sub_path":"src/botocore/tests/unit/crt/auth/test_crt_signers.py","file_name":"test_crt_signers.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"49"}
+{"seq_id":"17401611111","text":"import itertools\nimport os\n\nimport numpy as np\nimport OpenGL.GL as gl # imagine a python binding with gl.Begin not gl.glBegin\nfrom OpenGL.GL.shaders import compileShader, compileProgram\n\nfrom . import bufferize\nfrom . import draw\n\n\nclass Manager:\n    \"\"\"Manages OpenGL buffers and gives handles for rendering & hiding objects\"\"\"\n    def __init__(self, draw_distance: float, field_of_view: float, memory_limit: int):\n        self.draw_distance = draw_distance\n        self.field_of_view = field_of_view\n        MB = 10 ** 6 # ~ 1 Megabyte\n        self.memory_limit = memory_limit * MB\n        # ^ can't check against the GPU's limits until AFTER init_GL is called\n        self.render_mode = \"flat\"\n        self.buffer_update_queue = []\n        # ^ [(buffer, start, length, data)]\n        # goes directly into glBufferSubData()\n        self.vertex_buffer_size = self.memory_limit // 2\n        self.index_buffer_size = self.memory_limit // 2\n        self.buffer_location = {}\n        # ^ renderable: {\"vertex\": (start, length),\n        #               \"index\": (start, length)}\n        # renderable = (\"brush\", brush.id)\n        # renderable = (\"displacement\", (brush.id, side.id))\n        # renderable = (\"obj_model\", obj_model.filename)\n        # SUB-OBJECTS NOTES\n        # for tinting in the editor (selection, displacement draw mode etc.)...\n        # we need to identify the sub-objects of a given renderable\n        # sub-spans, which would be calculated in a given renderable's bufferize function\n\n        # OBJ MODEL: obj_model.o[], obj_model.g[]\n        # BRUSH: brush.faces\n        # DISPLACEMENT: displacement.triangle (is_walkable tint)\n\n        # converting buffer data out into other objects could be VERY cool\n        self.buffer_allocation_map = {\"vertex\": {\"brush\": [],\n                                                 \"displacement\": [],\n                                                 \"obj_model\": []},\n                                      \"index\": {\"brush\": [],\n                                                \"displacement\": [],\n                                                \"obj_model\": []}}\n        # ^ buffer: {type: [(start, length)]}\n        self.draw_calls = {\"brush\": [], \"displacement\": [], \"obj_model\": []}\n        # ^ {renderable_type: [span, ...]}\n        # where span = (start, length)\n        self.dont_draw = set()\n        # ^ {renderable, ...}\n\n        self.dynamics = dict()\n        # {renderable: {\"position\": [x, y, z]}}\n\n    def initialise(self, shader_folder):\n        gl.glClearColor(0.0, 0.0, 0.0, 0.0)\n        gl.glEnable(gl.GL_CULL_FACE)\n        gl.glEnable(gl.GL_DEPTH_TEST)\n        gl.glFrontFace(gl.GL_CW)\n        
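# with GL_CW declared as the front-face winding above, the back-face\n        # culling configured next skips faces that wind counter-clockwise on screen\n        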
gl.glCullFace(gl.GL_BACK)\n gl.glPointSize(4)\n gl.glPolygonMode(gl.GL_BACK, gl.GL_LINE)\n self.compile_shaders(shader_folder)\n\n def compile_shaders(self, folder):\n\n def make_shader(file, shader_type):\n return compileShader(open(os.path.join(folder, file), \"rb\"), shader_type)\n\n # shader construction could be automated some\n # the shader names have a clear format: f\"{renderable}.vert\", f\"{style}_{renderable}.frag\"\n # use os.listdir to assemble all shaders?\n vert_brush = make_shader(\"brush.vert\", gl.GL_VERTEX_SHADER)\n vert_displacement = make_shader(\"displacement.vert\", gl.GL_VERTEX_SHADER)\n vert_obj_model = make_shader(\"obj_model.vert\", gl.GL_VERTEX_SHADER)\n frag_flat_brush = make_shader(\"flat_brush.frag\", gl.GL_FRAGMENT_SHADER)\n frag_flat_displacement = make_shader(\"flat_displacement.frag\", gl.GL_FRAGMENT_SHADER)\n frag_flat_obj_model = make_shader(\"flat_obj_model.frag\", gl.GL_FRAGMENT_SHADER)\n frag_stripey_brush = make_shader(\"stripey_brush.frag\", gl.GL_FRAGMENT_SHADER)\n self.shader = {\"flat\": {}, \"stripey\": {}, \"textured\": {}, \"shaded\": {}}\n # ^ {\"render_mode\": {\"target\": program}}\n self.shader[\"flat\"][\"brush\"] = compileProgram(vert_brush, frag_flat_brush)\n self.shader[\"flat\"][\"displacement\"] = compileProgram(vert_displacement, frag_flat_displacement)\n self.shader[\"flat\"][\"obj_model\"] = compileProgram(vert_obj_model, frag_flat_obj_model)\n self.shader[\"stripey\"][\"brush\"] = compileProgram(vert_brush, frag_stripey_brush)\n for render_mode_dict in self.shader.values():\n for program in render_mode_dict.values():\n gl.glLinkProgram(program)\n self.uniform = {\"flat\": {\"brush\": {}, \"displacement\": {},\n \"obj_model\": {\"location\": None}},\n \"stripey\": {\"brush\": {}},\n \"textured\": {},\n \"shaded\": {}}\n # ^ style: {target: {uniform: location}}\n for style, targets in self.uniform.items():\n for target in targets:\n shader = self.shader[style][target]\n gl.glUseProgram(shader)\n for uniform in self.uniform[style][target]:\n self.uniform[style][target][uniform] = gl.glGetUniformLocation(shader, uniform)\n self.uniform[style][target][\"matrix\"] = gl.glGetUniformLocation(shader, \"MVP_matrix\")\n gl.glUseProgram(0)\n # Buffers\n self.VERTEX_BUFFER, self.INDEX_BUFFER = gl.glGenBuffers(2)\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.VERTEX_BUFFER)\n gl.glBufferData(gl.GL_ARRAY_BUFFER, self.vertex_buffer_size, None, gl.GL_DYNAMIC_DRAW)\n gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.INDEX_BUFFER)\n gl.glBufferData(gl.GL_ELEMENT_ARRAY_BUFFER, self.index_buffer_size, None, gl.GL_DYNAMIC_DRAW)\n # https://github.com/snake-biscuits/QtPyHammer/wiki/Rendering:-Vertex-Format\n gl.glEnableVertexAttribArray(0) # vertex_position\n gl.glVertexAttribPointer(0, 3, gl.GL_FLOAT, gl.GL_FALSE, 44, gl.GLvoidp(0))\n gl.glEnableVertexAttribArray(1) # vertex_normal (brush only)\n gl.glVertexAttribPointer(1, 3, gl.GL_FLOAT, gl.GL_TRUE, 44, gl.GLvoidp(12))\n gl.glEnableVertexAttribArray(2) # vertex_uv\n gl.glVertexAttribPointer(2, 2, gl.GL_FLOAT, gl.GL_FALSE, 44, gl.GLvoidp(24))\n gl.glEnableVertexAttribArray(3) # editor_colour\n gl.glVertexAttribPointer(3, 3, gl.GL_FLOAT, gl.GL_FALSE, 44, gl.GLvoidp(32))\n gl.glEnableVertexAttribArray(4) # blend_alpha (displacement only)\n gl.glVertexAttribPointer(4, 1, gl.GL_FLOAT, gl.GL_FALSE, 44, gl.GLvoidp(32))\n\n def draw(self):\n gl.glUseProgram(0)\n draw.dot_grid(-2048, 2048, -2048, 2048, 64)\n draw.origin_marker()\n # TODO: dither transparency for tooltextures (skip, hint, trigger, clip)\n for 
renderable_type, spans in self.draw_calls.items():\n            gl.glUseProgram(self.shader[self.render_mode][renderable_type])\n            for start, length in spans:\n                count = length // 4\n                gl.glDrawElements(gl.GL_TRIANGLES, count, gl.GL_UNSIGNED_INT, gl.GLvoidp(start))\n        # render models separately, since they are instanced\n        for renderable in self.dynamics:\n            renderable_type, _id = renderable\n            gl.glUseProgram(self.shader[self.render_mode][renderable_type])\n            # translate with shader uniforms; glUniform* writes to the program\n            # currently in use, so the program must be bound first\n            location_uniform = self.uniform[self.render_mode][renderable_type][\"location\"]\n            position = self.dynamics[renderable][\"position\"]\n            gl.glUniform3f(location_uniform, *position)\n            start, length = self.buffer_location[renderable][\"index\"]\n            count = length // 4\n            gl.glDrawElements(gl.GL_TRIANGLES, count, gl.GL_UNSIGNED_INT, gl.GLvoidp(start))\n\n    def update(self):\n        \"\"\"Updates buffers & shader uniforms\"\"\"\n        if len(self.buffer_update_queue) > 0:\n            # do one buffer update\n            update = self.buffer_update_queue.pop(0)\n            buffer, start, length, data = update\n            gl.glBufferSubData(buffer, start, length, data)\n        # update shader uniforms\n        model_view_matrix = gl.glGetFloatv(gl.GL_MODELVIEW_MATRIX)\n        for renderable_type in self.shader[self.render_mode]:\n            gl.glUseProgram(self.shader[self.render_mode][renderable_type])\n            if \"matrix\" in self.uniform[self.render_mode][renderable_type]:\n                location = self.uniform[self.render_mode][renderable_type][\"matrix\"]\n                gl.glUniformMatrix4fv(location, 1, gl.GL_FALSE, model_view_matrix)\n\n    def track_span(self, buffer, renderable_type, span_to_track):\n        # adds span_to_track to self.buffer_allocation_map\n        # allowing for proper memory management of buffers to continue\n        target = self.buffer_allocation_map[buffer][renderable_type]\n        updated_map = add_span(target, span_to_track)\n        self.buffer_allocation_map[buffer][renderable_type] = updated_map\n\n    def untrack_span(self, buffer, renderable_type, span_to_untrack):\n        # removes span from self.buffer_allocation_map\n        # doesn't affect draw_calls or buffer_location\n        # the data remains in the buffer, allowing for an \"undo\"\n        # so long as the data hasn't been overwritten\n        target = self.buffer_allocation_map[buffer][renderable_type]\n        updated_map = remove_span(target, span_to_untrack)\n        self.buffer_allocation_map[buffer][renderable_type] = updated_map\n\n    def update_mapping(self, buffer, renderable_type, start, ids, lengths):\n        \"\"\"Updates self.buffer_location & self.draw_calls\"\"\"\n        span = (start, sum(lengths))\n        self.track_span(buffer, renderable_type, span)\n        if buffer == \"index\":\n            self.draw_calls[renderable_type] = add_span(self.draw_calls[renderable_type], span)\n            if renderable_type == \"displacement\": # hide displacement brush\n                brush_ids = {brush_id for brush_id, side_id in ids}\n                brush_spans = []\n                for brush_id in brush_ids:\n                    span = self.buffer_location[(\"brush\", brush_id)][\"index\"]\n                    brush_spans = add_span(brush_spans, span)\n                for span in brush_spans:\n                    self.draw_calls[\"brush\"] = remove_span(self.draw_calls[\"brush\"], span)\n        for renderable_id, length in zip(ids, lengths):\n            renderable = (renderable_type, renderable_id)\n            if renderable not in self.buffer_location:\n                self.buffer_location[renderable] = dict()\n            self.buffer_location[renderable][buffer] = (start, length)\n            start += length\n\n    def find_gaps(self, buffer=\"vertex\", preferred_type=None, minimum_size=1):\n        \"\"\"Generator which yields a (start, length) span for each gap which meets requirements\"\"\"\n        if minimum_size < 1:\n            raise RuntimeError(\"Can't search 
for gap smaller than 1 byte\")\n        # \"spans\" are spaces in buffers holding data that is being used\n        # \"gaps\" are unused spaces in memory that data can be assigned to\n        # both are recorded with a tuple: (start, length)\n        limit_of = {\"vertex\": self.vertex_buffer_size, \"index\": self.index_buffer_size}\n        limit = limit_of[buffer]\n        buffer_map = self.buffer_allocation_map[buffer]\n        if sum([len(buffer_map[r]) for r in buffer_map]) == 0:\n            # buffer is empty, preferred_type doesn't matter\n            yield (0, limit)\n            return\n        if preferred_type not in (None, *buffer_map.keys()):\n            raise RuntimeError(\"Invalid preferred_type: {}\".format(preferred_type))\n        span_type = {span: rtype for rtype in buffer_map for span in buffer_map[rtype]}\n        # ^ invert {rtype: [*spans]} -> {span: rtype}\n        filled_spans = sorted(span_type, key=lambda s: s[0])\n        # ^ all occupied spans, sorted by start\n        prev_span = filled_spans.pop(0)\n        prev_span_end = sum(prev_span)\n        for span in filled_spans:\n            span_start, span_length = span\n            gap_start = prev_span_end # free space begins right where the previous span ends\n            gap_length = span_start - gap_start\n            if gap_length >= minimum_size:\n                gap = (gap_start, gap_length)\n                if preferred_type in (None, span_type[span], span_type[prev_span]):\n                    # this gap touches our preferred_type\n                    yield gap\n            prev_span = span\n            prev_span_end = span_start + span_length\n        if prev_span_end < limit: # gap at tail of buffer\n            gap_start = prev_span_end\n            gap = (gap_start, limit - gap_start)\n            if preferred_type in (None, span_type[prev_span]):\n                # this gap touches our preferred_type\n                yield gap\n\n    def add_brushes(self, *brushes):\n        brush_data = dict()\n        # ^ {brush.id: (vertex_data, index_data)}\n        displacement_data = dict()\n        # ^ {(brush.id, face.id): (vertex_data, index_data)}\n        for brush in brushes:\n            brush_data[brush.id] = bufferize.brush(brush)\n            if brush.is_displacement:\n                for face in brush.faces:\n                    if not hasattr(face, \"displacement\"):\n                        continue\n                    data = bufferize.displacement(face)\n                    displacement_data[(brush.id, face.id)] = data\n        self.add_renderables(\"brush\", brush_data)\n        self.add_renderables(\"displacement\", displacement_data)\n\n    def add_obj_models(self, *obj_models):\n        obj_model_data = dict()\n        # ^ {_id: (vertex_data, index_data)}\n        for obj_model in obj_models:\n            obj_model_data[obj_model.name] = bufferize.obj_model(obj_model)\n        self.add_renderables(\"obj_model\", obj_model_data)\n\n    def add_renderables(self, renderable_type, renderables):\n        \"\"\"Add data to the appropriate GPU buffers\"\"\"\n        # renderables = {_id: (vertices, indices)}\n        # self.buffer_location[(renderable_type, _id)]\n        # _id may be an int or tuple of ints (\"displacement\", (brush.id, face.id))\n        vertex_gaps = self.find_gaps(buffer=\"vertex\")\n        vertex_gaps = {g: [0, [], []] for g in vertex_gaps}\n        index_gaps = self.find_gaps(buffer=\"index\", preferred_type=renderable_type)\n        index_gaps = {g: [0, [], []] for g in index_gaps}\n        index_gaps.update({g: [0, [], []] for g in self.find_gaps(buffer=\"index\")})\n        # ^ gap: [used_length, [ids], [data]]\n        for _id in renderables:\n            vertex_data, index_data = renderables[_id]\n            vertex_data_length = len(vertex_data) * 4\n            for gap in vertex_gaps:\n                gap_start, gap_length = gap\n                used_length = vertex_gaps[gap][0]\n                free_length = gap_length - used_length\n                if vertex_data_length <= free_length:\n                    vertex_gaps[gap][0] += vertex_data_length\n                    vertex_gaps[gap][1].append(_id)\n                    vertex_gaps[gap][2].append(vertex_data)\n                    index_offset = (gap_start + used_length) // 44\n                    break\n            index_data_length = len(index_data) * 4\n            for gap in index_gaps:\n                gap_start, 
gap_length = gap\n                used_length = index_gaps[gap][0]\n                free_length = gap_length - used_length\n                if index_data_length <= free_length:\n                    index_gaps[gap][0] += index_data_length\n                    index_gaps[gap][1].append(_id)\n                    index_data = [i + index_offset for i in index_data]\n                    index_gaps[gap][2].append(index_data)\n                    break\n        for gap in vertex_gaps:\n            if vertex_gaps[gap][0] == 0:\n                continue # no data to write in this gap\n            start = gap[0]\n            used_length = vertex_gaps[gap][0]\n            flattened_data = list(itertools.chain(*vertex_gaps[gap][2]))\n            vertex_data = np.array(flattened_data, dtype=np.float32)\n            update = (gl.GL_ARRAY_BUFFER, start, used_length, vertex_data)\n            self.buffer_update_queue.append(update)\n            ids = vertex_gaps[gap][1]\n            lengths = [len(d) * 4 for d in vertex_gaps[gap][2]]\n            self.update_mapping(\"vertex\", renderable_type, start, ids, lengths)\n        for gap in index_gaps:\n            if index_gaps[gap][0] == 0:\n                continue # no data to write in this gap\n            start = gap[0]\n            used_length = index_gaps[gap][0]\n            flattened_data = list(itertools.chain(*index_gaps[gap][2]))\n            index_data = np.array(flattened_data, dtype=np.uint32)\n            update = (gl.GL_ELEMENT_ARRAY_BUFFER, start, used_length, index_data)\n            self.buffer_update_queue.append(update)\n            ids = index_gaps[gap][1]\n            lengths = [len(d) * 4 for d in index_gaps[gap][2]]\n            self.update_mapping(\"index\", renderable_type, start, ids, lengths)\n\n    def hide(self, renderable):\n        # print(f\"Hiding {renderable}\")\n        self.dont_draw.add(renderable)\n        renderable_type = renderable[0]\n        span = self.buffer_location[renderable][\"index\"]\n        span_list = self.draw_calls[renderable_type]\n        self.draw_calls[renderable_type] = remove_span(span_list, span)\n\n    def show(self, renderable):\n        assert renderable in self.dont_draw # a bug worth checking for\n        # print(f\"Showing {renderable}\")\n        self.dont_draw.discard(renderable)\n        renderable_type = renderable[0]\n        span = self.buffer_location[renderable][\"index\"]\n        span_list = self.draw_calls[renderable_type]\n        self.draw_calls[renderable_type] = add_span(span_list, span)\n\n\ndef add_span(span_list, span):\n    if len(span_list) == 0:\n        return [span]\n    start, length = span\n    end = start + length\n    for i, other in enumerate(span_list):\n        S, L = other\n        E = S + L\n        if E < start:\n            continue # (S, L) ends before span starts and doesn't touch it\n        elif end < S: # span leads (S, L) without touching it\n            span_list.insert(i, (start, length))\n            break\n        elif S == end or E == start: # span leads (S, L) or span tails (S, L)\n            span_list.pop(i)\n            new_start = min(start, S)\n            span_list.insert(i, (new_start, L + length))\n            break\n    else: # span tails the final (S, L) without touching it\n        span_list.append((start, length))\n    return span_list\n\n\ndef remove_span(span_list, span):\n    start, length = span\n    end = start + length\n    out = []\n    for S, L in span_list:\n        E = S + L\n        # special cases\n        if start <= S < E <= end: # span eclipses (S, L)\n            continue\n        if S < start < end < E: # (S, L) eclipses span\n            out.append((S, start - S))\n            out.append((end, E - end))\n            continue\n        # basic cases\n        if end < S: # span leads (S, L)\n            out.append((S, L))\n            continue\n        if start <= S < end < E: # span overlaps start of (S, L)\n            out.append((end, E - end))\n            continue\n        if S < start < E <= end: # span overlaps tail of (S, L)\n            out.append((S, start - S))\n            continue\n        if E <= start: # span tails (S, L)\n            out.append((S, L))\n    return 
out\n","repo_name":"QtPyHammer-devs/QtPyHammer","sub_path":"QtPyHammer/utilities/render/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":19189,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"49"} +{"seq_id":"2857510099","text":"##########################################\r\n# 2019.5.5\r\n# compare acid seq of two group\r\n# To xushuye\r\n##########################################\r\nimport os,sys\r\nfrom collections import Counter\r\n\r\ngroup1_fls = [os.path.join(boot,fl) for boot,dirs,fls in os.walk(os.path.abspath(sys.argv[1])) for fl in fls if fl.endswith('gbk') and not fl.startswith('.')]\r\ncount1 = len(group1_fls)\r\nprint(\"{} have {} samples\".format(sys.argv[1],count1))\r\n\r\ngroup2_fls = [os.path.join(boot,fl) for boot,dirs,fls in os.walk(os.path.abspath(sys.argv[2])) for fl in fls if fl.endswith('gbk') and not fl.startswith('.')]\r\n# print(group2_fls)\r\ncount2 = len(group2_fls)\r\nprint(\"{} have {} samples\".format(sys.argv[2],count2))\r\nprint('-'*25+'\\n')\r\n\r\ndef obtain_acidseq_base(content):\r\n return ''.join(content.split(r'/translation=\"')[1].split(r'\"')[0].split('\\n')).replace(' ','')\r\n\r\ndef obtain_acidseq(filepath):\r\n f_content = [obtain_acidseq_base(content) for content in open(filepath).read().split(r'CDS')[1:] if content.find(r'/translation=\"')!=-1]\r\n return f_content\r\n\r\ndef obtain_GeneAcid(filepath):\r\n f_dict = {''.join(content.split(r'/translation=\"')[1].split(r'\"')[0].split('\\n')).replace(' ',''):content.split(r'gene=\"')[1].split(r'\"')[0] for content in open(filepath).read().split(r'CDS')[1:] if content.find(\"/gene\") !=-1}\r\n return f_dict\r\n\r\nacids1 = [acidseq for fl in group1_fls for acidseq in obtain_acidseq(fl)]\r\nacids2 = [acidseq for fl in group2_fls for acidseq in obtain_acidseq(fl)]\r\nacids1_dict,acids2_dict= Counter(acids1),Counter(acids2)\r\n\r\n[print(seq) for seq,num in acids1_dict.items() if num>34]\r\n\r\ntemp = [seq for seq,num in acids1_dict.items() if seq in acids2_dict if (num/count1 - acids2_dict[seq]/count2) > float(sys.argv[3])]\r\n\r\ndict_GeneAcid = {}\r\n[dict_GeneAcid.update(obtain_GeneAcid(fl)) for fl in (group1_fls+group2_fls)]\r\n\r\nprint('\\t'.join(['gene',sys.argv[1],sys.argv[2]])+'\\n')\r\n[print('\\t'.join([dict_GeneAcid[i],str(acids1_dict[i]),str(acids2_dict[i])])) for i in temp if i in dict_GeneAcid]\r\n[print('\\t'.join(['',str(acids1_dict[i]),str(acids2_dict[i])])) for i in temp if i not in dict_GeneAcid]","repo_name":"quincy-deng/working_in_hospital","sub_path":"python/Compare_two-group-acidSeq.py","file_name":"Compare_two-group-acidSeq.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"2605553934","text":"N=529\nNBIN=bin(N)[2:]\ncount=0\nj=0\ncountlist=[]\nfor i in range(len(NBIN)-1):\n\ti+=1\n\tif NBIN[i]=='0':\n\t\tcount+=1\n\telse:\n\t\tcountlist.append(count)\n\t\tcount=0\n\t\tj+=1\nif countlist:\n\treturn max(countlist)\nelse:\n\treturn 0","repo_name":"ShivamMahajan/Codemania","sub_path":"Python/sequence.py","file_name":"sequence.py","file_ext":"py","file_size_in_byte":213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"25942471460","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom datetime import datetime\nfrom django.shortcuts import render\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom 
django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.urlresolvers import reverse\nfrom django.contrib.auth.models import User\nfrom crawlr.models import Category, Route\nfrom crawlr.forms import CategoryForm, RouteForm, UserForm, UserProfileForm\n\ndef index(request):\n    # A dictionary providing context for the template engine\n    # shows all available categories, and the top 5 most liked routes overall\n    category_list = Category.objects.all()\n    liked_routes = Route.objects.order_by('-likes')[:5]\n\n    context_dict = {'categories': category_list, 'likes': liked_routes}\n\n    visitor_cookie_handler(request)\n    context_dict['visits'] = request.session['visits']\n\n    # Return a rendered response to the client\n    response = render(request, 'crawlr/index.html', context=context_dict)\n\n    return response\n\ndef about(request):\n    visitor_cookie_handler(request)\n    context_dict = {'visits':request.session['visits'],}\n    return render(request, 'crawlr/about.html', context = context_dict)\n\ndef show_category(request, category_name_slug):\n    context_dict = {}\n    try:\n        category = Category.objects.get(slug=category_name_slug)\n        routes = Route.objects.filter(category=category).order_by('-likes')\n        context_dict['routes']= routes\n        context_dict['category']= category\n    except Category.DoesNotExist:\n        context_dict['route']= None\n        context_dict['category']= None\n    return render(request, 'crawlr/category.html', context_dict)\n\ndef show_route(request, route_name_slug):\n    context_dict = {}\n    try:\n        route = Route.objects.get(slug=route_name_slug)\n        context_dict['route']= route\n        if request.user in route.liked_by.all():\n            context_dict['liked'] = True\n        else:\n            context_dict['liked'] = False\n    except Route.DoesNotExist:\n        context_dict['route']= None\n    return render(request, 'crawlr/route.html', context=context_dict)\n\ndef find_directions(request):\n    context_dict = {}\n    return render(request, 'crawlr/find_directions.html', context=context_dict)\n\n\ndef show_profile(request, username):\n    context_dict = {}\n    try:\n        user = User.objects.get(username=username)\n    except User.DoesNotExist:\n        return HttpResponseRedirect(reverse('index'))\n    context_dict['user'] = user\n    try:\n        routes = Route.objects.filter(created_by=user)\n        context_dict['routes'] = routes\n    except Route.DoesNotExist:\n        context_dict['routes'] = None\n\n    return render(request, 'crawlr/profile.html', context=context_dict)\n\n@login_required\ndef add_category(request):\n    form = CategoryForm()\n    if request.method == 'POST':\n        form = CategoryForm(request.POST)\n        if form.is_valid():\n            form.save(commit=True)\n            return index(request)\n        else:\n            print(form.errors)\n    return render(request, 'crawlr/add_category.html', {'form':form})\n\n\n@login_required\ndef add_route(request):\n    form = RouteForm(initial = {\"created_by\": request.user})\n    if request.method == 'POST':\n        form = RouteForm(request.POST)\n        if form.is_valid():\n            form.save(commit=True)\n            return index(request)\n        else:\n            print(form.errors)\n    return render(request, 'crawlr/add_route.html', {'form':form})\n\n@login_required\ndef like_route(request):\n    
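\"\"\"AJAX-style view: reads the route_slug GET parameter, records at most\n    one like per user for that route and returns the updated count.\"\"\"\n    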
print(\"like request received\")\n    route_name = None\n    if request.method == 'GET':\n        route_name = request.GET['route_slug']\n    likes = 0\n    if route_name:\n        this_route = Route.objects.get(slug=route_name)\n        if this_route:\n            if request.user in this_route.liked_by.all():\n                # already liked by this user; nothing to change\n                likes = this_route.likes\n            else:\n                likes = this_route.likes + 1\n                this_route.likes = likes\n                this_route.liked_by.add(request.user)\n                this_route.save()\n    return HttpResponse(likes)\n\n\ndef register(request):\n    registered = False\n    if request.method == 'POST':\n        user_form = UserForm(data=request.POST)\n        profile_form = UserProfileForm(data=request.POST)\n        if user_form.is_valid() and profile_form.is_valid():\n            user = user_form.save()\n            # Hash the password before saving the user\n            user.set_password(user.password)\n            user.save()\n            profile = profile_form.save(commit=False)\n            profile.user = user\n            if 'picture' in request.FILES:\n                profile.picture = request.FILES['picture']\n            profile.save()\n            registered = True\n        else:\n            print(user_form.errors, profile_form.errors)\n    else:\n        user_form = UserForm()\n        profile_form = UserProfileForm()\n    return render(request, 'crawlr/register.html', {'user_form':user_form,\n                                                    'profile_form':profile_form,\n                                                    'registered':registered})\n\ndef user_login(request):\n    if request.method == 'POST':\n        username = request.POST.get('username')\n        password = request.POST.get('password')\n        user = authenticate(username=username, password=password)\n        if user:\n            if user.is_active:\n                login(request, user)\n                return HttpResponseRedirect(reverse('index'))\n            else:\n                return render(request, 'crawlr/login.html', {'errormessage':'Your account has been disabled.'})\n        else:\n            print(\"Invalid login details for: {0}\".format(username))\n            return render(request, 'crawlr/login.html', {'errormessage': 'Invalid username or password.'})\n    else:\n        return render(request, 'crawlr/login.html', {})\n\n\n\n@login_required\ndef user_logout(request):\n    logout(request)\n    return HttpResponseRedirect(reverse('index'))\n\ndef get_server_side_cookie(request, cookie, default_val=None):\n    val = request.session.get(cookie)\n    if not val:\n        val = default_val\n    return val\n\ndef visitor_cookie_handler(request):\n    visits = int(get_server_side_cookie(request, 'visits', '1'))\n\n    last_visit_cookie = get_server_side_cookie(request, 'last_visit', str(datetime.now()))\n    last_visit_time = datetime.strptime(last_visit_cookie[:-7], '%Y-%m-%d %H:%M:%S')\n    if (datetime.now() - last_visit_time).days > 0:\n        visits = visits + 1\n        # a day has passed, so count the visit and refresh the server-side cookie\n        request.session['last_visit'] = str(datetime.now())\n    else:\n        # same day: keep the stored timestamp as it was\n        request.session['last_visit'] = last_visit_cookie\n    request.session['visits'] = visits\n","repo_name":"gosgjkaj/CrawlR","sub_path":"crawlr/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"}
+{"seq_id":"44348263921","text":"import torch \nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.backends.cudnn as cudnn\nfrom torch.autograd import Variable\nimport time\n\n\nclass Identity(nn.Module):\n    def __init__(self, *args):\n        super(Identity, self).__init__()\n\n    def forward(self, x): \n        return x\n\nclass Layer(nn.Module):\n    def __init__(self, in_planes, planes, BN):\n        super(Layer, self).__init__()\n        self.bn = BN(planes)\n        self.conv = nn.Conv2d(in_planes, planes, 3, padding=1, bias=False)\n        self.act = nn.ReLU()\n\n    def forward(self, x): \n        return self.act(self.bn(self.conv(x)))\n\nclass Net(nn.Module):\n    def __init__(self, BN, n_layers=3):\n        
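# BN is the normalisation class applied after each convolution\n        # (nn.BatchNorm2d, or the no-op Identity stub above to disable it);\n        # n_layers sets how many Layer blocks _make_layers stacks\n        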
super(Net, self).__init__()\n self.in_planes = 3 \n self.layers = self._make_layers(Layer, 64, n_layers, BN) \n self.linear = nn.Linear(64, 10) \n\n def _make_layers(self, block, planes, num_blocks, BN):\n strides = [1] + [1] * (num_blocks - 1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, BN))\n self.in_planes = planes\n return nn.Sequential(*layers)\n\n def forward(self, x): \n out = self.layers(x)\n out = F.avg_pool2d(out, out.size(2))\n out = out.view(out.size(0), -1) \n return self.linear(out)\n\ndef do_epoch(net, criterion, optimizer, lam):\n net.train()\n aggr_loss = count = 0 \n for _ in range(1000):\n inputs = Variable(torch.cuda.FloatTensor(128,3,32,32).normal_(), requires_grad=True)\n targets = Variable(torch.LongTensor(128).random_(0, 10).cuda())\n outputs = net(inputs)\n\n optimizer.zero_grad()\n loss = criterion(outputs, targets)\n loss.backward(create_graph=(lam > 0)) \n\n # gradient penalty\n if lam > 0:\n gpenalty = inputs.grad.view(inputs.size(0), -1).add(1e-5).norm(1, 1).mean()\n (lam * gpenalty).backward()\n\n optimizer.step()\n\n count += 1\n aggr_loss += loss.data[0]\n\n return aggr_loss / count\n\ndef main(net, lam):\n net.cuda()\n cudnn.benchmark = True\n\n criterion = torch.nn.CrossEntropyLoss()\n criterion.cuda()\n\n optimizer = torch.optim.SGD(net.parameters(), lr=.001, momentum=0.9)\n\n for epoch in range(1):\n time_start = time.time()\n\n loss = do_epoch(net, criterion, optimizer, lam)\n\n print(\"epoch %2d loss %.2f time %d\" % (epoch, loss, time.time()-time_start))\n\nn_layers = 3 \nBN = Identity\nprint('No BN and no lambda')\nmain(Net(BN, n_layers), 0.) \n\nprint('No BN and with lambda')\nmain(Net(BN, n_layers), .001)\n\nBN = nn.BatchNorm2d\nprint('With BN and no lambda')\nmain(Net(BN, n_layers), 0.)\n\nprint('With BN and with lambda')\nmain(Net(BN, n_layers), .001)\n","repo_name":"fartashf/gvar_code","sub_path":"test/test_bn_speed.py","file_name":"test_bn_speed.py","file_ext":"py","file_size_in_byte":3089,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"49"}